| repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, ⌀ = null) |
|---|---|---|---|---|
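Each record below pairs repository metadata (repo_name, ref, path, copies) with the full file text in the content column. As a minimal sketch of how rows with this schema could be consumed, assuming the dump originates from a Hugging Face-style dataset (the dataset identifier here is hypothetical, not given by this page):

from datasets import load_dataset

# Hypothetical identifier; substitute the actual source of this dump.
ds = load_dataset("example-org/source-code-dump", split="train", streaming=True)

for row in ds:
    # Columns match the header above; content may be null (⌀), hence the fallback.
    print(row["repo_name"], row["ref"], row["path"], row["copies"], len(row["content"] or ""))
    break  # inspect only the first record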
BorisJeremic/Real-ESSI-Examples
|
refs/heads/master
|
parallel/test_cases/27NodeBrick/cantilever_different_Poisson/NumberOfDivision4/PoissonRatio0.10/compare_txt.py
|
637
|
#!/usr/bin/python
import h5py
import sys
import numpy as np
import os
import re
import random
# find the path to my own python function:
cur_dir=os.getcwd()
sep='test_cases'
test_DIR=cur_dir.split(sep,1)[0]
scriptDIR=test_DIR+'compare_function'
sys.path.append(scriptDIR)
# import my own function for color and comparator
from mycomparator import *
from mycolor_fun import *
# analytic_solution = sys.argv[1]
# numeric_result = sys.argv[2]
analytic_solution = 'analytic_solution.txt'
numeric_result = 'numeric_result.txt'
analytic_sol = np.loadtxt(analytic_solution)
numeric_res = np.loadtxt(numeric_result)
abs_error = abs(analytic_sol - numeric_res)
rel_error = abs_error/analytic_sol*100  # relative error in percent, matching the 'error[%]' column printed below
analytic_sol = float(analytic_sol)
numeric_res = float(numeric_res)
rel_error = float(rel_error)
# print the results
case_flag=1
print headrun() , "-----------Testing results-----------------"
print headstep() ,'{0} {1} {2} '.format('analytic_solution ','numeric_result ','error[%]')
print headOK() ,'{0:+e} {1:+e} {2:+0.2f} '.format(analytic_sol, numeric_res, rel_error )
if(case_flag==1):
print headOKCASE(),"-----------Done this case!-----------------"
# legacy backup
# find . -name 'element.fei' -exec bash -c 'mv $0 ${0/element.fei/add_element.include}' {} \;
# find . -name 'constraint.fei' -exec bash -c 'mv $0 ${0/constraint.fei/add_constraint.include}' {} \;
# find . -name 'node.fei' -exec bash -c 'mv $0 ${0/node.fei/add_node.include}' {} \;
# find . -name 'add_node.fei' -exec bash -c 'mv $0 ${0/add_node.fei/add_node.include}' {} \;
# find . -name 'elementLT.fei' -exec bash -c 'mv $0 ${0/elementLT.fei/add_elementLT.include}' {} \;
# sed -i "s/node\.fei/add_node.include/" main.fei
# sed -i "s/add_node\.fei/add_node.include/" main.fei
# sed -i "s/element\.fei/add_element.include/" main.fei
# sed -i "s/elementLT\.fei/add_elementLT.include/" main.fei
# sed -i "s/constraint\.fei/add_constraint.include/" main.fei
# find . -name '*_bak.h5.feioutput' -exec bash -c 'mv $0 ${0/\_bak.h5.feioutput/\_original\.h5.feioutput}' {} \;
|
ehamiter/pyufc
|
refs/heads/master
|
pyufc/__init__.py
|
1
|
from .pyufc import Fighter, PyUFCError
|
iansf/engine
|
refs/heads/master
|
sky/tools/roll/patch.py
|
27
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import utils
def patch_and_filter(dest_dir, relative_patches_dir):
os.chdir(dest_dir)
utils.filter_file("build/landmines.py",
lambda line: not "gyp_environment" in line)
utils.commit("filter gyp_environment out of build/landmines.py")
patch(dest_dir, relative_patches_dir)
def patch(dest_dir, relative_patches_dir=os.curdir):
"""Applies the *.patch files in |relative_patches_dir|.
Args:
relative_patches_dir: A directory path relative to the current directory.
Defaults to the directory of this file.
Raises:
subprocess.CalledProcessError if the patch couldn't be applied.
"""
patches_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
relative_patches_dir)
assert os.path.isdir(patches_dir)
os.chdir(dest_dir)
for p in utils.find(["*.patch"], patches_dir):
print "applying patch %s" % os.path.basename(p)
try:
utils.system(["git", "apply", p])
utils.commit("applied patch %s" % os.path.basename(p))
except subprocess.CalledProcessError:
print "ERROR: patch %s failed to apply" % os.path.basename(p)
raise
|
xen0l/ansible
|
refs/heads/devel
|
lib/ansible/plugins/callback/dense.py
|
9
|
# (c) 2016, Dag Wieers <dag@wieers.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: dense
type: stdout
short_description: minimal stdout output
extends_documentation_fragment:
- default_callback
description:
- When in verbose mode it will act the same as the default callback
author:
- Dag Wieers (@dagwieers)
version_added: "2.3"
requirements:
- set as stdout in configuration
'''
from collections import MutableMapping, MutableSequence
HAS_OD = False
try:
from collections import OrderedDict
HAS_OD = True
except ImportError:
pass
from ansible.module_utils.six import binary_type, text_type
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
from ansible.utils.color import colorize, hostcolor
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
import sys
# Design goals:
#
# + On screen there should only be relevant stuff
# - How far are we ? (during run, last line)
# - What issues occurred
# - What changes occurred
# - Diff output (in diff-mode)
#
# + If verbosity increases, act as default output
# So that users can easily switch to default for troubleshooting
#
# + Rewrite the output during processing
# - We use the cursor to indicate where in the task we are.
# Output after the prompt is the output of the previous task.
# - If we would clear the line at the start of a task, there would often
# be no information at all, so we leave it until it gets updated
#
# + Use the same color-conventions of Ansible
#
# + Ensure the verbose output (-v) is also dense.
# Remove information that is not essential (eg. timestamps, status)
# TODO:
#
# + Properly test for terminal capabilities, and fall back to default
# + Modify Ansible mechanism so we don't need to use sys.stdout directly
# + Find an elegant solution for progress bar line wrapping
# FIXME: Importing constants as C simply does not work, beats me :-/
# from ansible import constants as C
class C:
COLOR_HIGHLIGHT = 'white'
COLOR_VERBOSE = 'blue'
COLOR_WARN = 'bright purple'
COLOR_ERROR = 'red'
COLOR_DEBUG = 'dark gray'
COLOR_DEPRECATE = 'purple'
COLOR_SKIP = 'cyan'
COLOR_UNREACHABLE = 'bright red'
COLOR_OK = 'green'
COLOR_CHANGED = 'yellow'
# Taken from Dstat
class vt100:
black = '\033[0;30m'
darkred = '\033[0;31m'
darkgreen = '\033[0;32m'
darkyellow = '\033[0;33m'
darkblue = '\033[0;34m'
darkmagenta = '\033[0;35m'
darkcyan = '\033[0;36m'
gray = '\033[0;37m'
darkgray = '\033[1;30m'
red = '\033[1;31m'
green = '\033[1;32m'
yellow = '\033[1;33m'
blue = '\033[1;34m'
magenta = '\033[1;35m'
cyan = '\033[1;36m'
white = '\033[1;37m'
blackbg = '\033[40m'
redbg = '\033[41m'
greenbg = '\033[42m'
yellowbg = '\033[43m'
bluebg = '\033[44m'
magentabg = '\033[45m'
cyanbg = '\033[46m'
whitebg = '\033[47m'
reset = '\033[0;0m'
bold = '\033[1m'
reverse = '\033[2m'
underline = '\033[4m'
clear = '\033[2J'
# clearline = '\033[K'
clearline = '\033[2K'
save = '\033[s'
restore = '\033[u'
save_all = '\0337'
restore_all = '\0338'
linewrap = '\033[7h'
nolinewrap = '\033[7l'
up = '\033[1A'
down = '\033[1B'
right = '\033[1C'
left = '\033[1D'
colors = dict(
ok=vt100.darkgreen,
changed=vt100.darkyellow,
skipped=vt100.darkcyan,
ignored=vt100.cyanbg + vt100.red,
failed=vt100.darkred,
unreachable=vt100.red,
)
states = ('skipped', 'ok', 'changed', 'failed', 'unreachable')
class CallbackModule_dense(CallbackModule_default):
'''
This is the dense callback interface, where screen estate is still valued.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'dense'
def __init__(self):
# From CallbackModule
self._display = display
if HAS_OD:
self.disabled = False
self.super_ref = super(CallbackModule, self)
self.super_ref.__init__()
# Attributes to remove from results for more density
self.removed_attributes = (
# 'changed',
'delta',
# 'diff',
'end',
'failed',
'failed_when_result',
'invocation',
'start',
'stdout_lines',
)
# Initiate data structures
self.hosts = OrderedDict()
self.keep = False
self.shown_title = False
self.count = dict(play=0, handler=0, task=0)
self.type = 'foo'
# Start immediately on the first line
sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
sys.stdout.flush()
else:
display.warning("The 'dense' callback plugin requires OrderedDict which is not available in this version of python, disabling.")
self.disabled = True
def __del__(self):
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
def _add_host(self, result, status):
name = result._host.get_name()
# Add a new status in case a failed task is ignored
if status == 'failed' and result._task.ignore_errors:
status = 'ignored'
# Check if we have to update an existing state (when looping over items)
if name not in self.hosts:
self.hosts[name] = dict(state=status)
elif states.index(self.hosts[name]['state']) < states.index(status):
self.hosts[name]['state'] = status
# Store delegated hostname, if needed
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
self.hosts[name]['delegate'] = delegated_vars['ansible_host']
# Print progress bar
self._display_progress(result)
# # Ensure that tasks with changes/failures stay on-screen, and during diff-mode
# if status in ['changed', 'failed', 'unreachable'] or (result.get('_diff_mode', False) and result._result.get('diff', False)):
# Ensure that tasks with changes/failures stay on-screen
if status in ['changed', 'failed', 'unreachable']:
self.keep = True
if self._display.verbosity == 1:
# Print task title, if needed
self._display_task_banner()
self._display_results(result, status)
def _clean_results(self, result):
# Remove non-essential attributes
for attr in self.removed_attributes:
if attr in result:
del(result[attr])
# Remove empty attributes (list, dict, str)
for attr in result.copy():
if isinstance(result[attr], (MutableSequence, MutableMapping, binary_type, text_type)):
if not result[attr]:
del(result[attr])
def _handle_exceptions(self, result):
if 'exception' in result:
# Remove the exception from the result so it's not shown every time
del result['exception']
if self._display.verbosity == 1:
return "An exception occurred during task execution. To see the full traceback, use -vvv."
def _display_progress(self, result=None):
# Always rewrite the complete line
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.nolinewrap + vt100.underline)
sys.stdout.write('%s %d:' % (self.type, self.count[self.type]))
sys.stdout.write(vt100.reset)
sys.stdout.flush()
# Print out each host in its own status-color
for name in self.hosts:
sys.stdout.write(' ')
if self.hosts[name].get('delegate', None):
sys.stdout.write(self.hosts[name]['delegate'] + '>')
sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset)
sys.stdout.flush()
# if result._result.get('diff', False):
# sys.stdout.write('\n' + vt100.linewrap)
sys.stdout.write(vt100.linewrap)
# self.keep = True
def _display_task_banner(self):
if not self.shown_title:
self.shown_title = True
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
sys.stdout.write('%s %d: %s' % (self.type, self.count[self.type], self.task.get_name().strip()))
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
sys.stdout.flush()
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
self.keep = False
def _display_results(self, result, status):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
self.keep = False
self._clean_results(result._result)
dump = ''
if result._task.action == 'include':
return
elif status == 'ok':
return
elif status == 'ignored':
dump = self._handle_exceptions(result._result)
elif status == 'failed':
dump = self._handle_exceptions(result._result)
elif status == 'unreachable':
dump = result._result['msg']
if not dump:
dump = self._dump_results(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
sys.stdout.write(colors[status] + status + ': ')
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
sys.stdout.write(vt100.reset + result._host.get_name() + '>' + colors[status] + delegated_vars['ansible_host'])
else:
sys.stdout.write(result._host.get_name())
sys.stdout.write(': ' + dump + '\n')
sys.stdout.write(vt100.reset + vt100.save + vt100.clearline)
sys.stdout.flush()
if status == 'changed':
self._handle_warnings(result._result)
def v2_playbook_on_play_start(self, play):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.bold)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.bold)
# Reset at the start of each play
self.keep = False
self.count.update(dict(handler=0, task=0))
self.count['play'] += 1
self.play = play
# Write the next play on screen IN UPPERCASE, and make it permanent
name = play.get_name().strip()
if not name:
name = 'unnamed'
sys.stdout.write('PLAY %d: %s' % (self.count['play'], name.upper()))
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
sys.stdout.flush()
def v2_playbook_on_task_start(self, task, is_conditional):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
else:
# Do not clear line, since we want to retain the previous output
sys.stdout.write(vt100.restore + vt100.reset + vt100.underline)
# Reset at the start of each task
self.keep = False
self.shown_title = False
self.hosts = OrderedDict()
self.task = task
self.type = 'task'
# Enumerate task if not setup (task names are too long for dense output)
if task.get_name() != 'setup':
self.count['task'] += 1
# Write the next task on screen (behind the prompt is the previous output)
sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
sys.stdout.write(vt100.reset)
sys.stdout.flush()
def v2_playbook_on_handler_task_start(self, task):
# Leave the previous task on screen (as it has changes/errors)
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline)
# Reset at the start of each handler
self.keep = False
self.shown_title = False
self.hosts = OrderedDict()
self.task = task
self.type = 'handler'
# Enumerate handler if not setup (handler names may be too long for dense output)
if task.get_name() != 'setup':
self.count[self.type] += 1
# Write the next task on screen (behind the prompt is the previous output)
sys.stdout.write('%s %d.' % (self.type, self.count[self.type]))
sys.stdout.write(vt100.reset)
sys.stdout.flush()
def v2_playbook_on_cleanup_task_start(self, task):
# TBD
sys.stdout.write('cleanup.')
sys.stdout.flush()
def v2_runner_on_failed(self, result, ignore_errors=False):
self._add_host(result, 'failed')
def v2_runner_on_ok(self, result):
if result._result.get('changed', False):
self._add_host(result, 'changed')
else:
self._add_host(result, 'ok')
def v2_runner_on_skipped(self, result):
self._add_host(result, 'skipped')
def v2_runner_on_unreachable(self, result):
self._add_host(result, 'unreachable')
def v2_runner_on_include(self, included_file):
pass
def v2_runner_on_file_diff(self, result, diff):
sys.stdout.write(vt100.bold)
self.super_ref.v2_runner_on_file_diff(result, diff)
sys.stdout.write(vt100.reset)
def v2_on_file_diff(self, result):
sys.stdout.write(vt100.bold)
self.super_ref.v2_on_file_diff(result)
sys.stdout.write(vt100.reset)
# Old definition in v2.0
def v2_playbook_item_on_ok(self, result):
self.v2_runner_item_on_ok(result)
def v2_runner_item_on_ok(self, result):
if result._result.get('changed', False):
self._add_host(result, 'changed')
else:
self._add_host(result, 'ok')
# Old definition in v2.0
def v2_playbook_item_on_failed(self, result):
self.v2_runner_item_on_failed(result)
def v2_runner_item_on_failed(self, result):
self._add_host(result, 'failed')
# Old definition in v2.0
def v2_playbook_item_on_skipped(self, result):
self.v2_runner_item_on_skipped(result)
def v2_runner_item_on_skipped(self, result):
self._add_host(result, 'skipped')
def v2_playbook_on_no_hosts_remaining(self):
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
self.keep = False
sys.stdout.write(vt100.white + vt100.redbg + 'NO MORE HOSTS LEFT')
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
sys.stdout.flush()
def v2_playbook_on_include(self, included_file):
pass
def v2_playbook_on_stats(self, stats):
if self._display.verbosity == 0 and self.keep:
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
else:
sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline)
# In normal mode screen output should be sufficient, summary is redundant
if self._display.verbosity == 0:
return
sys.stdout.write(vt100.bold + vt100.underline)
sys.stdout.write('SUMMARY')
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline)
sys.stdout.flush()
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self._display.display(u"%s : %s %s %s %s" % (
hostcolor(h, t),
colorize(u'ok', t['ok'], C.COLOR_OK),
colorize(u'changed', t['changed'], C.COLOR_CHANGED),
colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
colorize(u'failed', t['failures'], C.COLOR_ERROR)),
screen_only=True
)
# When using -vv or higher, simply do the default action
if display.verbosity >= 2 or not HAS_OD:
CallbackModule = CallbackModule_default
else:
CallbackModule = CallbackModule_dense
|
shivam1111/odoo
|
refs/heads/8.0
|
addons/purchase/stock.py
|
15
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
class stock_move(osv.osv):
_inherit = 'stock.move'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line',
'Purchase Order Line', ondelete='set null', select=True,
readonly=True),
}
def get_price_unit(self, cr, uid, move, context=None):
""" Returns the unit price to store on the quant """
if move.purchase_line_id:
return move.price_unit
return super(stock_move, self).get_price_unit(cr, uid, move, context=context)
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
res = super(stock_move, self).write(cr, uid, ids, vals, context=context)
from openerp import workflow
if vals.get('state') in ['done', 'cancel']:
for move in self.browse(cr, uid, ids, context=context):
if move.purchase_line_id and move.purchase_line_id.order_id:
order_id = move.purchase_line_id.order_id.id
# update linked purchase order as superuser as the warehouse
# user may not have rights to access purchase.order
if self.pool.get('purchase.order').test_moves_done(cr, uid, [order_id], context=context):
workflow.trg_validate(SUPERUSER_ID, 'purchase.order', order_id, 'picking_done', cr)
if self.pool.get('purchase.order').test_moves_except(cr, uid, [order_id], context=context):
workflow.trg_validate(SUPERUSER_ID, 'purchase.order', order_id, 'picking_cancel', cr)
return res
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
context = context or {}
if not default.get('split_from'):
#we don't want to propagate the link to the purchase order line except in case of move split
default['purchase_line_id'] = False
return super(stock_move, self).copy(cr, uid, id, default, context)
def _create_invoice_line_from_vals(self, cr, uid, move, invoice_line_vals, context=None):
if move.purchase_line_id:
invoice_line_vals['purchase_line_id'] = move.purchase_line_id.id
invoice_line_vals['account_analytic_id'] = move.purchase_line_id.account_analytic_id.id or False
invoice_line_id = super(stock_move, self)._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context)
if move.purchase_line_id:
purchase_line = move.purchase_line_id
self.pool.get('purchase.order.line').write(cr, uid, [purchase_line.id], {
'invoice_lines': [(4, invoice_line_id)]
}, context=context)
self.pool.get('purchase.order').write(cr, uid, [purchase_line.order_id.id], {
'invoice_ids': [(4, invoice_line_vals['invoice_id'])],
})
purchase_line_obj = self.pool.get('purchase.order.line')
purchase_obj = self.pool.get('purchase.order')
invoice_line_obj = self.pool.get('account.invoice.line')
purchase_id = move.purchase_line_id.order_id.id
purchase_line_ids = purchase_line_obj.search(cr, uid, [('order_id', '=', purchase_id), ('invoice_lines', '=', False), '|', ('product_id', '=', False), ('product_id.type', '=', 'service')], context=context)
if purchase_line_ids:
inv_lines = []
for po_line in purchase_line_obj.browse(cr, uid, purchase_line_ids, context=context):
acc_id = purchase_obj._choose_account_from_po_line(cr, uid, po_line, context=context)
inv_line_data = purchase_obj._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
inv_line_id = invoice_line_obj.create(cr, uid, inv_line_data, context=context)
inv_lines.append(inv_line_id)
po_line.write({'invoice_lines': [(4, inv_line_id)]})
invoice_line_obj.write(cr, uid, inv_lines, {'invoice_id': invoice_line_vals['invoice_id']}, context=context)
return invoice_line_id
def _get_master_data(self, cr, uid, move, company, context=None):
if context.get('inv_type') == 'in_invoice' and move.purchase_line_id:
purchase_order = move.purchase_line_id.order_id
return purchase_order.partner_id, purchase_order.create_uid.id, purchase_order.currency_id.id
if context.get('inv_type') == 'in_refund' and move.origin_returned_move_id.purchase_line_id:
purchase_order = move.origin_returned_move_id.purchase_line_id.order_id
return purchase_order.partner_id, purchase_order.create_uid.id, purchase_order.currency_id.id
elif context.get('inv_type') in ('in_invoice', 'in_refund') and move.picking_id:
# In case of an extra move, it is better to use the data from the original moves
for purchase_move in move.picking_id.move_lines:
if purchase_move.purchase_line_id:
purchase_order = purchase_move.purchase_line_id.order_id
return purchase_order.partner_id, purchase_order.create_uid.id, purchase_order.currency_id.id
partner = move.picking_id and move.picking_id.partner_id or False
code = self.get_code_from_locs(cr, uid, move, context=context)
if partner and partner.property_product_pricelist_purchase and code == 'incoming':
currency = partner.property_product_pricelist_purchase.currency_id.id
return partner, uid, currency
return super(stock_move, self)._get_master_data(cr, uid, move, company, context=context)
def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):
res = super(stock_move, self)._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)
if inv_type == 'in_invoice' and move.purchase_line_id:
purchase_line = move.purchase_line_id
res['invoice_line_tax_id'] = [(6, 0, [x.id for x in purchase_line.taxes_id])]
res['price_unit'] = purchase_line.price_unit
elif inv_type == 'in_refund' and move.origin_returned_move_id.purchase_line_id:
purchase_line = move.origin_returned_move_id.purchase_line_id
res['invoice_line_tax_id'] = [(6, 0, [x.id for x in purchase_line.taxes_id])]
res['price_unit'] = purchase_line.price_unit
return res
def _get_moves_taxes(self, cr, uid, moves, inv_type, context=None):
is_extra_move, extra_move_tax = super(stock_move, self)._get_moves_taxes(cr, uid, moves, inv_type, context=context)
if inv_type == 'in_invoice':
for move in moves:
if move.purchase_line_id:
is_extra_move[move.id] = False
extra_move_tax[move.picking_id, move.product_id] = [(6, 0, [x.id for x in move.purchase_line_id.taxes_id])]
elif move.product_id.product_tmpl_id.supplier_taxes_id:
mov_id = self.search(cr, uid, [('purchase_line_id', '!=', False), ('picking_id', '=', move.picking_id.id)], limit=1, context=context)
if mov_id:
mov = self.browse(cr, uid, mov_id[0], context=context)
fp = mov.purchase_line_id.order_id.fiscal_position
res = self.pool.get("account.invoice.line").product_id_change(cr, uid, [], move.product_id.id, None, partner_id=move.picking_id.partner_id.id, fposition_id=(fp and fp.id), type='in_invoice', context=context)
extra_move_tax[0, move.product_id] = [(6, 0, res['value']['invoice_line_tax_id'])]
return (is_extra_move, extra_move_tax)
def attribute_price(self, cr, uid, move, context=None):
"""
Attribute price to move, important in inter-company moves or receipts with only one partner
"""
# The method attribute_price of the parent class sets the price to the standard product
# price if move.price_unit is zero. We don't want this behavior in the case of a purchase
# order since we can purchase goods which are free of charge (e.g. 5 units offered if 100
# are purchased).
if move.purchase_line_id:
return
code = self.get_code_from_locs(cr, uid, move, context=context)
if not move.purchase_line_id and code == 'incoming' and not move.price_unit:
partner = move.picking_id and move.picking_id.partner_id or False
price = False
# If partner given, search price in its purchase pricelist
if partner and partner.property_product_pricelist_purchase:
pricelist_obj = self.pool.get("product.pricelist")
pricelist = partner.property_product_pricelist_purchase.id
price = pricelist_obj.price_get(cr, uid, [pricelist],
move.product_id.id, move.product_uom_qty, partner, {
'uom': move.product_uom.id,
'date': move.date,
})[pricelist]
if price:
return self.write(cr, uid, [move.id], {'price_unit': price}, context=context)
super(stock_move, self).attribute_price(cr, uid, move, context=context)
def _get_taxes(self, cr, uid, move, context=None):
if move.origin_returned_move_id.purchase_line_id.taxes_id:
return [tax.id for tax in move.origin_returned_move_id.purchase_line_id.taxes_id]
return super(stock_move, self)._get_taxes(cr, uid, move, context=context)
class stock_picking(osv.osv):
_inherit = 'stock.picking'
def _get_to_invoice(self, cr, uid, ids, name, args, context=None):
res = {}
for picking in self.browse(cr, uid, ids, context=context):
res[picking.id] = False
for move in picking.move_lines:
if move.purchase_line_id and move.purchase_line_id.order_id.invoice_method == 'picking':
if not move.move_orig_ids:
res[picking.id] = True
return res
def _get_picking_to_recompute(self, cr, uid, ids, context=None):
picking_ids = set()
for move in self.pool.get('stock.move').browse(cr, uid, ids, context=context):
if move.picking_id and move.purchase_line_id:
picking_ids.add(move.picking_id.id)
return list(picking_ids)
_columns = {
'reception_to_invoice': fields.function(_get_to_invoice, type='boolean', string='Invoiceable on incoming shipment?',
help='Does the picking contain some moves related to a purchase order invoiceable on the receipt?',
store={
'stock.move': (_get_picking_to_recompute, ['purchase_line_id', 'picking_id'], 10),
}),
}
def _create_invoice_from_picking(self, cr, uid, picking, vals, context=None):
purchase_obj = self.pool.get("purchase.order")
purchase_line_obj = self.pool.get('purchase.order.line')
invoice_line_obj = self.pool.get('account.invoice.line')
invoice_id = super(stock_picking, self)._create_invoice_from_picking(cr, uid, picking, vals, context=context)
return invoice_id
def _get_invoice_vals(self, cr, uid, key, inv_type, journal_id, move, context=None):
inv_vals = super(stock_picking, self)._get_invoice_vals(cr, uid, key, inv_type, journal_id, move, context=context)
if move.purchase_line_id and move.purchase_line_id.order_id:
purchase = move.purchase_line_id.order_id
inv_vals.update({
'fiscal_position': purchase.fiscal_position.id,
'payment_term': purchase.payment_term_id.id,
})
return inv_vals
class stock_warehouse(osv.osv):
_inherit = 'stock.warehouse'
_columns = {
'buy_to_resupply': fields.boolean('Purchase to resupply this warehouse',
help="When products are bought, they can be delivered to this warehouse"),
'buy_pull_id': fields.many2one('procurement.rule', 'Buy rule'),
}
_defaults = {
'buy_to_resupply': True,
}
def _get_buy_pull_rule(self, cr, uid, warehouse, context=None):
route_obj = self.pool.get('stock.location.route')
data_obj = self.pool.get('ir.model.data')
try:
buy_route_id = data_obj.get_object_reference(cr, uid, 'purchase', 'route_warehouse0_buy')[1]
except:
buy_route_id = route_obj.search(cr, uid, [('name', 'like', _('Buy'))], context=context)
buy_route_id = buy_route_id and buy_route_id[0] or False
if not buy_route_id:
raise osv.except_osv(_('Error!'), _('Can\'t find any generic Buy route.'))
return {
'name': self._format_routename(cr, uid, warehouse, _(' Buy'), context=context),
'location_id': warehouse.in_type_id.default_location_dest_id.id,
'route_id': buy_route_id,
'action': 'buy',
'picking_type_id': warehouse.in_type_id.id,
'warehouse_id': warehouse.id,
}
def create_routes(self, cr, uid, ids, warehouse, context=None):
pull_obj = self.pool.get('procurement.rule')
res = super(stock_warehouse, self).create_routes(cr, uid, ids, warehouse, context=context)
if warehouse.buy_to_resupply:
buy_pull_vals = self._get_buy_pull_rule(cr, uid, warehouse, context=context)
buy_pull_id = pull_obj.create(cr, uid, buy_pull_vals, context=context)
res['buy_pull_id'] = buy_pull_id
return res
def write(self, cr, uid, ids, vals, context=None):
pull_obj = self.pool.get('procurement.rule')
if isinstance(ids, (int, long)):
ids = [ids]
if 'buy_to_resupply' in vals:
if vals.get("buy_to_resupply"):
for warehouse in self.browse(cr, uid, ids, context=context):
if not warehouse.buy_pull_id:
buy_pull_vals = self._get_buy_pull_rule(cr, uid, warehouse, context=context)
buy_pull_id = pull_obj.create(cr, uid, buy_pull_vals, context=context)
vals['buy_pull_id'] = buy_pull_id
else:
for warehouse in self.browse(cr, uid, ids, context=context):
if warehouse.buy_pull_id:
buy_pull_id = pull_obj.unlink(cr, uid, warehouse.buy_pull_id.id, context=context)
return super(stock_warehouse, self).write(cr, uid, ids, vals, context=context)
def get_all_routes_for_wh(self, cr, uid, warehouse, context=None):
all_routes = super(stock_warehouse, self).get_all_routes_for_wh(cr, uid, warehouse, context=context)
if warehouse.buy_to_resupply and warehouse.buy_pull_id and warehouse.buy_pull_id.route_id:
all_routes += [warehouse.buy_pull_id.route_id.id]
return all_routes
def _get_all_products_to_resupply(self, cr, uid, warehouse, context=None):
res = super(stock_warehouse, self)._get_all_products_to_resupply(cr, uid, warehouse, context=context)
if warehouse.buy_pull_id and warehouse.buy_pull_id.route_id:
for product_id in res:
for route in self.pool.get('product.product').browse(cr, uid, product_id, context=context).route_ids:
if route.id == warehouse.buy_pull_id.route_id.id:
res.remove(product_id)
break
return res
def _handle_renaming(self, cr, uid, warehouse, name, code, context=None):
res = super(stock_warehouse, self)._handle_renaming(cr, uid, warehouse, name, code, context=context)
pull_obj = self.pool.get('procurement.rule')
#change the buy pull rule name
if warehouse.buy_pull_id:
pull_obj.write(cr, uid, warehouse.buy_pull_id.id, {'name': warehouse.buy_pull_id.name.replace(warehouse.name, name, 1)}, context=context)
return res
def change_route(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
res = super(stock_warehouse, self).change_route(cr, uid, ids, warehouse, new_reception_step=new_reception_step, new_delivery_step=new_delivery_step, context=context)
if warehouse.in_type_id.default_location_dest_id != warehouse.buy_pull_id.location_id:
self.pool.get('procurement.rule').write(cr, uid, warehouse.buy_pull_id.id, {'location_id': warehouse.in_type_id.default_location_dest_id.id}, context=context)
return res
|
sajeeshcs/nested_quota
|
refs/heads/master
|
nova/tests/cells/test_cells_rpc_driver.py
|
29
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Cells RPC Communication Driver
"""
import mox
from oslo.config import cfg
from oslo import messaging as oslo_messaging
from nova.cells import messaging
from nova.cells import rpc_driver
from nova import context
from nova import rpc
from nova import test
from nova.tests.cells import fakes
CONF = cfg.CONF
CONF.import_opt('rpc_driver_queue_base', 'nova.cells.rpc_driver',
group='cells')
class CellsRPCDriverTestCase(test.NoDBTestCase):
"""Test case for Cells communication via RPC."""
def setUp(self):
super(CellsRPCDriverTestCase, self).setUp()
fakes.init(self)
self.ctxt = context.RequestContext('fake', 'fake')
self.driver = rpc_driver.CellsRPCDriver()
def test_start_servers(self):
self.flags(rpc_driver_queue_base='cells.intercell42', group='cells')
fake_msg_runner = fakes.get_message_runner('api-cell')
class FakeInterCellRPCDispatcher(object):
def __init__(_self, msg_runner):
self.assertEqual(fake_msg_runner, msg_runner)
self.stubs.Set(rpc_driver, 'InterCellRPCDispatcher',
FakeInterCellRPCDispatcher)
self.mox.StubOutWithMock(rpc, 'get_server')
for message_type in messaging.MessageRunner.get_message_types():
topic = 'cells.intercell42.' + message_type
target = oslo_messaging.Target(topic=topic, server=CONF.host)
endpoints = [mox.IsA(FakeInterCellRPCDispatcher)]
rpcserver = self.mox.CreateMockAnything()
rpc.get_server(target, endpoints=endpoints).AndReturn(rpcserver)
rpcserver.start()
self.mox.ReplayAll()
self.driver.start_servers(fake_msg_runner)
def test_stop_servers(self):
call_info = {'stopped': []}
class FakeRPCServer(object):
def stop(self):
call_info['stopped'].append(self)
fake_servers = [FakeRPCServer() for x in xrange(5)]
self.driver.rpc_servers = fake_servers
self.driver.stop_servers()
self.assertEqual(fake_servers, call_info['stopped'])
def test_send_message_to_cell_cast(self):
msg_runner = fakes.get_message_runner('api-cell')
cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
message = messaging._TargetedMessage(msg_runner,
self.ctxt, 'fake', {}, 'down', cell_state, fanout=False)
expected_server_params = {'hostname': 'rpc_host2',
'password': 'password2',
'port': 3092,
'username': 'username2',
'virtual_host': 'rpc_vhost2'}
expected_url = ('rabbit://%(username)s:%(password)s@'
'%(hostname)s:%(port)d/%(virtual_host)s' %
expected_server_params)
def check_transport_url(cell_state):
return cell_state.db_info['transport_url'] == expected_url
rpcapi = self.driver.intercell_rpcapi
rpcclient = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(rpcapi, '_get_client')
rpcapi._get_client(
mox.Func(check_transport_url),
'cells.intercell.targeted').AndReturn(rpcclient)
rpcclient.cast(mox.IgnoreArg(), 'process_message',
message=message.to_json())
self.mox.ReplayAll()
self.driver.send_message_to_cell(cell_state, message)
def test_send_message_to_cell_fanout_cast(self):
msg_runner = fakes.get_message_runner('api-cell')
cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
message = messaging._TargetedMessage(msg_runner,
self.ctxt, 'fake', {}, 'down', cell_state, fanout=True)
expected_server_params = {'hostname': 'rpc_host2',
'password': 'password2',
'port': 3092,
'username': 'username2',
'virtual_host': 'rpc_vhost2'}
expected_url = ('rabbit://%(username)s:%(password)s@'
'%(hostname)s:%(port)d/%(virtual_host)s' %
expected_server_params)
def check_transport_url(cell_state):
return cell_state.db_info['transport_url'] == expected_url
rpcapi = self.driver.intercell_rpcapi
rpcclient = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(rpcapi, '_get_client')
rpcapi._get_client(
mox.Func(check_transport_url),
'cells.intercell.targeted').AndReturn(rpcclient)
rpcclient.prepare(fanout=True).AndReturn(rpcclient)
rpcclient.cast(mox.IgnoreArg(), 'process_message',
message=message.to_json())
self.mox.ReplayAll()
self.driver.send_message_to_cell(cell_state, message)
def test_rpc_topic_uses_message_type(self):
self.flags(rpc_driver_queue_base='cells.intercell42', group='cells')
msg_runner = fakes.get_message_runner('api-cell')
cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
message = messaging._BroadcastMessage(msg_runner,
self.ctxt, 'fake', {}, 'down', fanout=True)
message.message_type = 'fake-message-type'
expected_server_params = {'hostname': 'rpc_host2',
'password': 'password2',
'port': 3092,
'username': 'username2',
'virtual_host': 'rpc_vhost2'}
expected_url = ('rabbit://%(username)s:%(password)s@'
'%(hostname)s:%(port)d/%(virtual_host)s' %
expected_server_params)
def check_transport_url(cell_state):
return cell_state.db_info['transport_url'] == expected_url
rpcapi = self.driver.intercell_rpcapi
rpcclient = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(rpcapi, '_get_client')
rpcapi._get_client(
mox.Func(check_transport_url),
'cells.intercell42.fake-message-type').AndReturn(rpcclient)
rpcclient.prepare(fanout=True).AndReturn(rpcclient)
rpcclient.cast(mox.IgnoreArg(), 'process_message',
message=message.to_json())
self.mox.ReplayAll()
self.driver.send_message_to_cell(cell_state, message)
def test_process_message(self):
msg_runner = fakes.get_message_runner('api-cell')
dispatcher = rpc_driver.InterCellRPCDispatcher(msg_runner)
message = messaging._BroadcastMessage(msg_runner,
self.ctxt, 'fake', {}, 'down', fanout=True)
call_info = {}
def _fake_message_from_json(json_message):
call_info['json_message'] = json_message
self.assertEqual(message.to_json(), json_message)
return message
def _fake_process():
call_info['process_called'] = True
self.stubs.Set(msg_runner, 'message_from_json',
_fake_message_from_json)
self.stubs.Set(message, 'process', _fake_process)
dispatcher.process_message(self.ctxt, message.to_json())
self.assertEqual(message.to_json(), call_info['json_message'])
self.assertTrue(call_info['process_called'])
|
arista-eosplus/ansible-modules-extras
|
refs/heads/devel
|
cloud/misc/ovirt.py
|
65
|
#!/usr/bin/python
# (c) 2013, Vincent Van der Kussen <vincent at vanderkussen.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ovirt
author: "Vincent Van der Kussen (@vincentvdk)"
short_description: oVirt/RHEV platform management
description:
- allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform
version_added: "1.4"
options:
user:
description:
- the user to authenticate with
default: null
required: true
aliases: []
url:
description:
- the url of the oVirt instance
default: null
required: true
aliases: []
instance_name:
description:
- the name of the instance to use
default: null
required: true
aliases: [ vmname ]
password:
description:
- password of the user to authenticate with
default: null
required: true
aliases: []
image:
description:
- template to use for the instance
default: null
required: false
aliases: []
resource_type:
description:
- whether you want to deploy an image or create an instance from scratch.
default: null
required: false
aliases: []
choices: [ 'new', 'template' ]
zone:
description:
- deploy the image to this oVirt cluster
default: null
required: false
aliases: []
instance_disksize:
description:
- size of the instance's disk in GB
default: null
required: false
aliases: [ vm_disksize]
instance_cpus:
description:
- the instance's number of CPUs
default: 1
required: false
aliases: [ vmcpus ]
instance_nic:
description:
- name of the network interface in oVirt/RHEV
default: null
required: false
aliases: [ vmnic ]
instance_network:
description:
- the logical network the machine should belong to
default: rhevm
required: false
aliases: [ vmnetwork ]
instance_mem:
description:
- the instance's amount of memory in MB
default: null
required: false
aliases: [ vmmem ]
instance_type:
description:
- define if the instance is a server or desktop
default: server
required: false
aliases: [ vmtype ]
choices: [ 'server', 'desktop' ]
disk_alloc:
description:
- define if disk is thin or preallocated
default: thin
required: false
aliases: []
choices: [ 'thin', 'preallocated' ]
disk_int:
description:
- interface type of the disk
default: virtio
required: false
aliases: []
choices: [ 'virtio', 'ide' ]
instance_os:
description:
- type of Operating System
default: null
required: false
aliases: [ vmos ]
instance_cores:
description:
- define the instance's number of cores
default: 1
required: false
aliases: [ vmcores ]
sdomain:
description:
- the Storage Domain where you want to create the instance's disk on.
default: null
required: false
aliases: []
region:
description:
- the oVirt/RHEV datacenter where you want to deploy to
default: null
required: false
aliases: []
state:
description:
- create, terminate or remove instances
default: 'present'
required: false
aliases: []
choices: ['present', 'absent', 'shutdown', 'started', 'restarted']
requirements:
- "python >= 2.6"
- "ovirt-engine-sdk-python"
'''
EXAMPLES = '''
# Basic example provisioning from image.
action: ovirt >
user=admin@internal
url=https://ovirt.example.com
instance_name=ansiblevm04
password=secret
image=centos_64
zone=cluster01
resource_type=template
# Full example to create new instance from scratch
action: ovirt >
instance_name=testansible
resource_type=new
instance_type=server
user=admin@internal
password=secret
url=https://ovirt.example.com
instance_disksize=10
zone=cluster01
region=datacenter1
instance_cpus=1
instance_nic=nic1
instance_network=rhevm
instance_mem=1000
disk_alloc=thin
sdomain=FIBER01
instance_cores=1
instance_os=rhel_6x64
disk_int=virtio
# stopping an instance
action: ovirt >
instance_name=testansible
state=shutdown
user=admin@internal
password=secret
url=https://ovirt.example.com
# starting an instance
action: ovirt >
instance_name=testansible
state=started
user=admin@internal
password=secret
url=https://ovirt.example.com
'''
import sys
import time  # vm_restart() below polls the VM state with time.sleep()
try:
from ovirtsdk.api import API
from ovirtsdk.xml import params
except ImportError:
print "failed=True msg='ovirtsdk required for this module'"
sys.exit(1)
# ------------------------------------------------------------------- #
# create connection with API
#
def conn(url, user, password):
api = API(url=url, username=user, password=password, insecure=True)
try:
value = api.test()
except:
print "error connecting to the oVirt API"
sys.exit(1)
return api
# ------------------------------------------------------------------- #
# Create VM from scratch
def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
if vmdisk_alloc == 'thin':
# define VM params
vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype)
# define disk params
vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow',
storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
# define network parameters
network_net = params.Network(name=vmnetwork)
nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio')
elif vmdisk_alloc == 'preallocated':
# define VM params
vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))) ,type_=vmtype)
# define disk params
vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", format='raw',
storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
# define network parameters
network_net = params.Network(name=vmnetwork)
nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')
try:
conn.vms.add(vmparams)
except:
print "Error creating VM with specified parameters"
sys.exit(1)
vm = conn.vms.get(name=vmname)
try:
vm.disks.add(vmdisk)
except:
print "Error attaching disk"
try:
vm.nics.add(nic_net1)
except:
print "Error adding nic"
# create an instance from a template
def create_vm_template(conn, vmname, image, zone):
vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image),disks=params.Disks(clone=True))
try:
conn.vms.add(vmparams)
except:
print 'error adding template %s' % image
sys.exit(1)
# start instance
def vm_start(conn, vmname):
vm = conn.vms.get(name=vmname)
vm.start()
# Stop instance
def vm_stop(conn, vmname):
vm = conn.vms.get(name=vmname)
vm.stop()
# restart instance
def vm_restart(conn, vmname):
state = vm_status(conn, vmname)
vm = conn.vms.get(name=vmname)
vm.stop()
while conn.vms.get(vmname).get_status().get_state() != 'down':
time.sleep(5)
vm.start()
# remove an instance
def vm_remove(conn, vmname):
vm = conn.vms.get(name=vmname)
vm.delete()
# ------------------------------------------------------------------- #
# VM statuses
#
# Get the VMs status
def vm_status(conn, vmname):
status = conn.vms.get(name=vmname).status.state
print "vm status is : %s" % status
return status
# Get VM object and return its name if the object exists
def get_vm(conn, vmname):
vm = conn.vms.get(name=vmname)
if vm == None:
name = "empty"
print "vmname: %s" % name
else:
name = vm.get_name()
print "vmname: %s" % name
return name
# ------------------------------------------------------------------- #
# Hypervisor operations
#
# not available yet
# ------------------------------------------------------------------- #
# Main
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent', 'shutdown', 'started', 'restart']),
#name = dict(required=True),
user = dict(required=True),
url = dict(required=True),
instance_name = dict(required=True, aliases=['vmname']),
password = dict(required=True),
image = dict(),
resource_type = dict(choices=['new', 'template']),
zone = dict(),
instance_disksize = dict(aliases=['vm_disksize']),
instance_cpus = dict(default=1, aliases=['vmcpus']),
instance_nic = dict(aliases=['vmnic']),
instance_network = dict(default='rhevm', aliases=['vmnetwork']),
instance_mem = dict(aliases=['vmmem']),
instance_type = dict(default='server', aliases=['vmtype'], choices=['server', 'desktop']),
disk_alloc = dict(default='thin', choices=['thin', 'preallocated']),
disk_int = dict(default='virtio', choices=['virtio', 'ide']),
instance_os = dict(aliases=['vmos']),
instance_cores = dict(default=1, aliases=['vmcores']),
sdomain = dict(),
region = dict(),
)
)
state = module.params['state']
user = module.params['user']
url = module.params['url']
vmname = module.params['instance_name']
password = module.params['password']
image = module.params['image'] # name of the image to deploy
resource_type = module.params['resource_type'] # template or from scratch
zone = module.params['zone'] # oVirt cluster
vmdisk_size = module.params['instance_disksize'] # disksize
vmcpus = module.params['instance_cpus'] # number of cpu
vmnic = module.params['instance_nic'] # network interface
vmnetwork = module.params['instance_network'] # logical network
vmmem = module.params['instance_mem'] # mem size
vmdisk_alloc = module.params['disk_alloc'] # thin, preallocated
vmdisk_int = module.params['disk_int'] # disk interface virtio or ide
vmos = module.params['instance_os'] # Operating System
vmtype = module.params['instance_type'] # server or desktop
vmcores = module.params['instance_cores'] # number of cores
sdomain = module.params['sdomain'] # storage domain to store disk on
region = module.params['region'] # oVirt Datacenter
#initialize connection
c = conn(url+"/api", user, password)
if state == 'present':
if get_vm(c, vmname) == "empty":
if resource_type == 'template':
create_vm_template(c, vmname, image, zone)
module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname,image))
elif resource_type == 'new':
# FIXME: refactor, use keyword args.
create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
else:
module.exit_json(changed=False, msg="You did not specify a resource type")
else:
module.exit_json(changed=False, msg="VM %s already exists" % vmname)
if state == 'started':
if vm_status(c, vmname) == 'up':
module.exit_json(changed=False, msg="VM %s is already running" % vmname)
else:
vm_start(c, vmname)
module.exit_json(changed=True, msg="VM %s started" % vmname)
if state == 'shutdown':
if vm_status(c, vmname) == 'down':
module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname)
else:
vm_stop(c, vmname)
module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)
if state == 'restart':
if vm_status(c, vmname) == 'up':
vm_restart(c, vmname)
module.exit_json(changed=True, msg="VM %s is restarted" % vmname)
else:
module.exit_json(changed=False, msg="VM %s is not running" % vmname)
if state == 'absent':
if get_vm(c, vmname) == "empty":
module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
else:
vm_remove(c, vmname)
module.exit_json(changed=True, msg="VM %s removed" % vmname)
# import module snippets
from ansible.module_utils.basic import *
main()
|
driera/vz-beauty
|
refs/heads/master
|
dev/node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/common_test.py
|
2542
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):
def test_Valid(self):
"""Test that sorting works on a valid graph with one possible order."""
graph = {
'a': ['b', 'c'],
'b': [],
'c': ['d'],
'd': ['b'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertEqual(
gyp.common.TopologicallySorted(graph.keys(), GetEdge),
['a', 'c', 'd', 'b'])
def test_Cycle(self):
"""Test that an exception is thrown on a cyclic graph."""
graph = {
'a': ['b'],
'b': ['c'],
'c': ['d'],
'd': ['a'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertRaises(
gyp.common.CycleError, gyp.common.TopologicallySorted,
graph.keys(), GetEdge)
class TestGetFlavor(unittest.TestCase):
"""Test that gyp.common.GetFlavor works as intended"""
original_platform = ''
def setUp(self):
self.original_platform = sys.platform
def tearDown(self):
sys.platform = self.original_platform
def assertFlavor(self, expected, argument, param):
sys.platform = argument
self.assertEqual(expected, gyp.common.GetFlavor(param))
def test_platform_default(self):
self.assertFlavor('freebsd', 'freebsd9' , {})
self.assertFlavor('freebsd', 'freebsd10', {})
self.assertFlavor('openbsd', 'openbsd5' , {})
self.assertFlavor('solaris', 'sunos5' , {});
self.assertFlavor('solaris', 'sunos' , {});
self.assertFlavor('linux' , 'linux2' , {});
self.assertFlavor('linux' , 'linux3' , {});
def test_param(self):
self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'})
if __name__ == '__main__':
unittest.main()
|
joaormatos/anaconda
|
refs/heads/master
|
Anaconda/standalone/trunk/buildtests/libraries/test_Image2.py
|
2
|
#
# Copyright (C) 2007, Giovanni Bajo
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# Verify packaging of PIL.Image. Specifically, the hidden import of FixTk
# importing tkinter is causing some problems.
from Image import fromstring
print fromstring
|
bdfoster/blumate
|
refs/heads/master
|
tests/components/sensor/test_rfxtrx.py
|
1
|
"""The tests for the Rfxtrx sensor platform."""
import unittest
from blumate.bootstrap import _setup_component
from blumate.components import rfxtrx as rfxtrx_core
from blumate.const import TEMP_CELSIUS
from tests.common import get_test_home_assistant
class TestSensorRfxtrx(unittest.TestCase):
"""Test the Rfxtrx sensor platform."""
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant(0)
self.hass.config.components = ['rfxtrx']
def tearDown(self):
"""Stop everything that was started."""
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS = []
rfxtrx_core.RFX_DEVICES = {}
self.hass.stop()
def test_default_config(self):
"""Test with 0 sensor."""
self.assertTrue(_setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'devices':
{}}}))
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
def test_old_config_sensor(self):
"""Test with 1 sensor."""
self.assertTrue(_setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'devices':
{'sensor_0502': {
'name': 'Test',
'packetid': '0a52080705020095220269',
'data_type': 'Temperature'}}}}))
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
entity = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature']
self.assertEqual('Test', entity.name)
self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
self.assertEqual(14.9, entity.state)
self.assertEqual({'Humidity status': 'normal', 'Temperature': 14.9,
'Rssi numeric': 6, 'Humidity': 34,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
def test_one_sensor(self):
"""Test with 1 sensor."""
self.assertTrue(_setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'devices':
{'0a52080705020095220269': {
'name': 'Test',
'data_type': 'Temperature'}}}}))
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
entity = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature']
self.assertEqual('Test', entity.name)
self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
self.assertEqual(14.9, entity.state)
self.assertEqual({'Humidity status': 'normal', 'Temperature': 14.9,
'Rssi numeric': 6, 'Humidity': 34,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
def test_one_sensor_no_datatype(self):
"""Test with 1 sensor."""
self.assertTrue(_setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'devices':
{'0a52080705020095220269': {
'name': 'Test'}}}}))
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
entity = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature']
self.assertEqual('Test', entity.name)
self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
self.assertEqual(14.9, entity.state)
self.assertEqual({'Humidity status': 'normal', 'Temperature': 14.9,
'Rssi numeric': 6, 'Humidity': 34,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
def test_several_sensors(self):
"""Test with 3 sensors."""
self.assertTrue(_setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'devices':
{'0a52080705020095220269': {
'name': 'Test',
'data_type': 'Temperature'},
'0a520802060100ff0e0269': {
'name': 'Bath',
'data_type': ['Temperature', 'Humidity']
}}}}))
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
device_num = 0
for id in rfxtrx_core.RFX_DEVICES:
if id == 'sensor_0601':
device_num = device_num + 1
self.assertEqual(len(rfxtrx_core.RFX_DEVICES[id]), 2)
_entity_temp = rfxtrx_core.RFX_DEVICES[id]['Temperature']
_entity_hum = rfxtrx_core.RFX_DEVICES[id]['Humidity']
self.assertEqual('%', _entity_hum.unit_of_measurement)
self.assertEqual(14, _entity_hum.state)
self.assertEqual({'Battery numeric': 9, 'Temperature': 25.5,
'Humidity': 14, 'Humidity status': 'normal',
'Humidity status numeric': 2,
'Rssi numeric': 6},
_entity_hum.device_state_attributes)
self.assertEqual('Bath', _entity_hum.__str__())
self.assertEqual(TEMP_CELSIUS,
_entity_temp.unit_of_measurement)
self.assertEqual(25.5, _entity_temp.state)
self.assertEqual({'Battery numeric': 9, 'Temperature': 25.5,
'Humidity': 14, 'Humidity status': 'normal',
'Humidity status numeric': 2,
'Rssi numeric': 6},
_entity_temp.device_state_attributes)
self.assertEqual('Bath', _entity_temp.__str__())
elif id == 'sensor_0502':
device_num = device_num + 1
entity = rfxtrx_core.RFX_DEVICES[id]['Temperature']
self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
self.assertEqual(14.9, entity.state)
self.assertEqual({'Humidity status': 'normal',
'Temperature': 14.9,
'Rssi numeric': 6, 'Humidity': 34,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('Test', entity.__str__())
self.assertEqual(2, device_num)
def test_discover_sensor(self):
"""Test with discovery of sensor."""
self.assertTrue(_setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'automatic_add': True,
'devices': {}}}))
event = rfxtrx_core.get_rfx_object('0a520801070100b81b0279')
event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = rfxtrx_core.RFX_DEVICES['sensor_0701']['Temperature']
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual({'Humidity status': 'normal',
'Temperature': 18.4,
'Rssi numeric': 7, 'Humidity': 27,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('0a520801070100b81b0279',
entity.__str__())
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(1, len(rfxtrx_core.RFX_DEVICES))
event = rfxtrx_core.get_rfx_object('0a52080405020095240279')
event.data = bytearray(b'\nR\x08\x04\x05\x02\x00\x95$\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = rfxtrx_core.RFX_DEVICES['sensor_0502']['Temperature']
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual({'Humidity status': 'normal',
'Temperature': 14.9,
'Rssi numeric': 7, 'Humidity': 36,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('0a52080405020095240279',
entity.__str__())
event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279')
event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
entity = rfxtrx_core.RFX_DEVICES['sensor_0701']['Temperature']
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
self.assertEqual({'Humidity status': 'normal',
'Temperature': 17.9,
'Rssi numeric': 7, 'Humidity': 27,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('0a520801070100b81b0279',
entity.__str__())
# trying to add a switch
event = rfxtrx_core.get_rfx_object('0b1100cd0213c7f210010f70')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
def test_discover_sensor_noautoadd(self):
"""Test with discover of sensor when auto add is False."""
self.assertTrue(_setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'automatic_add': False,
'devices': {}}}))
event = rfxtrx_core.get_rfx_object('0a520801070100b81b0279')
event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y')
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
event = rfxtrx_core.get_rfx_object('0a52080405020095240279')
event.data = bytearray(b'\nR\x08\x04\x05\x02\x00\x95$\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
event = rfxtrx_core.get_rfx_object('0a52085e070100b31b0279')
event.data = bytearray(b'\nR\x08^\x07\x01\x00\xb3\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(0, len(rfxtrx_core.RFX_DEVICES))
def test_update_of_sensors(self):
"""Test with 3 sensors."""
self.assertTrue(_setup_component(self.hass, 'sensor', {
'sensor': {'platform': 'rfxtrx',
'devices':
{'0a52080705020095220269': {
'name': 'Test',
'data_type': 'Temperature'},
'0a520802060100ff0e0269': {
'name': 'Bath',
'data_type': ['Temperature', 'Humidity']
}}}}))
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
device_num = 0
for id in rfxtrx_core.RFX_DEVICES:
if id == 'sensor_0601':
device_num = device_num + 1
self.assertEqual(len(rfxtrx_core.RFX_DEVICES[id]), 2)
_entity_temp = rfxtrx_core.RFX_DEVICES[id]['Temperature']
_entity_hum = rfxtrx_core.RFX_DEVICES[id]['Humidity']
self.assertEqual('%', _entity_hum.unit_of_measurement)
self.assertEqual(14, _entity_hum.state)
self.assertEqual({'Battery numeric': 9, 'Temperature': 25.5,
'Humidity': 14, 'Humidity status': 'normal',
'Humidity status numeric': 2,
'Rssi numeric': 6},
_entity_hum.device_state_attributes)
self.assertEqual('Bath', _entity_hum.__str__())
self.assertEqual(TEMP_CELSIUS,
_entity_temp.unit_of_measurement)
self.assertEqual(25.5, _entity_temp.state)
self.assertEqual({'Battery numeric': 9, 'Temperature': 25.5,
'Humidity': 14, 'Humidity status': 'normal',
'Humidity status numeric': 2,
'Rssi numeric': 6},
_entity_temp.device_state_attributes)
self.assertEqual('Bath', _entity_temp.__str__())
elif id == 'sensor_0502':
device_num = device_num + 1
entity = rfxtrx_core.RFX_DEVICES[id]['Temperature']
self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
self.assertEqual(14.9, entity.state)
self.assertEqual({'Humidity status': 'normal',
'Temperature': 14.9,
'Rssi numeric': 6, 'Humidity': 34,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('Test', entity.__str__())
self.assertEqual(2, device_num)
event = rfxtrx_core.get_rfx_object('0a520802060101ff0f0269')
event.data = bytearray(b'\nR\x08\x01\x07\x01\x00\xb8\x1b\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
event = rfxtrx_core.get_rfx_object('0a52080705020085220269')
event.data = bytearray(b'\nR\x08\x04\x05\x02\x00\x95$\x02y')
rfxtrx_core.RECEIVED_EVT_SUBSCRIBERS[0](event)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
device_num = 0
for id in rfxtrx_core.RFX_DEVICES:
if id == 'sensor_0601':
device_num = device_num + 1
self.assertEqual(len(rfxtrx_core.RFX_DEVICES[id]), 2)
_entity_temp = rfxtrx_core.RFX_DEVICES[id]['Temperature']
_entity_hum = rfxtrx_core.RFX_DEVICES[id]['Humidity']
self.assertEqual('%', _entity_hum.unit_of_measurement)
self.assertEqual(15, _entity_hum.state)
self.assertEqual({'Battery numeric': 9, 'Temperature': 51.1,
'Humidity': 15, 'Humidity status': 'normal',
'Humidity status numeric': 2,
'Rssi numeric': 6},
_entity_hum.device_state_attributes)
self.assertEqual('Bath', _entity_hum.__str__())
self.assertEqual(TEMP_CELSIUS,
_entity_temp.unit_of_measurement)
self.assertEqual(51.1, _entity_temp.state)
self.assertEqual({'Battery numeric': 9, 'Temperature': 51.1,
'Humidity': 15, 'Humidity status': 'normal',
'Humidity status numeric': 2,
'Rssi numeric': 6},
_entity_temp.device_state_attributes)
self.assertEqual('Bath', _entity_temp.__str__())
elif id == 'sensor_0502':
device_num = device_num + 1
entity = rfxtrx_core.RFX_DEVICES[id]['Temperature']
self.assertEqual(TEMP_CELSIUS, entity.unit_of_measurement)
self.assertEqual(13.3, entity.state)
self.assertEqual({'Humidity status': 'normal',
'Temperature': 13.3,
'Rssi numeric': 6, 'Humidity': 34,
'Battery numeric': 9,
'Humidity status numeric': 2},
entity.device_state_attributes)
self.assertEqual('Test', entity.__str__())
self.assertEqual(2, device_num)
self.assertEqual(2, len(rfxtrx_core.RFX_DEVICES))
|
szeged/servo
|
refs/heads/master
|
python/tidy/servo_tidy_tests/test_tidy.py
|
7
|
# Copyright 2013 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import os
import unittest
from servo_tidy import tidy
base_path = 'servo_tidy_tests/' if os.path.exists('servo_tidy_tests/') else 'python/tidy/servo_tidy_tests/'
def iterFile(name):
return iter([os.path.join(base_path, name)])
class CheckTidiness(unittest.TestCase):
def assertNoMoreErrors(self, errors):
with self.assertRaises(StopIteration):
errors.next()
def test_tidy_config(self):
errors = tidy.check_config_file(os.path.join(base_path, 'servo-tidy.toml'), print_text=False)
self.assertEqual("invalid config key 'key-outside'", errors.next()[2])
self.assertEqual("invalid config key 'wrong-key'", errors.next()[2])
self.assertEqual('invalid config table [wrong]', errors.next()[2])
self.assertEqual("ignored file './fake/file.html' doesn't exist", errors.next()[2])
self.assertEqual("ignored directory './fake/dir' doesn't exist", errors.next()[2])
self.assertNoMoreErrors(errors)
def test_non_existing_wpt_manifest_checks(self):
wrong_path = "/wrong/path.ini"
errors = tidy.check_manifest_dirs(wrong_path, print_text=False)
self.assertEqual("%s manifest file is required but was not found" % wrong_path, errors.next()[2])
self.assertNoMoreErrors(errors)
errors = tidy.check_manifest_dirs(os.path.join(base_path, 'manifest-include.ini'), print_text=False)
self.assertTrue(errors.next()[2].endswith("never_going_to_exist"))
self.assertNoMoreErrors(errors)
def test_directory_checks(self):
dirs = {
os.path.join(base_path, "dir_check/webidl_plus"): ['webidl', 'test'],
os.path.join(base_path, "dir_check/only_webidl"): ['webidl']
}
errors = tidy.check_directory_files(dirs)
error_dir = os.path.join(base_path, "dir_check/webidl_plus")
self.assertEqual("Unexpected extension found for test.rs. We only expect files with webidl, test extensions in {0}".format(error_dir), errors.next()[2])
self.assertEqual("Unexpected extension found for test2.rs. We only expect files with webidl, test extensions in {0}".format(error_dir), errors.next()[2])
self.assertNoMoreErrors(errors)
    def test_spaces_correctness(self):
errors = tidy.collect_errors_for_files(iterFile('wrong_space.rs'), [], [tidy.check_by_line], print_text=False)
self.assertEqual('trailing whitespace', errors.next()[2])
self.assertEqual('no newline at EOF', errors.next()[2])
self.assertEqual('tab on line', errors.next()[2])
self.assertEqual('CR on line', errors.next()[2])
self.assertEqual('no newline at EOF', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_empty_file(self):
errors = tidy.collect_errors_for_files(iterFile('empty_file.rs'), [], [tidy.check_by_line], print_text=False)
self.assertEqual('file is empty', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_long_line(self):
errors = tidy.collect_errors_for_files(iterFile('long_line.rs'), [], [tidy.check_by_line], print_text=False)
self.assertEqual('Line is longer than 120 characters', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_whatwg_link(self):
errors = tidy.collect_errors_for_files(iterFile('whatwg_link.rs'), [], [tidy.check_by_line], print_text=False)
self.assertTrue('link to WHATWG may break in the future, use this format instead:' in errors.next()[2])
self.assertTrue('links to WHATWG single-page url, change to multi page:' in errors.next()[2])
self.assertNoMoreErrors(errors)
def test_license(self):
errors = tidy.collect_errors_for_files(iterFile('incorrect_license.rs'), [], [tidy.check_license], print_text=False)
self.assertEqual('incorrect license', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_shebang_license(self):
errors = tidy.collect_errors_for_files(iterFile('shebang_license.py'), [], [tidy.check_license], print_text=False)
self.assertEqual('missing blank line after shebang', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_shell(self):
errors = tidy.collect_errors_for_files(iterFile('shell_tidy.sh'), [], [tidy.check_shell], print_text=False)
self.assertEqual('script does not have shebang "#!/usr/bin/env bash"', errors.next()[2])
self.assertEqual('script is missing options "set -o errexit", "set -o pipefail"', errors.next()[2])
self.assertEqual('script should not use backticks for command substitution', errors.next()[2])
self.assertEqual('variable substitutions should use the full \"${VAR}\" form', errors.next()[2])
self.assertEqual('script should use `[[` instead of `[` for conditional testing', errors.next()[2])
self.assertEqual('script should use `[[` instead of `[` for conditional testing', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_apache2_incomplete(self):
errors = tidy.collect_errors_for_files(iterFile('apache2_license.rs'), [], [tidy.check_license])
self.assertEqual('incorrect license', errors.next()[2])
def test_rust(self):
errors = tidy.collect_errors_for_files(iterFile('rust_tidy.rs'), [], [tidy.check_rust], print_text=False)
self.assertEqual('extra space after use', errors.next()[2])
self.assertEqual('missing space before }', errors.next()[2])
self.assertEqual('use statement contains braces for single import', errors.next()[2])
self.assertTrue('mod declaration is not in alphabetical order' in errors.next()[2])
self.assertEqual('mod declaration spans multiple lines', errors.next()[2])
self.assertTrue('extern crate declaration is not in alphabetical order' in errors.next()[2])
self.assertTrue('derivable traits list is not in alphabetical order' in errors.next()[2])
self.assertEqual('found an empty line following a {', errors.next()[2])
self.assertEqual('missing space before ->', errors.next()[2])
self.assertEqual('missing space after ->', errors.next()[2])
self.assertEqual('missing space after :', errors.next()[2])
self.assertEqual('missing space before {', errors.next()[2])
self.assertEqual('missing space before =', errors.next()[2])
self.assertEqual('missing space after =', errors.next()[2])
self.assertEqual('missing space before -', errors.next()[2])
self.assertEqual('missing space before *', errors.next()[2])
self.assertEqual('missing space after =>', errors.next()[2])
self.assertEqual('missing space after :', errors.next()[2])
self.assertEqual('missing space after :', errors.next()[2])
self.assertEqual('extra space before :', errors.next()[2])
self.assertEqual('extra space before :', errors.next()[2])
self.assertEqual('use &[T] instead of &Vec<T>', errors.next()[2])
self.assertEqual('use &str instead of &String', errors.next()[2])
self.assertEqual('use &T instead of &Root<T>', errors.next()[2])
self.assertEqual('encountered function signature with -> ()', errors.next()[2])
self.assertEqual('operators should go at the end of the first line', errors.next()[2])
self.assertEqual('else braces should be on the same line', errors.next()[2])
self.assertEqual('extra space after (', errors.next()[2])
self.assertEqual('extra space after (', errors.next()[2])
self.assertEqual('extra space after (', errors.next()[2])
self.assertEqual('extra space after test_fun', errors.next()[2])
self.assertEqual('no = in the beginning of line', errors.next()[2])
self.assertEqual('space before { is not a multiple of 4', errors.next()[2])
self.assertEqual('space before } is not a multiple of 4', errors.next()[2])
self.assertEqual('extra space after if', errors.next()[2])
self.assertNoMoreErrors(errors)
feature_errors = tidy.collect_errors_for_files(iterFile('lib.rs'), [], [tidy.check_rust], print_text=False)
self.assertTrue('feature attribute is not in alphabetical order' in feature_errors.next()[2])
self.assertTrue('feature attribute is not in alphabetical order' in feature_errors.next()[2])
self.assertTrue('feature attribute is not in alphabetical order' in feature_errors.next()[2])
self.assertTrue('feature attribute is not in alphabetical order' in feature_errors.next()[2])
self.assertNoMoreErrors(feature_errors)
ban_errors = tidy.collect_errors_for_files(iterFile('ban.rs'), [], [tidy.check_rust], print_text=False)
self.assertEqual('Banned type Cell<JSVal> detected. Use MutDom<JSVal> instead', ban_errors.next()[2])
self.assertNoMoreErrors(ban_errors)
ban_errors = tidy.collect_errors_for_files(iterFile('ban-domrefcell.rs'), [], [tidy.check_rust], print_text=False)
self.assertEqual('Banned type DomRefCell<Dom<T>> detected. Use MutDom<T> instead', ban_errors.next()[2])
self.assertNoMoreErrors(ban_errors)
def test_spec_link(self):
tidy.SPEC_BASE_PATH = base_path
errors = tidy.collect_errors_for_files(iterFile('speclink.rs'), [], [tidy.check_spec], print_text=False)
self.assertEqual('method declared in webidl is missing a comment with a specification link', errors.next()[2])
self.assertEqual('method declared in webidl is missing a comment with a specification link', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_script_thread(self):
errors = tidy.collect_errors_for_files(iterFile('script_thread.rs'), [], [tidy.check_rust], print_text=False)
self.assertEqual('use a separate variable for the match expression', errors.next()[2])
self.assertEqual('use a separate variable for the match expression', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_webidl(self):
errors = tidy.collect_errors_for_files(iterFile('spec.webidl'), [tidy.check_webidl_spec], [], print_text=False)
self.assertEqual('No specification link found.', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_toml(self):
errors = tidy.collect_errors_for_files(iterFile('Cargo.toml'), [tidy.check_toml], [], print_text=False)
self.assertEqual('found asterisk instead of minimum version number', errors.next()[2])
self.assertEqual('.toml file should contain a valid license.', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_modeline(self):
errors = tidy.collect_errors_for_files(iterFile('modeline.txt'), [], [tidy.check_modeline], print_text=False)
self.assertEqual('vi modeline present', errors.next()[2])
self.assertEqual('vi modeline present', errors.next()[2])
self.assertEqual('vi modeline present', errors.next()[2])
self.assertEqual('emacs file variables present', errors.next()[2])
self.assertEqual('emacs file variables present', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_malformed_json(self):
errors = tidy.collect_errors_for_files(iterFile('malformed_json.json'), [tidy.check_json], [], print_text=False)
self.assertEqual('Invalid control character at: line 3 column 40 (char 61)', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_json_with_duplicate_key(self):
errors = tidy.collect_errors_for_files(iterFile('duplicate_key.json'), [tidy.check_json], [], print_text=False)
self.assertEqual('Duplicated Key (the_duplicated_key)', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_json_with_unordered_keys(self):
tidy.config["check-ordered-json-keys"].append('python/tidy/servo_tidy_tests/unordered_key.json')
errors = tidy.collect_errors_for_files(iterFile('unordered_key.json'), [tidy.check_json], [], print_text=False)
self.assertEqual('Unordered key (found b before a)', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_yaml_with_duplicate_key(self):
errors = tidy.collect_errors_for_files(iterFile('duplicate_keys_buildbot_steps.yml'), [tidy.check_yaml], [], print_text=False)
self.assertEqual('Duplicated Key (duplicate_yaml_key)', errors.next()[2])
self.assertNoMoreErrors(errors)
def test_non_list_mapped_buildbot_steps(self):
errors = tidy.collect_errors_for_files(iterFile('non_list_mapping_buildbot_steps.yml'), [tidy.check_yaml], [], print_text=False)
self.assertEqual("expected a list for dictionary value @ data['non-list-key']", errors.next()[2])
self.assertNoMoreErrors(errors)
def test_non_string_list_mapping_buildbot_steps(self):
errors = tidy.collect_errors_for_files(iterFile('non_string_list_buildbot_steps.yml'), [tidy.check_yaml], [], print_text=False)
self.assertEqual("expected str @ data['mapping_key'][0]", errors.next()[2])
self.assertNoMoreErrors(errors)
def test_lock(self):
errors = tidy.collect_errors_for_files(iterFile('duplicated_package.lock'), [tidy.check_lock], [], print_text=False)
msg = """duplicate versions for package `test`
\t\x1b[93mThe following packages depend on version 0.4.9 from 'crates.io':\x1b[0m
\t\ttest2
\t\x1b[93mThe following packages depend on version 0.5.1 from 'crates.io':\x1b[0m"""
self.assertEqual(msg, errors.next()[2])
msg2 = """duplicate versions for package `test3`
\t\x1b[93mThe following packages depend on version 0.5.1 from 'crates.io':\x1b[0m
\t\ttest4
\t\x1b[93mThe following packages depend on version 0.5.1 from 'https://github.com/user/test3':\x1b[0m
\t\ttest5"""
self.assertEqual(msg2, errors.next()[2])
self.assertNoMoreErrors(errors)
def test_lock_ignore_without_duplicates(self):
tidy.config["ignore"]["packages"] = ["test", "test2", "test3", "test5"]
errors = tidy.collect_errors_for_files(iterFile('duplicated_package.lock'), [tidy.check_lock], [], print_text=False)
msg = (
"duplicates for `test2` are allowed, but only single version found"
"\n\t\x1b[93mThe following packages depend on version 0.1.0 from 'https://github.com/user/test2':\x1b[0m"
)
self.assertEqual(msg, errors.next()[2])
msg2 = (
"duplicates for `test5` are allowed, but only single version found"
"\n\t\x1b[93mThe following packages depend on version 0.1.0 from 'https://github.com/':\x1b[0m"
)
self.assertEqual(msg2, errors.next()[2])
self.assertNoMoreErrors(errors)
def test_lint_runner(self):
test_path = base_path + 'lints/'
runner = tidy.LintRunner(only_changed_files=False, progress=False)
runner.path = test_path + 'some-fictional-file'
self.assertEqual([(runner.path, 0, "file does not exist")], list(runner.check()))
runner.path = test_path + 'not_script'
self.assertEqual([(runner.path, 0, "lint should be a python script")],
list(runner.check()))
runner.path = test_path + 'not_inherited.py'
self.assertEqual([(runner.path, 1, "class 'Lint' should inherit from 'LintRunner'")],
list(runner.check()))
runner.path = test_path + 'no_lint.py'
self.assertEqual([(runner.path, 1, "script should contain a class named 'Lint'")],
list(runner.check()))
runner.path = test_path + 'no_run.py'
self.assertEqual([(runner.path, 0, "class 'Lint' should implement 'run' method")],
list(runner.check()))
runner.path = test_path + 'invalid_error_tuple.py'
self.assertEqual([(runner.path, 1, "errors should be a tuple of (path, line, reason)")],
list(runner.check()))
runner.path = test_path + 'proper_file.py'
self.assertEqual([('path', 0, "foobar")], list(runner.check()))
def test_file_list(self):
        base_path = './python/tidy/servo_tidy_tests/test_ignored'
file_list = tidy.FileList(base_path, only_changed_files=False, exclude_dirs=[])
lst = list(file_list)
self.assertEqual([os.path.join(base_path, 'whee', 'test.rs'), os.path.join(base_path, 'whee', 'foo', 'bar.rs')], lst)
file_list = tidy.FileList(base_path, only_changed_files=False,
exclude_dirs=[os.path.join(base_path, 'whee', 'foo')])
lst = list(file_list)
self.assertEqual([os.path.join(base_path, 'whee', 'test.rs')], lst)
def test_multiline_string(self):
errors = tidy.collect_errors_for_files(iterFile('multiline_string.rs'), [], [tidy.check_rust], print_text=True)
self.assertNoMoreErrors(errors)
def do_tests():
suite = unittest.TestLoader().loadTestsFromTestCase(CheckTidiness)
return 0 if unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful() else 1
|
rs/petl
|
refs/heads/master
|
src/petl/io/db.py
|
1
|
from __future__ import absolute_import, print_function, division
__author__ = 'Alistair Miles <alimanfoo@googlemail.com>'
# standard library dependencies
import logging
# internal dependencies
from petl.util import RowContainer
logger = logging.getLogger(__name__)
warning = logger.warning
info = logger.info
debug = logger.debug
def fromdb(dbo, query, *args, **kwargs):
"""
Provides access to data from any DB-API 2.0 connection via a given query.
E.g., using `sqlite3`::
>>> import sqlite3
>>> from petl import look, fromdb
>>> connection = sqlite3.connect('test.db')
>>> table = fromdb(connection, 'select * from foobar')
>>> look(table)
E.g., using `psycopg2` (assuming you've installed it first)::
>>> import psycopg2
>>> from petl import look, fromdb
>>> connection = psycopg2.connect("dbname=test user=postgres")
>>> table = fromdb(connection, 'select * from test')
>>> look(table)
E.g., using `MySQLdb` (assuming you've installed it first)::
>>> import MySQLdb
>>> from petl import look, fromdb
>>> connection = MySQLdb.connect(passwd="moonpie", db="thangs")
>>> table = fromdb(connection, 'select * from test')
>>> look(table)
.. versionchanged:: 0.10.2
The first argument may also be a function that creates a cursor. E.g.::
>>> import psycopg2
>>> from petl import look, fromdb
>>> connection = psycopg2.connect("dbname=test user=postgres")
>>> mkcursor = lambda: connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
>>> table = fromdb(mkcursor, 'select * from test')
>>> look(table)
N.B., each call to the function should return a new cursor.
.. versionchanged:: 0.18
Added support for server-side cursors.
Note that the default behaviour of most database servers and clients is for
the entire result set for each query to be sent from the server to the
client. If your query returns a large result set this can result in
significant memory usage at the client. Some databases support server-side
cursors which provide a means for client libraries to fetch result sets
incrementally, reducing memory usage at the client.
To use a server-side cursor with a PostgreSQL database, e.g.::
>>> import psycopg2
>>> from petl import look, fromdb
>>> connection = psycopg2.connect("dbname=test user=postgres")
>>> table = fromdb(lambda: connection.cursor(name='arbitrary'), 'select * from test')
>>> look(table)
To use a server-side cursor with a MySQL database, e.g.::
>>> import MySQLdb
>>> from petl import look, fromdb
>>> connection = MySQLdb.connect(passwd="moonpie", db="thangs")
>>> table = fromdb(lambda: connection.cursor(MySQLdb.cursors.SSCursor), 'select * from test')
>>> look(table)
For more information on server-side cursors see the following links:
* http://initd.org/psycopg/docs/usage.html#server-side-cursors
* http://mysql-python.sourceforge.net/MySQLdb.html#using-and-extending
"""
return DbView(dbo, query, *args, **kwargs)
def _is_dbapi_connection(dbo):
return _hasmethod(dbo, 'cursor')
def _is_dbapi_cursor(dbo):
return _hasmethods(dbo, 'execute', 'executemany', 'fetchone', 'fetchmany',
'fetchall')
def _is_sqlalchemy_engine(dbo):
return (_hasmethods(dbo, 'execute', 'contextual_connect', 'raw_connection')
and _hasprop(dbo, 'driver'))
def _is_sqlalchemy_session(dbo):
return _hasmethods(dbo, 'execute', 'connection', 'get_bind')
def _is_sqlalchemy_connection(dbo):
    # N.B., these are not completely selective conditions; this test needs
    # to be applied after ruling out a DB-API cursor
return _hasmethod(dbo, 'execute') and _hasprop(dbo, 'connection')
class DbView(RowContainer):
def __init__(self, dbo, query, *args, **kwargs):
self.dbo = dbo
self.query = query
self.args = args
self.kwargs = kwargs
def __iter__(self):
# does it quack like a standard DB-API 2.0 connection?
if _is_dbapi_connection(self.dbo):
debug('assuming %r is standard DB-API 2.0 connection', self.dbo)
_iter = _iter_dbapi_connection
# does it quack like a standard DB-API 2.0 cursor?
elif _is_dbapi_cursor(self.dbo):
            debug('assuming %r is standard DB-API 2.0 cursor', self.dbo)
warning('using a DB-API cursor with fromdb() is not recommended '
'and may lead to unexpected results, a DB-API connection '
'is better')
_iter = _iter_dbapi_cursor
# does it quack like an SQLAlchemy engine?
elif _is_sqlalchemy_engine(self.dbo):
debug('assuming %r instance of sqlalchemy.engine.base.Engine',
self.dbo)
_iter = _iter_sqlalchemy_engine
# does it quack like an SQLAlchemy session?
elif _is_sqlalchemy_session(self.dbo):
debug('assuming %r instance of sqlalchemy.orm.session.Session',
self.dbo)
_iter = _iter_sqlalchemy_session
# does it quack like an SQLAlchemy connection?
elif _is_sqlalchemy_connection(self.dbo):
debug('assuming %r instance of sqlalchemy.engine.base.Connection',
self.dbo)
_iter = _iter_sqlalchemy_connection
elif callable(self.dbo):
debug('assuming %r is a function returning a cursor', self.dbo)
_iter = _iter_dbapi_mkcurs
# some other sort of duck...
else:
raise Exception('unsupported database object type: %r' % self.dbo)
return _iter(self.dbo, self.query, *self.args, **self.kwargs)
def _iter_dbapi_mkcurs(mkcurs, query, *args, **kwargs):
cursor = mkcurs()
try:
for row in _iter_dbapi_cursor(cursor, query, *args, **kwargs):
yield row
finally:
cursor.close()
def _iter_dbapi_connection(connection, query, *args, **kwargs):
cursor = connection.cursor()
try:
for row in _iter_dbapi_cursor(cursor, query, *args, **kwargs):
yield row
finally:
cursor.close()
def _iter_dbapi_cursor(cursor, query, *args, **kwargs):
cursor.execute(query, *args, **kwargs)
# fetch one row before iterating, to force population of cursor.description
# which may be postponed if using server-side cursors
first_row = cursor.fetchone()
# fields should be available now
fields = [d[0] for d in cursor.description]
yield tuple(fields)
if first_row is None:
raise StopIteration
yield first_row
for row in cursor:
yield row # don't wrap, return whatever the database engine returns
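# For illustration only (a hypothetical sketch, not part of this module):
# iterating a DbView built on a DB-API connection or cursor yields the header
# tuple first, then the data rows exactly as the driver returns them, e.g.
#
#     >>> table = fromdb(connection, 'select id, name from foobar')
#     >>> it = iter(table)
#     >>> it.next()   # header derived from cursor.description
#     ('id', 'name')
#     >>> it.next()   # first data row
#     (1, 'a')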
def _iter_sqlalchemy_engine(engine, query, *args, **kwargs):
return _iter_sqlalchemy_connection(engine.contextual_connect(), query,
*args, **kwargs)
def _iter_sqlalchemy_connection(connection, query, *args, **kwargs):
debug('connection: %r', connection)
results = connection.execute(query, *args, **kwargs)
fields = results.keys()
yield tuple(fields)
for row in results:
yield row
def _iter_sqlalchemy_session(session, query, *args, **kwargs):
results = session.execute(query, *args, **kwargs)
fields = results.keys()
yield tuple(fields)
for row in results:
yield row
def todb(table, dbo, tablename, schema=None, commit=True):
"""
Load data into an existing database table via a DB-API 2.0
connection or cursor. Note that the database table will be truncated,
i.e., all existing rows will be deleted prior to inserting the new data.
E.g.::
>>> from petl import look, todb
>>> look(table)
+-------+-------+
| 'foo' | 'bar' |
+=======+=======+
| 'a' | 1 |
+-------+-------+
| 'b' | 2 |
+-------+-------+
| 'c' | 2 |
+-------+-------+
... using :mod:`sqlite3`::
>>> import sqlite3
>>> connection = sqlite3.connect('test.db')
>>> # assuming table "foobar" already exists in the database
... todb(table, connection, 'foobar')
... using :mod:`psycopg2`::
>>> import psycopg2
>>> connection = psycopg2.connect("dbname=test user=postgres")
>>> # assuming table "foobar" already exists in the database
... todb(table, connection, 'foobar')
... using :mod:`MySQLdb`::
>>> import MySQLdb
>>> connection = MySQLdb.connect(passwd="moonpie", db="thangs")
>>> # tell MySQL to use standard quote character
... connection.cursor().execute('SET SQL_MODE=ANSI_QUOTES')
>>> # load data, assuming table "foobar" already exists in the database
... todb(table, connection, 'foobar')
N.B., for MySQL the statement ``SET SQL_MODE=ANSI_QUOTES`` is required to
ensure MySQL uses SQL-92 standard quote characters.
.. versionchanged:: 0.10.2
A cursor can also be provided instead of a connection, e.g.::
>>> import psycopg2
>>> connection = psycopg2.connect("dbname=test user=postgres")
>>> cursor = connection.cursor()
>>> todb(table, cursor, 'foobar')
"""
_todb(table, dbo, tablename, schema=schema, commit=commit, truncate=True)
def _hasmethod(o, n):
return hasattr(o, n) and callable(getattr(o, n))
def _hasmethods(o, *l):
return all(_hasmethod(o, n) for n in l)
def _hasprop(o, n):
return hasattr(o, n) and not callable(getattr(o, n))
def _todb(table, dbo, tablename, schema=None, commit=True, truncate=False):
# need to deal with polymorphic dbo argument
# what sort of duck is it?
# does it quack like a standard DB-API 2.0 connection?
if _is_dbapi_connection(dbo):
debug('assuming %r is standard DB-API 2.0 connection', dbo)
_todb_dbapi_connection(table, dbo, tablename, schema=schema,
commit=commit, truncate=truncate)
# does it quack like a standard DB-API 2.0 cursor?
elif _is_dbapi_cursor(dbo):
        debug('assuming %r is standard DB-API 2.0 cursor', dbo)
_todb_dbapi_cursor(table, dbo, tablename, schema=schema, commit=commit,
truncate=truncate)
# does it quack like an SQLAlchemy engine?
elif _is_sqlalchemy_engine(dbo):
debug('assuming %r instance of sqlalchemy.engine.base.Engine', dbo)
_todb_sqlalchemy_engine(table, dbo, tablename, schema=schema,
commit=commit, truncate=truncate)
# does it quack like an SQLAlchemy session?
elif _is_sqlalchemy_session(dbo):
debug('assuming %r instance of sqlalchemy.orm.session.Session', dbo)
_todb_sqlalchemy_session(table, dbo, tablename, schema=schema,
commit=commit, truncate=truncate)
# does it quack like an SQLAlchemy connection?
elif _is_sqlalchemy_connection(dbo):
debug('assuming %r instance of sqlalchemy.engine.base.Connection', dbo)
_todb_sqlalchemy_connection(table, dbo, tablename, schema=schema,
commit=commit, truncate=truncate)
elif callable(dbo):
debug('assuming %r is a function returning standard DB-API 2.0 cursor '
'objects', dbo)
_todb_dbapi_mkcurs(table, dbo, tablename, schema=schema, commit=commit,
truncate=truncate)
# some other sort of duck...
else:
raise Exception('unsupported database object type: %r' % dbo)
SQL_TRUNCATE_QUERY = u'DELETE FROM %s'
SQL_INSERT_QUERY = u'INSERT INTO %s (%s) VALUES (%s)'
def _todb_dbapi_connection(table, connection, tablename, schema=None,
commit=True, truncate=False):
# sanitise table name
tablename = _quote(tablename)
if schema is not None:
tablename = _quote(schema) + '.' + tablename
debug('tablename: %r', tablename)
# sanitise field names
it = iter(table)
fields = it.next()
fieldnames = map(str, fields)
colnames = [_quote(n) for n in fieldnames]
debug('column names: %r', colnames)
# determine paramstyle and build placeholders string
placeholders = _placeholders(connection, colnames)
debug('placeholders: %r', placeholders)
# get a cursor
cursor = connection.cursor()
if truncate:
        # TRUNCATE is not supported in some databases and causes locks with
        # MySQL when used via SQLAlchemy, so fall back to DELETE FROM for now
truncatequery = SQL_TRUNCATE_QUERY % tablename
debug('truncate the table via query %r', truncatequery)
cursor.execute(truncatequery)
# just in case, close and resurrect cursor
cursor.close()
cursor = connection.cursor()
# insertquery = 'INSERT INTO %s VALUES (%s)' % (tablename, placeholders)
insertcolnames = ', '.join(colnames)
insertquery = SQL_INSERT_QUERY % (tablename, insertcolnames, placeholders)
debug('insert data via query %r' % insertquery)
cursor.executemany(insertquery, it)
# finish up
debug('close the cursor')
cursor.close()
if commit:
debug('commit transaction')
connection.commit()
def _todb_dbapi_mkcurs(table, mkcurs, tablename, schema=None, commit=True,
truncate=False):
# sanitise table name
tablename = _quote(tablename)
if schema is not None:
tablename = _quote(schema) + '.' + tablename
debug('tablename: %r', tablename)
# sanitise field names
it = iter(table)
fields = it.next()
fieldnames = map(str, fields)
colnames = [_quote(n) for n in fieldnames]
debug('column names: %r', colnames)
debug('obtain cursor and connection')
cursor = mkcurs()
# N.B., we depend on this optional DB-API 2.0 attribute being implemented
assert hasattr(cursor, 'connection'), \
'could not obtain connection via cursor'
connection = cursor.connection
# determine paramstyle and build placeholders string
placeholders = _placeholders(connection, colnames)
debug('placeholders: %r', placeholders)
if truncate:
        # TRUNCATE is not supported in some databases and causes locks with
        # MySQL when used via SQLAlchemy, so fall back to DELETE FROM for now
truncatequery = SQL_TRUNCATE_QUERY % tablename
debug('truncate the table via query %r', truncatequery)
cursor.execute(truncatequery)
# N.B., may be server-side cursor, need to resurrect
cursor.close()
cursor = mkcurs()
# insertquery = 'INSERT INTO %s VALUES (%s)' % (tablename, placeholders)
insertcolnames = ', '.join(colnames)
insertquery = SQL_INSERT_QUERY % (tablename, insertcolnames, placeholders)
debug('insert data via query %r' % insertquery)
cursor.executemany(insertquery, it)
cursor.close()
if commit:
debug('commit transaction')
connection.commit()
def _todb_dbapi_cursor(table, cursor, tablename, schema=None, commit=True,
truncate=False):
# sanitise table name
tablename = _quote(tablename)
if schema is not None:
tablename = _quote(schema) + '.' + tablename
debug('tablename: %r', tablename)
# sanitise field names
it = iter(table)
fields = it.next()
fieldnames = map(str, fields)
colnames = [_quote(n) for n in fieldnames]
debug('column names: %r', colnames)
debug('obtain connection via cursor')
# N.B., we depend on this optional DB-API 2.0 attribute being implemented
assert hasattr(cursor, 'connection'), \
'could not obtain connection via cursor'
connection = cursor.connection
# determine paramstyle and build placeholders string
placeholders = _placeholders(connection, colnames)
debug('placeholders: %r', placeholders)
if truncate:
        # TRUNCATE is not supported in some databases and causes locks with
        # MySQL when used via SQLAlchemy, so fall back to DELETE FROM for now
truncatequery = SQL_TRUNCATE_QUERY % tablename
debug('truncate the table via query %r', truncatequery)
cursor.execute(truncatequery)
# insertquery = 'INSERT INTO %s VALUES (%s)' % (tablename, placeholders)
insertcolnames = ', '.join(colnames)
insertquery = SQL_INSERT_QUERY % (tablename, insertcolnames, placeholders)
debug('insert data via query %r' % insertquery)
cursor.executemany(insertquery, it)
# N.B., don't close the cursor, leave that to the application
if commit:
debug('commit transaction')
connection.commit()
def _todb_sqlalchemy_engine(table, engine, tablename, schema=None, commit=True,
truncate=False):
_todb_sqlalchemy_connection(table, engine.contextual_connect(), tablename,
schema=schema, commit=commit, truncate=truncate)
def _todb_sqlalchemy_connection(table, connection, tablename, schema=None,
commit=True, truncate=False):
debug('connection: %r', connection)
# sanitise table name
tablename = _quote(tablename)
if schema is not None:
tablename = _quote(schema) + '.' + tablename
debug('tablename: %r', tablename)
# sanitise field names
it = iter(table)
fields = it.next()
fieldnames = map(str, fields)
colnames = [_quote(n) for n in fieldnames]
debug('column names: %r', colnames)
# N.B., we need to obtain a reference to the underlying DB-API connection so
# we can import the module and determine the paramstyle
proxied_raw_connection = connection.connection
actual_raw_connection = proxied_raw_connection.connection
# determine paramstyle and build placeholders string
placeholders = _placeholders(actual_raw_connection, colnames)
debug('placeholders: %r', placeholders)
if commit:
debug('begin transaction')
trans = connection.begin()
if truncate:
        # TRUNCATE is not supported in some databases and causes locks with
        # MySQL when used via SQLAlchemy, so fall back to DELETE FROM for now
truncatequery = SQL_TRUNCATE_QUERY % tablename
debug('truncate the table via query %r', truncatequery)
connection.execute(truncatequery)
# insertquery = 'INSERT INTO %s VALUES (%s)' % (tablename, placeholders)
insertcolnames = ', '.join(colnames)
insertquery = SQL_INSERT_QUERY % (tablename, insertcolnames, placeholders)
debug('insert data via query %r' % insertquery)
for row in it:
connection.execute(insertquery, row)
# finish up
if commit:
debug('commit transaction')
trans.commit()
# N.B., don't close connection, leave that to the application
def _todb_sqlalchemy_session(table, session, tablename, schema=None,
commit=True, truncate=False):
_todb_sqlalchemy_connection(table, session.connection(), tablename,
schema=schema, commit=commit,
truncate=truncate)
def appenddb(table, dbo, tablename, schema=None, commit=True):
"""
Load data into an existing database table via a DB-API 2.0
connection or cursor. Note that the database table will be appended,
i.e., the new data will be inserted into the table, and any existing
rows will remain. E.g.::
>>> from petl import look, appenddb
>>> look(table)
+-------+-------+
| 'foo' | 'bar' |
+=======+=======+
| 'a' | 1 |
+-------+-------+
| 'b' | 2 |
+-------+-------+
| 'c' | 2 |
+-------+-------+
... using :mod:`sqlite3`::
>>> import sqlite3
>>> connection = sqlite3.connect('test.db')
>>> # assuming table "foobar" already exists in the database
... appenddb(table, connection, 'foobar')
... using :mod:`psycopg2`::
>>> import psycopg2
>>> connection = psycopg2.connect("dbname=test user=postgres")
>>> # assuming table "foobar" already exists in the database
... appenddb(table, connection, 'foobar')
... using :mod:`MySQLdb`::
>>> import MySQLdb
>>> connection = MySQLdb.connect(passwd="moonpie", db="thangs")
>>> # tell MySQL to use standard quote character
... connection.cursor().execute('SET SQL_MODE=ANSI_QUOTES')
>>> # load data, appending rows to table "foobar"
... appenddb(table, connection, 'foobar')
N.B., for MySQL the statement ``SET SQL_MODE=ANSI_QUOTES`` is required to
ensure MySQL uses SQL-92 standard quote characters.
.. versionchanged:: 0.10.2
A cursor can also be provided instead of a connection, e.g.::
>>> import psycopg2
>>> connection = psycopg2.connect("dbname=test user=postgres")
>>> cursor = connection.cursor()
>>> appenddb(table, cursor, 'foobar')
"""
_todb(table, dbo, tablename, schema=schema, commit=commit, truncate=False)
# default DB quote char per SQL-92
quotechar = '"'
def _quote(s):
    # crude way to sanitise table and field names so they conform with the
    # SQL-92 standard. See http://stackoverflow.com/a/214344
return quotechar + s.replace(quotechar, quotechar+quotechar) + quotechar
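# A minimal illustration of _quote (hypothetical identifiers): embedded quote
# characters are doubled and the whole identifier is wrapped, e.g.
#     _quote('foobar')      -> '"foobar"'
#     _quote('my "table"')  -> '"my ""table"""'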
def _placeholders(connection, names):
# discover the paramstyle
if connection is None:
# default to using question mark
debug('connection is None, default to using qmark paramstyle')
placeholders = ', '.join(['?'] * len(names))
else:
mod = __import__(connection.__class__.__module__)
if not hasattr(mod, 'paramstyle'):
            debug('module %r from connection %r has no attribute paramstyle, '
                  'defaulting to qmark', mod, connection)
# default to using question mark
placeholders = ', '.join(['?'] * len(names))
elif mod.paramstyle == 'qmark':
debug('found paramstyle qmark')
placeholders = ', '.join(['?'] * len(names))
elif mod.paramstyle in ('format', 'pyformat'):
debug('found paramstyle pyformat')
placeholders = ', '.join(['%s'] * len(names))
elif mod.paramstyle == 'numeric':
debug('found paramstyle numeric')
placeholders = ', '.join([':' + str(i + 1)
for i in range(len(names))])
else:
debug('found unexpected paramstyle %r, defaulting to qmark',
mod.paramstyle)
placeholders = ', '.join(['?'] * len(names))
return placeholders
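# A minimal illustration of the paramstyle handling above (assuming a
# hypothetical three-column table), showing the placeholder string that ends
# up in the INSERT query:
#     qmark / unknown / no connection -> '?, ?, ?'
#     format or pyformat              -> '%s, %s, %s'
#     numeric                         -> ':1, :2, :3'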
|
wakermahmud/sync-engine
|
refs/heads/master
|
inbox/models/event.py
|
1
|
import arrow
from datetime import datetime
from dateutil.parser import parse as date_parse
import ast
from sqlalchemy import (Column, String, ForeignKey, Text, Boolean, Integer,
DateTime, Enum, Index, event)
from sqlalchemy.orm import relationship, backref, validates, reconstructor
from sqlalchemy.types import TypeDecorator
from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy.ext.associationproxy import association_proxy
from inbox.sqlalchemy_ext.util import MAX_TEXT_LENGTH, BigJSON, MutableList
from inbox.models.base import MailSyncBase
from inbox.models.mixins import HasPublicID, HasRevisions
from inbox.models.calendar import Calendar
from inbox.models.namespace import Namespace
from inbox.models.message import Message
from inbox.models.when import Time, TimeSpan, Date, DateSpan
from inbox.events.util import parse_rrule_datetime
from inbox.log import get_logger
log = get_logger()
TITLE_MAX_LEN = 1024
LOCATION_MAX_LEN = 255
RECURRENCE_MAX_LEN = 255
REMINDER_MAX_LEN = 255
OWNER_MAX_LEN = 1024
_LENGTHS = {'location': LOCATION_MAX_LEN,
'owner': OWNER_MAX_LEN,
'recurrence': MAX_TEXT_LENGTH,
'reminders': REMINDER_MAX_LEN,
'title': TITLE_MAX_LEN,
'raw_data': MAX_TEXT_LENGTH}
EVENT_STATUSES = ["confirmed", "tentative", "cancelled"]
time_parse = lambda x: arrow.get(x).to('utc').naive
class FlexibleDateTime(TypeDecorator):
"""Coerce arrow times to naive datetimes before handing to the database."""
impl = DateTime
def process_bind_param(self, value, dialect):
if isinstance(value, arrow.arrow.Arrow):
value = value.to('utc').naive
if isinstance(value, datetime):
value = arrow.get(value).to('utc').naive
return value
def process_result_value(self, value, dialect):
if value is None:
return value
else:
return arrow.get(value).to('utc')
def compare_values(self, x, y):
if isinstance(x, datetime) or isinstance(x, int):
x = arrow.get(x)
        if isinstance(y, datetime) or isinstance(y, int):
y = arrow.get(y)
return x == y
class Event(MailSyncBase, HasRevisions, HasPublicID):
"""Data for events."""
API_OBJECT_NAME = 'event'
namespace_id = Column(ForeignKey(Namespace.id, ondelete='CASCADE'),
nullable=False)
namespace = relationship(Namespace, load_on_pending=True)
calendar_id = Column(ForeignKey(Calendar.id, ondelete='CASCADE'),
nullable=False)
# Note that we configure a delete cascade, rather than
# passive_deletes=True, in order to ensure that delete revisions are
# created for events if their parent calendar is deleted.
calendar = relationship(Calendar,
backref=backref('events', cascade='delete'),
load_on_pending=True)
# A server-provided unique ID.
uid = Column(String(767, collation='ascii_general_ci'), nullable=False)
# DEPRECATED
# TODO(emfree): remove
provider_name = Column(String(64), nullable=False, default='DEPRECATED')
source = Column('source', Enum('local', 'remote'), default='local')
raw_data = Column(Text, nullable=False)
title = Column(String(TITLE_MAX_LEN), nullable=True)
# The database column is named differently for legacy reasons.
owner = Column('owner2', String(OWNER_MAX_LEN), nullable=True)
description = Column('_description', LONGTEXT, nullable=True)
location = Column(String(LOCATION_MAX_LEN), nullable=True)
busy = Column(Boolean, nullable=False, default=True)
read_only = Column(Boolean, nullable=False)
reminders = Column(String(REMINDER_MAX_LEN), nullable=True)
recurrence = Column(Text, nullable=True)
start = Column(FlexibleDateTime, nullable=False)
end = Column(FlexibleDateTime, nullable=True)
all_day = Column(Boolean, nullable=False)
is_owner = Column(Boolean, nullable=False, default=True)
last_modified = Column(FlexibleDateTime, nullable=True)
status = Column('status', Enum(*EVENT_STATUSES),
server_default='confirmed')
# This column is only used for events that are synced from iCalendar
# files.
message_id = Column(ForeignKey(Message.id, ondelete='CASCADE'),
nullable=True)
message = relationship(Message,
backref=backref('events',
order_by='Event.last_modified',
cascade='all, delete-orphan'))
__table_args__ = (Index('ix_event_ns_uid_calendar_id',
'namespace_id', 'uid', 'calendar_id'),)
participants = Column(MutableList.as_mutable(BigJSON), default=[],
nullable=True)
# This is only used by the iCalendar invite code. The sequence number
# stores the version number of the invite.
sequence_number = Column(Integer, nullable=True)
discriminator = Column('type', String(30))
__mapper_args__ = {'polymorphic_on': discriminator,
'polymorphic_identity': 'event'}
@validates('reminders', 'recurrence', 'owner', 'location', 'title',
'raw_data')
def validate_length(self, key, value):
max_len = _LENGTHS[key]
return value if value is None else value[:max_len]
@property
def when(self):
if self.all_day:
# Dates are stored as DateTimes so transform to dates here.
start = arrow.get(self.start).to('utc').date()
end = arrow.get(self.end).to('utc').date()
return Date(start) if start == end else DateSpan(start, end)
else:
start = self.start
end = self.end
return Time(start) if start == end else TimeSpan(start, end)
@when.setter
def when(self, when):
if 'time' in when:
self.start = self.end = time_parse(when['time'])
self.all_day = False
elif 'start_time' in when:
self.start = time_parse(when['start_time'])
self.end = time_parse(when['end_time'])
self.all_day = False
elif 'date' in when:
self.start = self.end = date_parse(when['date'])
self.all_day = True
elif 'start_date' in when:
self.start = date_parse(when['start_date'])
self.end = date_parse(when['end_date'])
self.all_day = True
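        # Illustrative (hypothetical) payloads accepted by the setter above,
        # and the type the `when` property then returns:
        #   {'time': '2015-03-02T10:00:00+00:00'}                   -> Time
        #   {'start_time': '2015-03-02T10:00:00+00:00',
        #    'end_time': '2015-03-02T11:00:00+00:00'}               -> TimeSpan
        #   {'date': '2015-03-02'}                                  -> Date
        #   {'start_date': '2015-03-02', 'end_date': '2015-03-04'}  -> DateSpan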
def _merge_participant_attributes(self, left, right):
"""Merge right into left. Right takes precedence unless it's null."""
for attribute in right.keys():
# Special cases:
if right[attribute] is None:
continue
elif right[attribute] == '':
continue
elif right['status'] == 'noreply':
continue
else:
left[attribute] = right[attribute]
return left
def _partial_participants_merge(self, event):
"""Merge the participants from event into self.participants.
event always takes precedence over self, except if
a participant in self isn't in event.
This method is only called by the ical merging code because
iCalendar attendance updates are partial: an RSVP reply often
only contains the status of the person that RSVPs.
It would be very wrong to call this method to merge, say, Google
Events participants because they handle the merging themselves.
"""
# We have to jump through some hoops because a participant may
# not have an email or may not have a name, so we build a hash
# where we can find both. Also note that we store names in the
# hash only if the email is None.
self_hash = {}
for participant in self.participants:
email = participant.get('email')
name = participant.get('name')
if email is not None:
self_hash[email] = participant
elif name is not None:
# We have a name without an email.
self_hash[name] = participant
for participant in event.participants:
email = participant.get('email')
name = participant.get('name')
            # This is the tricky part: we only want to store one entry per
            # participant. If the participant has an email we merge into the
            # existing entry for that address, creating one if needed;
            # otherwise we fall back to the name. This mostly works because
            # we're merging updates to an event and iCal updates always have
            # an email address.
            # - karim
if email is not None:
if email in self_hash:
self_hash[email] =\
self._merge_participant_attributes(self_hash[email],
participant)
else:
self_hash[email] = participant
elif name is not None:
if name in self_hash:
self_hash[name] =\
self._merge_participant_attributes(self_hash[name],
participant)
else:
self_hash[name] = participant
return self_hash.values()
def update(self, event):
if event.namespace is not None and event.namespace.id is not None:
self.namespace_id = event.namespace.id
if event.calendar is not None and event.calendar.id is not None:
self.calendar_id = event.calendar.id
if event.provider_name is not None:
self.provider_name = event.provider_name
self.uid = event.uid
self.raw_data = event.raw_data
self.title = event.title
self.description = event.description
self.location = event.location
self.start = event.start
self.end = event.end
self.all_day = event.all_day
self.owner = event.owner
self.is_owner = event.is_owner
self.read_only = event.read_only
self.participants = event.participants
self.busy = event.busy
self.reminders = event.reminders
self.recurrence = event.recurrence
self.last_modified = event.last_modified
self.message = event.message
self.status = event.status
if event.sequence_number is not None:
self.sequence_number = event.sequence_number
@property
def recurring(self):
if self.recurrence and self.recurrence != '':
try:
r = ast.literal_eval(self.recurrence)
if isinstance(r, str):
r = [r]
return r
except ValueError:
log.warn('Invalid RRULE entry for event', event_id=self.id)
return []
return []
@property
def is_recurring(self):
return self.recurrence is not None
@property
def length(self):
return self.when.delta
@property
def cancelled(self):
return self.status == 'cancelled'
@cancelled.setter
def cancelled(self, is_cancelled):
if is_cancelled:
self.status = 'cancelled'
else:
self.status = 'confirmed'
@classmethod
def __new__(cls, *args, **kwargs):
# Decide whether or not to instantiate a RecurringEvent/Override
# based on the kwargs we get.
cls_ = cls
recurrence = kwargs.get('recurrence')
master_event_uid = kwargs.get('master_event_uid')
if recurrence and master_event_uid:
raise ValueError("Event can't have both recurrence and master UID")
if recurrence and recurrence != '':
cls_ = RecurringEvent
if master_event_uid:
cls_ = RecurringEventOverride
return object.__new__(cls_, *args, **kwargs)
def __init__(self, **kwargs):
# Allow arguments for all subclasses to be passed to main constructor
for k in kwargs.keys():
if not hasattr(type(self), k):
del kwargs[k]
super(Event, self).__init__(**kwargs)
class RecurringEvent(Event):
""" Represents an individual one-off instance of a recurring event,
including cancelled events.
"""
__mapper_args__ = {'polymorphic_identity': 'recurringevent'}
__table_args__ = None
id = Column(Integer, ForeignKey('event.id', ondelete='CASCADE'),
primary_key=True)
rrule = Column(String(RECURRENCE_MAX_LEN))
exdate = Column(Text) # There can be a lot of exception dates
until = Column(FlexibleDateTime, nullable=True)
start_timezone = Column(String(35))
override_uids = association_proxy('overrides', 'uid')
def __init__(self, **kwargs):
self.start_timezone = kwargs.pop('original_start_tz', None)
kwargs['recurrence'] = repr(kwargs['recurrence'])
super(RecurringEvent, self).__init__(**kwargs)
try:
self.unwrap_rrule()
except Exception as e:
log.error("Error parsing RRULE entry", event_id=self.id,
error=e, exc_info=True)
    # FIXME @karim: use an overridden property instead of a reconstructor.
@reconstructor
def reconstruct(self):
try:
self.unwrap_rrule()
except Exception as e:
log.error("Error parsing stored RRULE entry", event_id=self.id,
error=e, exc_info=True)
def inflate(self, start=None, end=None):
# Convert a RecurringEvent into a series of InflatedEvents
# by expanding its RRULE into a series of start times.
from inbox.events.recurring import get_start_times
occurrences = get_start_times(self, start, end)
return [InflatedEvent(self, o) for o in occurrences]
def unwrap_rrule(self):
# Unwraps the RRULE list of strings into RecurringEvent properties.
for item in self.recurring:
if item.startswith('RRULE'):
self.rrule = item
if 'UNTIL' in item:
for p in item.split(';'):
if p.startswith('UNTIL'):
self.until = parse_rrule_datetime(p[6:])
elif item.startswith('EXDATE'):
self.exdate = item
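        # For illustration (hypothetical recurrence list): unwrapping
        #   ['RRULE:FREQ=WEEKLY;UNTIL=20150601T000000Z',
        #    'EXDATE:20150504T100000Z']
        # stores the RRULE string in self.rrule, the parsed UNTIL datetime in
        # self.until, and the EXDATE string in self.exdate.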
def all_events(self, start=None, end=None):
# Returns all inflated events along with overrides that match the
# provided time range.
overrides = self.overrides
if start:
overrides = overrides.filter(RecurringEventOverride.start > start)
if end:
overrides = overrides.filter(RecurringEventOverride.end < end)
events = list(overrides)
uids = [e.uid for e in events]
# Remove cancellations from the override set
events = filter(lambda e: not e.cancelled, events)
# If an override has not changed the start time for an event, including
# if the override is a cancellation, the RRULE doesn't include an
# exception for it. Filter out unnecessary inflated events
# to cover this case: they will have the same UID.
for e in self.inflate(start, end):
if e.uid not in uids:
events.append(e)
return sorted(events, key=lambda e: e.start)
def update(self, event):
super(RecurringEvent, self).update(event)
if isinstance(event, type(self)):
self.rrule = event.rrule
self.exdate = event.exdate
self.until = event.until
self.start_timezone = event.start_timezone
class RecurringEventOverride(Event):
""" Represents an individual one-off instance of a recurring event,
including cancelled events.
"""
id = Column(Integer, ForeignKey('event.id', ondelete='CASCADE'),
primary_key=True)
__mapper_args__ = {'polymorphic_identity': 'recurringeventoverride',
'inherit_condition': (id == Event.id)}
__table_args__ = None
master_event_id = Column(ForeignKey('event.id'))
master_event_uid = Column(String(767, collation='ascii_general_ci'),
index=True)
original_start_time = Column(FlexibleDateTime)
master = relationship(RecurringEvent, foreign_keys=[master_event_id],
backref=backref('overrides', lazy="dynamic"))
def update(self, event):
super(RecurringEventOverride, self).update(event)
if isinstance(event, type(self)):
self.master_event_uid = event.master_event_uid
self.original_start_time = event.original_start_time
self.recurrence = None # These single instances don't recur
class InflatedEvent(Event):
""" This represents an individual instance of a recurring event, generated
on the fly when a recurring event is expanded.
These are transient objects that should never be committed to the
database.
"""
__mapper_args__ = {'polymorphic_identity': 'inflatedevent'}
__tablename__ = 'event'
__table_args__ = {'extend_existing': True}
def __init__(self, event, instance_start):
self.master = event
self.update(self.master)
self.read_only = True # Until we support modifying inflated events
# Give inflated events a UID consisting of the master UID and the
# original UTC start time of the inflation.
ts_id = instance_start.strftime("%Y%m%dT%H%M%SZ")
self.uid = "{}_{}".format(self.master.uid, ts_id)
self.public_id = "{}_{}".format(self.master.public_id, ts_id)
self.set_start_end(instance_start)
def set_start_end(self, start):
# get the length from the master event
length = self.master.length
self.start = start.to('utc')
self.end = self.start + length
def update(self, master):
super(InflatedEvent, self).update(master)
self.namespace_id = master.namespace_id
self.calendar_id = master.calendar_id
def insert_warning(mapper, connection, target):
log.warn("InflatedEvent {} shouldn't be committed".format(target))
raise Exception("InflatedEvent should not be committed")
event.listen(InflatedEvent, 'before_insert', insert_warning)
|
xrmx/django
|
refs/heads/master
|
django/conf/locale/cy/formats.py
|
504
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y' # '25 Hydref 2006'
TIME_FORMAT = 'P' # '2:30 y.b.'
DATETIME_FORMAT = 'j F Y, P' # '25 Hydref 2006, 2:30 y.b.'
YEAR_MONTH_FORMAT = 'F Y' # 'Hydref 2006'
MONTH_DAY_FORMAT = 'j F' # '25 Hydref'
SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006'
SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 y.b.'
FIRST_DAY_OF_WEEK = 1 # 'Dydd Llun'
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M:%S.%f', # '25/10/2006 14:30:59.000200'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
'%d/%m/%y %H:%M:%S.%f', # '25/10/06 14:30:59.000200'
'%d/%m/%y %H:%M', # '25/10/06 14:30'
'%d/%m/%y', # '25/10/06'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
|
sonali0901/zulip
|
refs/heads/master
|
api/integrations/hg/zulip-changegroup.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Zulip hook for Mercurial changeset pushes.
# Copyright © 2012-2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#
# This hook is called when changesets are pushed to the master repository (i.e.
# `hg push`). See https://zulipchat.com/integrations for installation instructions.
from __future__ import absolute_import
import zulip
from six.moves import range
from typing import Any, Optional, Text
from mercurial import ui, repo
VERSION = "0.9"
def format_summary_line(web_url, user, base, tip, branch, node):
# type: (str, str, int, int, str, Text) -> Text
"""
Format the first line of the message, which contains summary
information about the changeset and links to the changelog if a
web URL has been configured:
Jane Doe <jane@example.com> pushed 1 commit to master (170:e494a5be3393):
"""
revcount = tip - base
plural = "s" if revcount > 1 else ""
if web_url:
shortlog_base_url = web_url.rstrip("/") + "/shortlog/"
summary_url = "{shortlog}{tip}?revcount={revcount}".format(
shortlog=shortlog_base_url, tip=tip - 1, revcount=revcount)
formatted_commit_count = "[{revcount} commit{s}]({url})".format(
revcount=revcount, s=plural, url=summary_url)
else:
formatted_commit_count = "{revcount} commit{s}".format(
revcount=revcount, s=plural)
return u"**{user}** pushed {commits} to **{branch}** (`{tip}:{node}`):\n\n".format(
user=user, commits=formatted_commit_count, branch=branch, tip=tip,
node=node[:12])
def format_commit_lines(web_url, repo, base, tip):
# type: (str, repo, int, int) -> str
"""
Format the per-commit information for the message, including the one-line
commit summary and a link to the diff if a web URL has been configured:
"""
if web_url:
rev_base_url = web_url.rstrip("/") + "/rev/"
commit_summaries = []
for rev in range(base, tip):
rev_node = repo.changelog.node(rev)
rev_ctx = repo.changectx(rev_node)
one_liner = rev_ctx.description().split("\n")[0]
if web_url:
summary_url = rev_base_url + str(rev_ctx)
summary = "* [{summary}]({url})".format(
summary=one_liner, url=summary_url)
else:
summary = "* {summary}".format(summary=one_liner)
commit_summaries.append(summary)
return "\n".join(summary for summary in commit_summaries)
def send_zulip(email, api_key, site, stream, subject, content):
# type: (str, str, str, str, str, Text) -> str
"""
Send a message to Zulip using the provided credentials, which should be for
a bot in most cases.
"""
client = zulip.Client(email=email, api_key=api_key,
site=site,
client="ZulipMercurial/" + VERSION)
message_data = {
"type": "stream",
"to": stream,
"subject": subject,
"content": content,
}
client.send_message(message_data)
def get_config(ui, item):
# type: (ui, str) -> Optional[str]
try:
# configlist returns everything in lists.
return ui.configlist('zulip', item)[0]
except IndexError:
return None
def hook(ui, repo, **kwargs):
# type: (ui, repo, **Text) -> None
"""
Invoked by configuring a [hook] entry in .hg/hgrc.
"""
hooktype = kwargs["hooktype"]
node = kwargs["node"]
ui.debug("Zulip: received {hooktype} event\n".format(hooktype=hooktype))
if hooktype != "changegroup":
ui.warn("Zulip: {hooktype} not supported\n".format(hooktype=hooktype))
exit(1)
ctx = repo.changectx(node)
branch = ctx.branch()
# If `branches` isn't specified, notify on all branches.
branch_whitelist = get_config(ui, "branches")
branch_blacklist = get_config(ui, "ignore_branches")
if branch_whitelist:
# Only send notifications on branches we are watching.
watched_branches = [b.lower().strip() for b in branch_whitelist.split(",")]
if branch.lower() not in watched_branches:
ui.debug("Zulip: ignoring event for {branch}\n".format(branch=branch))
exit(0)
if branch_blacklist:
# Don't send notifications for branches we've ignored.
ignored_branches = [b.lower().strip() for b in branch_blacklist.split(",")]
if branch.lower() in ignored_branches:
ui.debug("Zulip: ignoring event for {branch}\n".format(branch=branch))
exit(0)
# The first and final commits in the changeset.
base = repo[node].rev()
tip = len(repo)
email = get_config(ui, "email")
api_key = get_config(ui, "api_key")
site = get_config(ui, "site")
if not (email and api_key):
ui.warn("Zulip: missing email or api_key configurations\n")
ui.warn("in the [zulip] section of your .hg/hgrc.\n")
exit(1)
stream = get_config(ui, "stream")
# Give a default stream if one isn't provided.
if not stream:
stream = "commits"
web_url = get_config(ui, "web_url")
user = ctx.user()
content = format_summary_line(web_url, user, base, tip, branch, node)
content += format_commit_lines(web_url, repo, base, tip)
subject = branch
ui.debug("Sending to Zulip:\n")
ui.debug(content + "\n")
send_zulip(email, api_key, site, stream, subject, content)
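# Hypothetical .hg/hgrc showing how this hook could be wired up. The paths
# and credentials are placeholders; the option names under [zulip] are the
# ones read by get_config() above.
#
#   [hooks]
#   changegroup = python:/path/to/zulip-changegroup.py:hook
#
#   [zulip]
#   email = hg-bot@example.com
#   api_key = 0123456789abcdef
#   site = https://zulip.example.com
#   stream = commits
#   web_url = https://hg.example.com/repo/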
|
jkenn99/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/port/win_unittest.py
|
115
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
import unittest2 as unittest
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.port import port_testcase
from webkitpy.port.win import WinPort
from webkitpy.tool.mocktool import MockOptions
class WinPortTest(port_testcase.PortTestCase):
os_name = 'win'
os_version = 'xp'
port_name = 'win-xp'
port_maker = WinPort
def test_show_results_html_file(self):
port = self.make_port()
port._executive = MockExecutive(should_log=True)
capture = OutputCapture()
capture.capture_output()
port.show_results_html_file('test.html')
_, _, logs = capture.restore_output()
# We can't know for sure what path will be produced by cygpath, but we can assert about
# everything else.
self.assertTrue(logs.startswith("MOCK run_command: ['Tools/Scripts/run-safari', '--release', '"))
self.assertTrue(logs.endswith("test.html'], cwd=/mock-checkout\n"))
def _assert_search_path(self, expected_search_paths, version, use_webkit2=False):
port = self.make_port(port_name='win', os_version=version, options=MockOptions(webkit_test_runner=use_webkit2))
absolute_search_paths = map(port._webkit_baseline_path, expected_search_paths)
self.assertEqual(port.baseline_search_path(), absolute_search_paths)
def test_baseline_search_path(self):
self._assert_search_path(['win-xp', 'win-vista', 'win-7sp0', 'win', 'mac-lion', 'mac'], 'xp')
self._assert_search_path(['win-vista', 'win-7sp0', 'win', 'mac-lion', 'mac'], 'vista')
self._assert_search_path(['win-7sp0', 'win', 'mac-lion', 'mac'], '7sp0')
self._assert_search_path(['win-wk2', 'win-xp', 'win-vista', 'win-7sp0', 'win', 'mac-wk2', 'mac-lion', 'mac'], 'xp', use_webkit2=True)
self._assert_search_path(['win-wk2', 'win-vista', 'win-7sp0', 'win', 'mac-wk2', 'mac-lion', 'mac'], 'vista', use_webkit2=True)
self._assert_search_path(['win-wk2', 'win-7sp0', 'win', 'mac-wk2', 'mac-lion', 'mac'], '7sp0', use_webkit2=True)
def _assert_version(self, port_name, expected_version):
host = MockSystemHost(os_name='win', os_version=expected_version)
port = WinPort(host, port_name=port_name)
self.assertEqual(port.version(), expected_version)
def test_versions(self):
self._assert_version('win-xp', 'xp')
self._assert_version('win-vista', 'vista')
self._assert_version('win-7sp0', '7sp0')
self.assertRaises(AssertionError, self._assert_version, 'win-me', 'xp')
def test_compare_text(self):
expected = "EDITING DELEGATE: webViewDidChangeSelection:WebViewDidChangeSelectionNotification\nfoo\nEDITING DELEGATE: webViewDidChangeSelection:WebViewDidChangeSelectionNotification\n"
port = self.make_port()
self.assertFalse(port.do_text_results_differ(expected, "foo\n"))
self.assertTrue(port.do_text_results_differ(expected, "foo"))
self.assertTrue(port.do_text_results_differ(expected, "bar"))
# This hack doesn't exist in WK2.
port._options = MockOptions(webkit_test_runner=True)
self.assertTrue(port.do_text_results_differ(expected, "foo\n"))
def test_operating_system(self):
self.assertEqual('win', self.make_port().operating_system())
def test_runtime_feature_list(self):
port = self.make_port()
port._executive.run_command = lambda command, cwd=None, error_handler=None: "Nonsense"
# runtime_features_list returns None when its results are meaningless (it couldn't run DRT or parse the output, etc.)
self.assertEqual(port._runtime_feature_list(), None)
port._executive.run_command = lambda command, cwd=None, error_handler=None: "SupportedFeatures:foo bar"
self.assertEqual(port._runtime_feature_list(), ['foo', 'bar'])
def test_expectations_files(self):
self.assertEqual(len(self.make_port().expectations_files()), 3)
self.assertEqual(len(self.make_port(options=MockOptions(webkit_test_runner=True, configuration='Release')).expectations_files()), 5)
def test_get_crash_log(self):
# Win crash logs are tested elsewhere, so here we just make sure we don't crash.
def fake_time_cb():
times = [0, 20, 40]
return lambda: times.pop(0)
port = self.make_port(port_name='win')
port._get_crash_log('DumpRenderTree', 1234, '', '', 0,
time_fn=fake_time_cb(), sleep_fn=lambda delay: None)
|
hpe-storage/horizon-hpe-storage-ui
|
refs/heads/master
|
horizon_hpe_storage/storage_panel/overview/panel.py
|
2
|
# (c) Copyright [2015] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.admin import dashboard
class OverviewPanel(horizon.Panel):
name = _("HPE Storage")
slug = "hpe_storage"
# permissions = ('openstack.services.deep_link',)
dashboard.Admin.register(OverviewPanel)
|
ZenHarbinger/snapcraft
|
refs/heads/master
|
snapcraft/internal/sources/_local.py
|
3
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
import glob
import os
import shutil
from snapcraft import file_utils
from snapcraft.internal import common
from ._base import Base
class Local(Base):
def pull(self):
if os.path.islink(self.source_dir) or os.path.isfile(self.source_dir):
os.remove(self.source_dir)
elif os.path.isdir(self.source_dir):
shutil.rmtree(self.source_dir)
current_dir = os.getcwd()
source_abspath = os.path.abspath(self.source)
def ignore(directory, files):
if directory == source_abspath or \
directory == current_dir:
ignored = copy.copy(common.SNAPCRAFT_FILES)
snaps = glob.glob(os.path.join(directory, '*.snap'))
if snaps:
snaps = [os.path.basename(s) for s in snaps]
ignored += snaps
return ignored
else:
return []
shutil.copytree(source_abspath, self.source_dir, symlinks=True,
copy_function=file_utils.link_or_copy, ignore=ignore)
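# Illustrative sketch (not part of the original module): how the ignore
# callable passed to shutil.copytree above works. copytree calls it once per
# directory with (directory, names) and skips whatever names it returns.
# Everything below is a made-up, standalone example.
def _example_copytree_ignore(src, dst):
    import shutil
    def ignore(directory, names):
        # Skip editor backup files anywhere in the tree.
        return [n for n in names if n.endswith('~')]
    shutil.copytree(src, dst, ignore=ignore)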
|
patrys/autocomplete-python
|
refs/heads/master
|
lib/jedi/__init__.py
|
34
|
"""
Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its
historic focus is autocompletion, but it now does general static analysis as well.
Jedi is fast and is very well tested. It understands Python on a deeper level
than all other static analysis frameworks for Python.
Jedi has support for two different goto functions. It's possible to search for
related names and to list all names in a Python file and infer them. Jedi
understands docstrings and you can use Jedi autocompletion in your REPL as
well.
Jedi uses a very simple API to connect with IDEs. There's a reference
implementation as a `VIM-Plugin <https://github.com/davidhalter/jedi-vim>`_,
which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs.
It's really easy.
To give you a simple example how you can use the Jedi library, here is an
example for the autocompletion feature:
>>> import jedi
>>> source = '''
... import datetime
... datetime.da'''
>>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py')
>>> script
<Script: 'example.py'>
>>> completions = script.completions()
>>> completions #doctest: +ELLIPSIS
[<Completion: date>, <Completion: datetime>, ...]
>>> print(completions[0].complete)
te
>>> print(completions[0].name)
date
As you see Jedi is pretty simple and allows you to concentrate on writing a
good text editor, while still having very good IDE features for Python.
"""
__version__ = '0.9.0'
from jedi.api import Script, Interpreter, NotFoundError, set_debug_function
from jedi.api import preload_module, defined_names, names
from jedi import settings
|
clumsy/intellij-community
|
refs/heads/master
|
python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/fixes/fix_basestring.py
|
326
|
"""Fixer for basestring -> str."""
# Author: Christian Heimes
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixBasestring(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "'basestring'"
def transform(self, node, results):
return Name(u"str", prefix=node.prefix)
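# Example of the rewrite this fixer performs (shown as a comment); lib2to3
# applies it when the fixer runs, e.g. via the 2to3 tool:
#
#   before:  isinstance(value, basestring)
#   after:   isinstance(value, str)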
|
Fokko/incubator-airflow
|
refs/heads/master
|
airflow/utils/sqlalchemy.py
|
1
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import json
import os
import pendulum
from dateutil import relativedelta
from sqlalchemy import event, exc
from sqlalchemy.types import DateTime, Text, TypeDecorator
from airflow.utils.log.logging_mixin import LoggingMixin
log = LoggingMixin().log
utc = pendulum.timezone('UTC')
def setup_event_handlers(engine):
@event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_record): # pylint: disable=unused-variable
connection_record.info['pid'] = os.getpid()
if engine.dialect.name == "sqlite":
@event.listens_for(engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record): # pylint: disable=unused-variable
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
# this ensures sanity in mysql when storing datetimes (not required for postgres)
if engine.dialect.name == "mysql":
@event.listens_for(engine, "connect")
def set_mysql_timezone(dbapi_connection, connection_record): # pylint: disable=unused-variable
cursor = dbapi_connection.cursor()
cursor.execute("SET time_zone = '+00:00'")
cursor.close()
@event.listens_for(engine, "checkout")
def checkout(dbapi_connection, connection_record, connection_proxy): # pylint: disable=unused-variable
pid = os.getpid()
if connection_record.info['pid'] != pid:
connection_record.connection = connection_proxy.connection = None
raise exc.DisconnectionError(
"Connection record belongs to pid {}, "
"attempting to check out in pid {}".format(connection_record.info['pid'], pid)
)
class UtcDateTime(TypeDecorator):
"""
Almost equivalent to :class:`~sqlalchemy.types.DateTime` with
``timezone=True`` option, but it differs from that by:
    - It never silently accepts a naive :class:`~datetime.datetime`; it
      always raises :exc:`ValueError` unless the value is time zone aware.
    - A :class:`~datetime.datetime` value's :attr:`~datetime.datetime.tzinfo`
      is always converted to UTC.
    - Unlike SQLAlchemy's built-in :class:`~sqlalchemy.types.DateTime`,
      it never returns a naive :class:`~datetime.datetime`; it returns a
      time zone aware value, even with SQLite or MySQL.
    - It always returns DateTimes in UTC.
"""
impl = DateTime(timezone=True)
def process_bind_param(self, value, dialect):
if value is not None:
if not isinstance(value, datetime.datetime):
raise TypeError('expected datetime.datetime, not ' +
repr(value))
elif value.tzinfo is None:
raise ValueError('naive datetime is disallowed')
return value.astimezone(utc)
def process_result_value(self, value, dialect):
"""
        Processes DateTimes coming from the DB, making sure they are always
        returned as UTC. We don't use timezone.convert_to_utc here because
        that converts to the configured TIMEZONE, while the DB might be
        running with some other setting. We assume the database stores
        datetimes in UTC.
"""
if value is not None:
if value.tzinfo is None:
value = value.replace(tzinfo=utc)
else:
value = value.astimezone(utc)
return value
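# Illustrative usage sketch (not part of the original module). The model and
# column names are hypothetical; the point is what UtcDateTime accepts and
# returns:
#
#   class Run(Base):
#       __tablename__ = 'run'
#       id = Column(Integer, primary_key=True)
#       execution_date = Column(UtcDateTime())
#
#   Run(execution_date=datetime.datetime(2018, 1, 1))              # naive -> ValueError at bind time
#   Run(execution_date=datetime.datetime(2018, 1, 1, tzinfo=utc))  # aware -> stored and read back in UTC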
class Interval(TypeDecorator):
impl = Text
attr_keys = {
datetime.timedelta: ('days', 'seconds', 'microseconds'),
relativedelta.relativedelta: (
'years', 'months', 'days', 'leapdays', 'hours', 'minutes', 'seconds', 'microseconds',
'year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond',
),
}
def process_bind_param(self, value, dialect):
if type(value) in self.attr_keys:
attrs = {
key: getattr(value, key)
for key in self.attr_keys[type(value)]
}
return json.dumps({'type': type(value).__name__, 'attrs': attrs})
return json.dumps(value)
def process_result_value(self, value, dialect):
if not value:
return value
data = json.loads(value)
if isinstance(data, dict):
type_map = {key.__name__: key for key in self.attr_keys}
return type_map[data['type']](**data['attrs'])
return data
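# Illustrative round trip (not part of the original module). The exact JSON
# key order depends on json.dumps, so the serialized form is only indicative:
#
#   Interval().process_bind_param(datetime.timedelta(days=1), None)
#   # -> '{"type": "timedelta", "attrs": {"days": 1, "seconds": 0, "microseconds": 0}}'
#   Interval().process_result_value('{"type": "timedelta", "attrs": {"days": 1, "seconds": 0, "microseconds": 0}}', None)
#   # -> datetime.timedelta(days=1)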
|
potash/scikit-learn
|
refs/heads/master
|
sklearn/metrics/tests/test_common.py
|
7
|
from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO: Curves are currently not covered by the invariance tests
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" argument, which
#   makes the metric return the mean of the per-sample values when True and
#   their sum when False.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to systematically test some invariance
# properties, e.g. invariance to several input layouts.
#
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": partial(r2_score, multioutput='variance_weighted'),
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Those metrics don't support binary inputs
METRIC_UNDEFINED_BINARY = [
"samples_f0.5_score",
"samples_f1_score",
"samples_f2_score",
"samples_precision_score",
"samples_recall_score",
"coverage_error",
"roc_auc_score",
"micro_roc_auc",
"weighted_roc_auc",
"macro_roc_auc",
"samples_roc_auc",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_loss",
"label_ranking_average_precision_score",
]
# Those metrics don't support multiclass inputs
METRIC_UNDEFINED_MULTICLASS = [
"brier_score_loss",
"matthews_corrcoef_score",
# with default average='binary', multiclass is prohibited
"precision_score",
"recall_score",
"f1_score",
"f2_score",
"f0.5_score",
]
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_BINARY_MULTICLASS = set(METRIC_UNDEFINED_BINARY).union(
set(METRIC_UNDEFINED_MULTICLASS))
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that has a labels argument as well as a
# decision function argument. e.g hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"hamming_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"cohen_kappa_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error", "label_ranking_loss",
]
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "micro_f1_score", "macro_f1_score",
"weighted_recall_score",
# P = R = F = accuracy in multiclass case
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error",
"cohen_kappa_score",
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"cohen_kappa_score",
"confusion_matrix", # Left this one here because the tests in this file do
# not work for confusion_matrix, as its output is a
# matrix instead of a number. Testing of
# confusion_matrix with sample_weight is in
# test_classification.py
"median_absolute_error",
]
@ignore_warnings
def test_symmetry():
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(set(SYMMETRIC_METRICS).union(
NOT_SYMMETRIC_METRICS, THRESHOLDED_METRICS,
METRIC_UNDEFINED_BINARY_MULTICLASS),
set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
# Mix format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
# These mix representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
    # Ensure that classification metrics give the same results with string
    # labels as with numeric labels.
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name in ("log_loss", "hinge_loss", "unnormalized_log_loss",
"brier_score_loss"):
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
            # TODO: those metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
def test_inf_nan_input():
    invalids = [([0, 1], [np.inf, np.inf]),
                ([0, 1], [np.nan, np.nan]),
                ([0, 1], [np.nan, np.inf])]
METRICS = dict()
METRICS.update(THRESHOLDED_METRICS)
METRICS.update(REGRESSION_METRICS)
for metric in METRICS.values():
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"contains NaN, infinity",
metric, y_true, y_score)
# Classification metrics all raise a mixed input exception
for metric in CLASSIFICATION_METRICS.values():
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"Can't handle mix of binary and continuous",
metric, y_true, y_score)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if (name in METRIC_UNDEFINED_BINARY_MULTICLASS or
name in THRESHOLDED_METRICS):
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples,
allow_unlabeled=True)
_, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples,
allow_unlabeled=True)
# To make sure at least one empty label is present
y1 += [0]*n_classes
y2 += [0]*n_classes
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
def test_raise_value_error_multilabel_sequences():
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[0, 1]],
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype='object')]
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
assert_raises(ValueError, metric, seq, seq)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclass_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
# for both random_state 0 and 1, y_true and y_pred has at least one
# unlabelled entry
_, y_true = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples)
_, y_pred = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples)
# To make sure at least one empty label is present
y_true += [0]*n_classes
y_pred += [0]*n_classes
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
# check that unit weights gives the same score as no weight
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg=("Weighted scores for array and list "
"sample_weight input are not equal (%f != %f) for %s") % (
weighted_score, weighted_score_list, name))
# check that integer weights is the same as repeated samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
    # Check that an error is raised if sample_weight.shape[0] !=
    # y_true.shape[0]
assert_raises(Exception, metric, y1, y2,
sample_weight=np.hstack([sample_weight, sample_weight]))
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# binary
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multilabel indicator
_, ya = make_multilabel_classification(n_features=1, n_classes=20,
random_state=0, n_samples=100,
allow_unlabeled=False)
_, yb = make_multilabel_classification(n_features=1, n_classes=20,
random_state=1, n_samples=100,
allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (check_sample_weight_invariance, name, metric, y_true,
y_score)
else:
yield (check_sample_weight_invariance, name, metric, y_true,
y_pred)
@ignore_warnings
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
|
lingxiaoyang/N2O
|
refs/heads/master
|
app/modules/poll/api.py
|
1
|
from django.utils import timezone
from common.headers import *
from app.models import *
VALIDATION_FAILED = L('Form validation failed.')
def urlpatterns():
return patterns('',
url('^list/$', get_list),
url('^add/$', add),
url('^item:(?P<pk>.*)/$', item),
)
@require_GET
def get_list(request):
polls = map(lambda p: p.as_json(), Poll.objects.order_by('-pub_date'))
return JsonResponse(polls)
@require_POST
def add(request):
data = json.loads(request.body)
isValid = True
error = {}
poll_data = Poll().as_json()
poll_data.update({
'question': data.get('question'),
'pub_date': timezone.now(),
})
poll_form = PollForm(poll_data)
poll_instance = poll_form.instance
isValid = isValid and poll_form.is_valid()
error.update(poll_form.errors)
error['choices'] = []
choices_instances = []
for choice in data['choices']:
choice_data = Choice().as_json()
choice_data.update(choice)
choice_form = ChoiceForm(choice_data)
choices_instances.append(choice_form.instance)
isValid = isValid and choice_form.is_valid()
error['choices'].append(choice_form.errors)
if not isValid:
print error
return JsonResponse(error, app_notify=VALIDATION_FAILED, status=400)
else:
poll_instance.save()
for c in choices_instances:
c.poll = poll_instance
c.save()
msg = L("Poll added as #%s.") % poll_instance.pk
return HttpResponse(app_notify=msg)
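# Illustrative note (an assumption based on the fields read above, not a
# documented contract): add() expects a JSON body with a "question" string
# and a "choices" list, where each entry carries the fields of ChoiceForm,
# roughly
#     {"question": "...?", "choices": [{...choice form fields...}, ...]}
# On success each choice instance is saved with its poll foreign key set.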
def item(request, pk):
p = get_object_or_404(Poll, pk=pk)
if request.method == 'GET':
return JsonResponse(p.as_json())
elif request.method == 'POST':
data = json.loads(request.body)
isValid = True
error = {}
#poll_data = Poll().as_json()
#poll_data.update({
## 'question': data.get('question'),
# 'pub_date': timezone.now(),
#})
poll_data = data
poll_data['pub_date'] = timezone.now()
poll_form = PollForm(poll_data, instance=p)
poll_instance = poll_form.instance
isValid = isValid and poll_form.is_valid()
error.update(poll_form.errors)
error['choices'] = []
choices_instances = []
for choice in data['choices']:
cpk = choice.get('pk')
choice_data = choice
if cpk is not None:
# old choice
c = Choice.objects.get(pk=choice['pk'])
choice_form = ChoiceForm(choice_data, instance=c)
else:
choice_form = ChoiceForm(choice_data)
choices_instances.append(choice_form.instance)
isValid = isValid and choice_form.is_valid()
error['choices'].append(choice_form.errors)
if not isValid:
return JsonResponse(error, app_notify=VALIDATION_FAILED, status=400)
else:
poll_instance.save()
for c in choices_instances:
c.poll = poll_instance
c.save()
msg = L("Poll #%s edited successfully.") % poll_instance.pk
return HttpResponse(app_notify=msg)
elif request.method == 'DELETE':
p.delete()
msg = L("Poll #%s deleted.") % pk
return HttpResponse(app_notify=msg)
|
engine95/navel-990
|
refs/heads/master
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack, drawn as a textual but
hierarchical tree of calls. Only the functions' names and the call
times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
		has the name given by func. If this function is not
		found in any parent, create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
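# Illustrative sketch (not part of the original script): building a tiny
# tree by hand with the CallTree API defined above.
#     root = CallTree("Root (Nowhere)", None, None)
#     CallTree.ROOT = root
#     node = root.calls("do_IRQ", "1234.567890")
#     node = node.calls("handle_irq", "1234.567891")
#     print CallTree.ROOT   # prints the hierarchical, indented call tree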
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
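# Illustrative example (an assumption about the tracer output format): a
# line such as
#     <idle>-0     [000]  1234.567890: do_IRQ <-ret_from_intr
# matches the regex above, so parseLine() returns
#     ("1234.567890", "do_IRQ", "ret_from_intr")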
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
Taapat/enigma2-openpli-fulan
|
refs/heads/master
|
lib/python/Components/Converter/RotorPosition.py
|
29
|
# -*- coding: utf-8 -*-
from Converter import Converter
from Components.Element import cached
from Components.config import config
from Tools.Transponder import orbpos
from Components.NimManager import nimmanager
from enigma import eDVBSatelliteEquipmentControl
class RotorPosition(Converter, object):
DEFAULT = 0
WITH_TEXT = 1
TUNER_NAME = 2
def __init__(self, type):
Converter.__init__(self, type)
self.LastRotorPos = config.misc.lastrotorposition.value
config.misc.lastrotorposition.addNotifier(self.forceChanged, initial_call=False)
config.misc.showrotorposition.addNotifier(self.show_hide, initial_call=False)
@cached
def getText(self):
if config.misc.showrotorposition.value != "no":
self.LastRotorPos = config.misc.lastrotorposition.value
(rotor, tuner) = self.isMotorizedTuner()
if rotor:
self.actualizeCfgLastRotorPosition()
if config.misc.showrotorposition.value == "withtext":
return _("Rotor: ") + orbpos(config.misc.lastrotorposition.value)
if config.misc.showrotorposition.value == "tunername":
active_tuner = self.getActiveTuner()
if tuner != active_tuner:
return _("%s:%s") % ("\c0000?0?0" + chr(ord("A")+ tuner), "\c00?0?0?0" + orbpos(config.misc.lastrotorposition.value))
return ""
return orbpos(config.misc.lastrotorposition.value)
return ""
text = property(getText)
def isMotorizedTuner(self):
for x in nimmanager.nim_slots:
for sat in nimmanager.getRotorSatListForNim(x.slot):
if sat[0]:
return (True, x.slot)
return (False, None)
def actualizeCfgLastRotorPosition(self):
if eDVBSatelliteEquipmentControl.getInstance().isRotorMoving():
current_pos = eDVBSatelliteEquipmentControl.getInstance().getTargetOrbitalPosition()
if current_pos != config.misc.lastrotorposition.value:
self.LastRotorPos = config.misc.lastrotorposition.value = current_pos
config.misc.lastrotorposition.save()
def getActiveTuner(self):
if not eDVBSatelliteEquipmentControl.getInstance().isRotorMoving():
service = self.source.service
feinfo = service and service.frontendInfo()
tuner = feinfo and feinfo.getAll(True)
if tuner:
num = tuner.get("tuner_number")
orb_pos = tuner.get("orbital_position")
if isinstance(num, int) and orb_pos:
satList = nimmanager.getRotorSatListForNim(num)
for sat in satList:
if sat[0] == orb_pos:
return num
return ""
def forceChanged(self, configElement=None):
if self.LastRotorPos != config.misc.lastrotorposition.value:
Converter.changed(self, (self.CHANGED_ALL,))
def show_hide(self, configElement=None):
Converter.changed(self, (self.CHANGED_ALL,))
|
huoxudong125/poedit
|
refs/heads/master
|
deps/boost/libs/python/example/quickstart/test_extending.py
|
53
|
# Copyright Ralf W. Grosse-Kunstleve 2006. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
# Using the doctest module here to ensure that the results are as expected.
r'''>>> from extending import *
>>> hi = hello('California')
>>> hi.greet()
'Hello from California'
>>> invite(hi)
'Hello from California! Please come soon!'
>>> hi.invite()
'Hello from California! Please come soon!'
>>> class wordy(hello):
... def greet(self):
... return hello.greet(self) + ', where the weather is fine'
...
>>> hi2 = wordy('Florida')
>>> hi2.greet()
'Hello from Florida, where the weather is fine'
>>> invite(hi2)
'Hello from Florida! Please come soon!'
'''
def run(args = None):
if args is not None:
import sys
sys.argv = args
import doctest, test_extending
return doctest.testmod(test_extending, verbose=True)
if __name__ == '__main__':
import sys
sys.exit(run()[0])
|
bendavis78/django-storages
|
refs/heads/master
|
examples/libcloud_project/urls.py
|
28
|
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'libcloud_project.views.home', name='home'),
# url(r'^libcloud_project/', include('libcloud_project.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
|
SpamExperts/SpamPAD
|
refs/heads/master
|
oa/message.py
|
2
|
"""Internal representation of email messages."""
from builtins import str
from builtins import set
from builtins import list
from builtins import dict
from builtins import object
import re
import time
import email
import hashlib
import calendar
import functools
import ipaddress
import email.utils
import html.parser
import collections
import email.header
import email.errors
import email.mime.base
import email.mime.text
import email.feedparser
import email.mime.multipart
from future.utils import PY3
import oa
import oa.context
from oa.received_parser import ReceivedParser
from oa.rules.ruleset import RuleSet
from oa.regex import Regex
URL_RE = Regex(r"""
(
\b # the preceding character must not be alphanumeric
(?:
(?:
(?:https? | ftp) # capture the protocol
:// # skip the boilerplate
)|
(?= ftp\.[^\.\s<>"'\x7f-\xff] )| # allow the protocol to be missing,
(?= www\.[^\.\s<>"'\x7f-\xff] ) # but only if the rest of the url
# starts with "www.x" or "ftp.x"
)
(?:[^\s<>"'\x7f-\xff]+) # capture the guts
)
""", re.VERBOSE)
IPFRE = Regex(r"[\[ \(]{1}[a-fA-F\d\.\:]{7,}?[\] \n;\)]{1}")
STRICT_CHARSETS = frozenset(("quopri-codec", "quopri", "quoted-printable",
"quotedprintable"))
class _ParseHTML(html.parser.HTMLParser):
"""Extract data from HTML parts."""
def __init__(self, collector):
try:
html.parser.HTMLParser.__init__(self, convert_charrefs=False)
except TypeError:
# Python 2 does not have the convert_charrefs argument.
html.parser.HTMLParser.__init__(self)
self.reset()
self.collector = collector
def handle_data(self, data):
"""Keep track of the data."""
data = data.strip()
if data:
self.collector.append(data)
class _Headers(collections.defaultdict):
"""Like a defaultdict that returns an empty list by default, but the
keys are all case insensitive.
"""
def __init__(self):
collections.defaultdict.__init__(self, list)
def get(self, k, d=None):
return super(_Headers, self).get(k.lower(), d)
def __setitem__(self, key, value):
super(_Headers, self).__setitem__(key.lower(), value)
def __getitem__(self, key):
return super(_Headers, self).__getitem__(key.lower())
def __contains__(self, key):
return super(_Headers, self).__contains__(key.lower())
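# Illustrative usage (not part of the original module): keys are matched
# case-insensitively and missing keys default to an empty list, e.g.
#     headers = _Headers()
#     headers["Subject"].append("hello")
#     assert headers["SUBJECT"] == ["hello"]
#     assert headers.get("x-missing") is None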
class _memoize(object):
"""Memoize the result of the function in a cache. Used to prevent
superfluous parsing of headers.
"""
def __init__(self, cache_name):
self._cache_name = cache_name
def __call__(self, func):
"""Check if the information is available in a cache, if not call the
function and cache the result.
"""
@functools.wraps(func)
def wrapped_func(fself, name):
from oa.config import LAZY_MODE
if LAZY_MODE:
return func(fself, name)
cache = getattr(fself, self._cache_name)
result = cache.get(name)
if result is None:
result = func(fself, name)
cache[name] = result
return result
return wrapped_func
DEFAULT_SENDERH = (
"X-Sender", "X-Envelope-From", "Envelope-Sender", "Return-Path"
)
class Message(oa.context.MessageContext):
"""Internal representation of an email message. Used for rule matching."""
def __init__(self, global_context, raw_msg):
"""Parse the message, extracts and decode all headers and all
text parts.
"""
self.missing_boundary_header = False
self.missing_header_body_separator = False
super(Message, self).__init__(global_context)
self.raw_msg = self.translate_line_breaks(raw_msg)
self.msg = email.message_from_string(self.raw_msg)
self.headers = _Headers()
self.raw_headers = _Headers()
self.addr_headers = _Headers()
self.name_headers = _Headers()
self.mime_headers = _Headers()
self.received_headers = list()
self.raw_mime_headers = _Headers()
self.header_ips = _Headers()
self.text = ""
self.raw_text = ""
self.uri_list = set()
self.score = 0
self.rules_checked = dict()
self.interpolate_data = dict()
self.rules_descriptions = dict()
self.plugin_tags = dict()
# Data
self.sender_address = ""
self.hostname_with_ip = list()
self.internal_relays = []
self.external_relays = []
self.last_internal_relay_index = 0
self.last_trusted_relay_index = 0
self.trusted_relays = []
self.untrusted_relays = []
self._parse_message()
self._hook_parsed_metadata()
def clear_matches(self):
"""Clear any already checked rules."""
self.rules_checked = dict()
self.score = 0
@staticmethod
def translate_line_breaks(text):
"""Convert any EOL style to Linux EOL."""
text = text.replace("\r\n", "\n")
return text.replace("\r", "\n")
@staticmethod
def normalize_html_part(payload):
"""Strip all HTML tags."""
data = list()
stripper = _ParseHTML(data)
try:
stripper.feed(payload)
except (UnicodeDecodeError, html.parser.HTMLParseError):
# We can't parse the HTML, so just strip it. This is still
# better than including generic HTML/CSS text.
pass
return data
@staticmethod
def _decode_header(header):
"""Decodes an email header and returns it as a string. Any parts of
the header that cannot be decoded are simply ignored.
"""
parts = list()
try:
decoded_header = email.header.decode_header(header)
except (ValueError, email.header.HeaderParseError):
return
for value, encoding in decoded_header:
if encoding:
try:
parts.append(value.decode(encoding, "ignore"))
except (LookupError, UnicodeError, AssertionError):
continue
else:
try:
parts.append(value.decode("utf-8", "ignore"))
except AttributeError:
parts.append(value)
return "".join(parts)
def get_raw_header(self, header_name):
"""Get a list of raw headers with this name."""
        # This is just for consistency; the raw headers should have been
        # parsed together with the message.
return self.raw_headers.get(header_name, list())
def get_headers(self, header_name):
"""Get a list of headers which were added by plugins"""
return self.headers.get(header_name, list())
@_memoize("headers")
def get_decoded_header(self, header_name):
"""Get a list of decoded headers with this name."""
values = list()
for value in self.get_raw_header(header_name):
values.append(self._decode_header(value))
for value in self.get_headers(header_name):
values.append(value)
return values
def get_untrusted_ips(self):
"""Returns the untrusted IPs based on the users trusted
network settings.
:return: A list of `ipaddress.ip_address`.
"""
ips = [ip for ip in self.get_header_ips()
if ip not in self.ctxt.networks.trusted]
return ips
def get_header_ips(self):
values = list()
for header in self.received_headers:
values.append(ipaddress.ip_address(header["ip"]))
return values
@_memoize("addr_headers")
def get_addr_header(self, header_name):
"""Get a list of the first addresses from this header."""
values = list()
for value in self.get_decoded_header(header_name):
for dummy, addr in email.utils.getaddresses([value]):
if addr:
values.append(addr)
break
return values
def get_all_addr_header(self, header_name):
"""Get a list of all the addresses from this header."""
values = list()
for value in self.get_decoded_header(header_name):
for dummy, addr in email.utils.getaddresses([value]):
if addr:
values.append(addr)
return values
def get_all_from_headers_addr(self):
all_from_headers = ['From', 'Envelope-Sender',
'Resent-Sender', 'X-Envelope-From',
'EnvelopeFrom', 'Resent-From']
sender_addr = self.sender_address
for header in all_from_headers:
if header == 'EnvelopeFrom' and sender_addr:
yield sender_addr
else:
for addr in self.get_all_addr_header(header):
yield addr
@_memoize("name_headers")
def get_name_header(self, header_name):
"""Get a list of the first names from this header."""
values = list()
for value in self.get_decoded_header(header_name):
for name, dummy in email.utils.getaddresses([value]):
if name:
values.append(name)
break
return values
def get_raw_mime_header(self, header_name):
"""Get a list of raw MIME headers with this name."""
        # This is just for consistency; the raw headers should have been
        # parsed together with the message.
return self.raw_mime_headers.get(header_name, list())
@_memoize("mime_headers")
def get_decoded_mime_header(self, header_name):
"""Get a list of raw MIME headers with this name."""
values = list()
for value in self.get_raw_mime_header(header_name):
values.append(self._decode_header(value))
return values
def iter_decoded_headers(self):
"""Iterate through all the decoded headers.
Yields strings like "<header_name>: <header_value>"
"""
for header_name in self.raw_headers:
for value in self.get_decoded_header(header_name):
yield "%s: %s" % (header_name, value)
def _create_plugin_tags(self, header):
for key, value in header.items():
self.plugin_tags[key.upper()] = value
def _parse_sender(self):
"""Extract the envelope sender from the message."""
always_trust_envelope_from = self.ctxt.conf[
'always_trust_envelope_sender']
headers = self.ctxt.conf["envelope_sender_header"] or DEFAULT_SENDERH
if self.external_relays:
sender = self.external_relays[0].get("envfrom")
if sender:
self.sender_address = sender.strip()
return
else:
if self.trusted_relays and not always_trust_envelope_from:
sender = self.trusted_relays[-1].get("envfrom")
if sender:
self.sender_address = sender.strip()
return
if self.untrusted_relays:
sender = self.untrusted_relays[0].get("envfrom")
if sender:
self.sender_address = sender.strip()
return
for sender_header in headers:
try:
sender = self.get_addr_header(sender_header)[0]
except IndexError:
continue
if sender:
self.sender_address = sender.strip()
self.ctxt.log.debug("Using %s as sender: %s",
sender_header, sender)
return
return
def _parse_relays(self, relays):
"""Walks though a relays list to extract
[un]trusted/internal/external relays"""
is_trusted = True
is_internal = True
found_msa = False
for position, relay in enumerate(relays):
relay['msa'] = 0
if relay['ip']:
ip = ipaddress.ip_address(str(relay['ip']))
in_internal = ip in self.ctxt.networks.internal
in_trusted = ip in self.ctxt.networks.trusted
in_msa = ip in self.ctxt.networks.msa
has_auth = relay.get("auth", None)
if is_trusted and not found_msa:
if self.ctxt.networks.configured:
if not in_trusted and not has_auth:
is_trusted = False
is_internal = False
else:
if is_internal and not has_auth and not in_internal:
is_internal = False
if in_msa:
relay['msa'] = 1
found_msa = True
elif not ip.is_private and not has_auth:
is_internal = False
is_trusted = False
relay['intl'] = int(is_internal)
if is_internal:
self.internal_relays.append(relay)
self.last_internal_relay_index = position
else:
self.external_relays.append(relay)
if is_trusted:
self.trusted_relays.append(relay)
self.last_trusted_relay_index = position
else:
self.untrusted_relays.append(relay)
tag_template = ("[ ip={ip} rdns={rdns} helo={helo} by={by} "
"ident={ident} envfrom={envfrom} intl={intl} id={id} auth={auth} "
"msa={msa} ]")
relays_tags = {
"RELAYSTRUSTED": " ".join([tag_template.format(**x)
for x in self.trusted_relays]),
"RELAYSUNTRUSTED": " ".join([tag_template.format(**x)
for x in self.untrusted_relays]),
"RELAYSINTERNAL": " ".join([tag_template.format(**x)
for x in self.internal_relays]),
"RELAYSEXTERNAL": " ".join([tag_template.format(**x)
for x in self.external_relays]),
}
if self.external_relays:
relays_tags.update({
"LASTEXTERNALIP": self.external_relays[-1]['ip'],
"LASTEXTERNALRDNS": self.external_relays[-1]['rdns'],
"LASTEXTERNALHELO": self.external_relays[-1]['helo']
})
self._create_plugin_tags(relays_tags)
def _parse_message(self):
"""Parse the message."""
self._hook_check_start()
# Dump the message raw headers
for line in self.raw_msg.splitlines():
if not email.feedparser.headerRE.match(line):
# If we saw the RFC defined header/body separator
# (i.e. newline), just throw it away. Otherwise the line is
# part of the body so push it back.
if line.strip():
self.missing_header_body_separator = True
break
for name, raw_value in self.msg._headers:
self.raw_headers[name].append(raw_value)
# XXX This is strange, but it's what SA does.
# The body starts with the Subject header(s)
body = list(self.get_decoded_header("Subject"))
raw_body = list()
for payload, part in self._iter_parts(self.msg):
if not part._headers:
self.missing_boundary_header = True
# Extract any MIME headers
for name, raw_value in part._headers:
self.raw_mime_headers[name].append(raw_value)
text = None
if payload is not None:
# this must be a text part
self.uri_list.update(set(URL_RE.findall(payload)))
if part.get_content_subtype() == "html":
text = self.normalize_html_part(payload.replace("\n", " "))
text = " ".join(text)
body.append(text)
raw_body.append(payload)
else:
text = payload.replace("\n", " ")
body.append(text)
raw_body.append(payload)
self._hook_extract_metadata(payload, text, part)
self.text = " ".join(body)
self.raw_text = "\n".join(raw_body)
received_headers = self.get_decoded_header("Received")
for header in self.ctxt.conf["originating_ip_headers"]:
headers = ["X-ORIGINATING-IP: %s" % x
for x in self.get_decoded_header(header)]
received_headers.extend(headers)
received_obj = ReceivedParser(received_headers)
self.received_headers = received_obj.received
self._parse_relays(self.received_headers)
self._parse_sender()
try:
self._create_plugin_tags(self.received_headers[0])
except IndexError:
pass
for header in self.received_headers:
self.hostname_with_ip.append((header["rdns"], header["ip"]))
@staticmethod
def _iter_parts(msg):
"""Extract and decode the text parts from the parsed email message.
For non-text parts the payload will be None.
Yields (payload, part)
"""
for part in msg.walk():
if part.get_content_maintype() == "text":
payload = part.get_payload(decode=True)
charset = part.get_content_charset()
errors = "ignore"
if not charset:
charset = "ascii"
elif charset.lower().replace("_", "-") in STRICT_CHARSETS:
errors = "strict"
try:
payload = payload.decode(charset, errors)
except (LookupError, UnicodeError, AssertionError):
try:
payload = payload.decode("ascii", "ignore")
except UnicodeError:
continue
yield payload, part
else:
yield None, part
def get_from_addresses(self):
"""Get addresses from 'Resent-From' header,
and if there are no addresses, get from
all FROM_HEADERS.
"""
addresses = self.get_all_addr_header('Resent-From')
if addresses:
for address in addresses:
yield address
else:
for key in FROM_HEADERS:
for address in self.get_all_addr_header(key):
yield address
def get_to_addresses(self):
"""Get addresses from 'Resent-To' and 'Resent-Cc'
headers, ad if there are no addresses, get from
all TO_HEADERS.
"""
addresses = self.get_all_addr_header('Resent-To')
addresses.extend(self.get_all_addr_header('Resent-Cc'))
if addresses:
for address in addresses:
yield address
else:
for key in TO_HEADERS:
for address in self.get_all_addr_header(key):
yield address
@property
def msgid(self):
"""Generate a unique ID for the message.
If the message already has an ID that should be unique, in the
Message-ID header, then simply use that. Otherwise, generate an
ID from the Date header and message content."""
# SA potentially produces multiple IDs, and checks them both.
# That seems an unnecessary complication, so just return the
# first one that we manage to generate.
msgid = self.msg[u"Message-ID"]
if msgid and not re.match(r"^\s*<\s*(?:\@sa_generated)?>.*$", msgid):
# Remove \r and < and > prefix / suffixes.
return msgid.strip().strip(u"<").strip(u">")
# Use the hexdigest of a SHA1 hash of (Date: and top N bytes of
# body), where N is min(1024 bytes, 1/2 of body length).
date = self.msg[u"Date"] or u"None"
body = self.msg.as_string().split("\n\n", 1)[1]
if len(body) > 64:
keep = 1024 if len(body) > 2048 else (len(body) // 2)
body = body[:keep]
# Strip all CR and LF so that testing midstream from MTA and
# post delivery don't generate different IDs simply because of
# LF<->CR<->CRLF changes.
body = body.replace("\n", "").replace("\r", "")
combined = "{date}\x00{body}".format(date=date, body=body)
msgid = u"%s@sa_generated" % hashlib.sha1(
combined.encode('utf-8')
).hexdigest()
return msgid
@property
def receive_date(self):
"""Get the date from the headers."""
received = self.msg.get_all("Received") or list()
for header in received:
try:
ts = header.rsplit(";", 1)[1]
except IndexError:
continue
ts = email.utils.parsedate(ts)
return calendar.timegm(ts)
# SA will look in other headers too. Perhaps we should also?
return time.time()
FROM_HEADERS = ('From', "Envelope-Sender", 'Resent-From', 'X-Envelope-From',
'EnvelopeFrom')
TO_HEADERS = ('To', 'Resent-To', 'Resent-Cc', 'Apparently-To', 'Delivered-To',
'Envelope-Recipients', 'Apparently-Resent-To', 'X-Envelope-To',
'Envelope-To',
'X-Delivered-To', 'X-Original-To', 'X-Rcpt-To', 'X-Real-To',
'Cc')
|
MarcJoan/django
|
refs/heads/master
|
tests/queryset_pickle/tests.py
|
209
|
from __future__ import unicode_literals
import datetime
import pickle
import unittest
import warnings
from django.test import TestCase
from django.utils import six
from django.utils.encoding import force_text
from django.utils.version import get_version
from .models import Container, Event, Group, Happening, M2MModel
class PickleabilityTestCase(TestCase):
def setUp(self):
Happening.objects.create() # make sure the defaults are working (#20158)
def assert_pickles(self, qs):
self.assertEqual(list(pickle.loads(pickle.dumps(qs))), list(qs))
def test_related_field(self):
g = Group.objects.create(name="Ponies Who Own Maybachs")
self.assert_pickles(Event.objects.filter(group=g.id))
def test_datetime_callable_default_all(self):
self.assert_pickles(Happening.objects.all())
def test_datetime_callable_default_filter(self):
self.assert_pickles(Happening.objects.filter(when=datetime.datetime.now()))
def test_string_as_default(self):
self.assert_pickles(Happening.objects.filter(name="test"))
def test_standalone_method_as_default(self):
self.assert_pickles(Happening.objects.filter(number1=1))
@unittest.skipIf(six.PY2, "Field doesn't exist on Python 2.")
def test_staticmethod_as_default(self):
self.assert_pickles(Happening.objects.filter(number2=1))
def test_filter_reverse_fk(self):
self.assert_pickles(Group.objects.filter(event=1))
def test_doesnotexist_exception(self):
# Ticket #17776
original = Event.DoesNotExist("Doesn't exist")
unpickled = pickle.loads(pickle.dumps(original))
# Exceptions are not equal to equivalent instances of themselves, so
# can't just use assertEqual(original, unpickled)
self.assertEqual(original.__class__, unpickled.__class__)
self.assertEqual(original.args, unpickled.args)
def test_manager_pickle(self):
pickle.loads(pickle.dumps(Happening.objects))
def test_model_pickle(self):
"""
Test that a model not defined on module level is pickleable.
"""
original = Container.SomeModel(pk=1)
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
# Also, deferred dynamic model works
Container.SomeModel.objects.create(somefield=1)
original = Container.SomeModel.objects.defer('somefield')[0]
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
self.assertEqual(original.somefield, reloaded.somefield)
def test_model_pickle_m2m(self):
"""
        Intentionally test the automatically created 'through' model.
"""
m1 = M2MModel.objects.create()
g1 = Group.objects.create(name='foof')
m1.groups.add(g1)
m2m_through = M2MModel._meta.get_field('groups').remote_field.through
original = m2m_through.objects.get()
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
def test_model_pickle_dynamic(self):
class Meta:
proxy = True
dynclass = type(str("DynamicEventSubclass"), (Event, ),
{'Meta': Meta, '__module__': Event.__module__})
original = dynclass(pk=1)
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
self.assertIs(reloaded.__class__, dynclass)
def test_specialized_queryset(self):
self.assert_pickles(Happening.objects.values('name'))
self.assert_pickles(Happening.objects.values('name').dates('when', 'year'))
# With related field (#14515)
self.assert_pickles(
Event.objects.select_related('group').order_by('title').values_list('title', 'group__name')
)
def test_pickle_prefetch_related_idempotence(self):
g = Group.objects.create(name='foo')
groups = Group.objects.prefetch_related('event_set')
# First pickling
groups = pickle.loads(pickle.dumps(groups))
self.assertQuerysetEqual(groups, [g], lambda x: x)
# Second pickling
groups = pickle.loads(pickle.dumps(groups))
self.assertQuerysetEqual(groups, [g], lambda x: x)
def test_pickle_prefetch_related_with_m2m_and_objects_deletion(self):
"""
#24831 -- Cached properties on ManyToOneRel created in QuerySet.delete()
caused subsequent QuerySet pickling to fail.
"""
g = Group.objects.create(name='foo')
m2m = M2MModel.objects.create()
m2m.groups.add(g)
Group.objects.all().delete()
m2ms = M2MModel.objects.prefetch_related('groups')
m2ms = pickle.loads(pickle.dumps(m2ms))
self.assertQuerysetEqual(m2ms, [m2m], lambda x: x)
def test_missing_django_version_unpickling(self):
"""
#21430 -- Verifies a warning is raised for querysets that are
unpickled without a Django version
"""
qs = Group.missing_django_version_objects.all()
with warnings.catch_warnings(record=True) as recorded:
pickle.loads(pickle.dumps(qs))
msg = force_text(recorded.pop().message)
self.assertEqual(msg,
"Pickled queryset instance's Django version is not specified.")
def test_unsupported_unpickle(self):
"""
#21430 -- Verifies a warning is raised for querysets that are
unpickled with a different Django version than the current
"""
qs = Group.previous_django_version_objects.all()
with warnings.catch_warnings(record=True) as recorded:
pickle.loads(pickle.dumps(qs))
msg = force_text(recorded.pop().message)
self.assertEqual(
msg,
"Pickled queryset instance's Django version 1.0 does not "
"match the current version %s." % get_version()
)
|
owenmorris/pylucene
|
refs/heads/master
|
python/collections.py
|
7
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lucene import JArray, JavaError
from java.lang import IllegalStateException, IndexOutOfBoundsException
from java.util import NoSuchElementException
from org.apache.pylucene.util import \
PythonSet, PythonList, PythonIterator, PythonListIterator
class JavaSet(PythonSet):
"""
This class implements java.util.Set around a Python set instance it wraps.
"""
def __init__(self, _set):
super(JavaSet, self).__init__()
self._set = _set
def __contains__(self, obj):
return obj in self._set
def __len__(self):
return len(self._set)
def __iter__(self):
return iter(self._set)
def add(self, obj):
if obj not in self._set:
self._set.add(obj)
return True
return False
def addAll(self, collection):
size = len(self._set)
self._set.update(collection)
return len(self._set) > size
def clear(self):
self._set.clear()
def contains(self, obj):
return obj in self._set
def containsAll(self, collection):
for obj in collection:
if obj not in self._set:
return False
return True
def equals(self, collection):
if type(self) is type(collection):
return self._set == collection._set
return False
def isEmpty(self):
return len(self._set) == 0
def iterator(self):
class _iterator(PythonIterator):
def __init__(_self):
super(_iterator, _self).__init__()
_self._iterator = iter(self._set)
def hasNext(_self):
if hasattr(_self, '_next'):
return True
try:
_self._next = _self._iterator.next()
return True
except StopIteration:
return False
def next(_self):
if hasattr(_self, '_next'):
next = _self._next
del _self._next
else:
next = _self._iterator.next()
return next
return _iterator()
def remove(self, obj):
try:
self._set.remove(obj)
return True
except KeyError:
return False
def removeAll(self, collection):
result = False
for obj in collection:
try:
self._set.remove(obj)
result = True
except KeyError:
pass
return result
def retainAll(self, collection):
result = False
for obj in list(self._set):
if obj not in collection:
self._set.remove(obj)
result = True
return result
def size(self):
return len(self._set)
def toArray(self): # JavaSet
return list(self._set)
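# Illustrative sketch (an assumption; requires an initialized PyLucene JVM,
# since PythonSet is a JCC-generated base class): JavaSet simply mirrors
# the wrapped Python set, e.g.
#     s = JavaSet(set([1, 2]))
#     s.add(3)            # returns True, 3 was not yet in the set
#     s.contains(2)       # returns True
#     s.toArray()         # returns [1, 2, 3] in some order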
class JavaListIterator(PythonListIterator):
"""
This class implements java.util.ListIterator for a Python list instance it
wraps. (simple bidirectional iterator)
"""
def __init__(self, _lst, index=0):
super(JavaListIterator, self).__init__()
self._lst = _lst
self._lastIndex = -1 # keep state for remove/set
self.index = index
def next(self):
if self.index >= len(self._lst):
raise JavaError, NoSuchElementException(str(self.index))
result = self._lst[self.index]
self._lastIndex = self.index
self.index += 1
return result
def previous(self):
if self.index <= 0:
raise JavaError, NoSuchElementException(str(self.index - 1))
self.index -= 1
self._lastIndex = self.index
return self._lst[self.index]
def hasPrevious(self):
return self.index > 0
def hasNext(self):
return self.index < len(self._lst)
def nextIndex(self):
return min(self.index, len(self._lst))
def previousIndex(self):
return max(-1, self.index - 1)
def add(self, element):
"""
Inserts the specified element into the list.
The element is inserted immediately before the next element
that would be returned by next, if any, and after the next
element that would be returned by previous, if any.
"""
if self._lastIndex < 0:
raise JavaError, IllegalStateException("add")
self._lst.insert(self.index, element)
self.index += 1
self._lastIndex = -1 # invalidate state
def remove(self):
"""
Removes from the list the last element that
was returned by next or previous.
"""
if self._lastIndex < 0:
raise JavaError, IllegalStateException("remove")
del self._lst[self._lastIndex]
self._lastIndex = -1 # invalidate state
def set(self, element):
"""
Replaces the last element returned by next or previous
with the specified element.
"""
if self._lastIndex < 0:
raise JavaError, IllegalStateException("set")
self._lst[self._lastIndex] = element
def __iter__(self):
return self
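# Illustrative sketch (an assumption; requires an initialized PyLucene JVM):
# the iterator keeps a cursor and remembers the last returned index so that
# remove()/set() act on that element, e.g.
#     it = JavaListIterator(["a", "b", "c"])
#     it.next()           # "a"
#     it.next()           # "b"
#     it.remove()         # deletes "b" from the wrapped list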
class JavaList(PythonList):
"""
This class implements java.util.List around a Python list instance it wraps.
"""
def __init__(self, _lst):
super(JavaList, self).__init__()
self._lst = _lst
def __contains__(self, obj):
return obj in self._lst
def __len__(self):
return len(self._lst)
def __iter__(self):
return iter(self._lst)
def add(self, index, obj):
self._lst.insert(index, obj)
def addAll(self, collection):
size = len(self._lst)
self._lst.extend(collection)
return len(self._lst) > size
def addAll(self, index, collection):
size = len(self._lst)
self._lst[index:index] = collection
return len(self._lst) > size
def clear(self):
del self._lst[:]
def contains(self, obj):
return obj in self._lst
def containsAll(self, collection):
for obj in collection:
if obj not in self._lst:
return False
return True
def equals(self, collection):
if type(self) is type(collection):
return self._lst == collection._lst
return False
def get(self, index):
if index < 0 or index >= self.size():
raise JavaError, IndexOutOfBoundsException(str(index))
return self._lst[index]
def indexOf(self, obj):
try:
return self._lst.index(obj)
except ValueError:
return -1
def isEmpty(self):
return len(self._lst) == 0
def iterator(self):
class _iterator(PythonIterator):
def __init__(_self):
super(_iterator, _self).__init__()
_self._iterator = iter(self._lst)
def hasNext(_self):
if hasattr(_self, '_next'):
return True
try:
_self._next = _self._iterator.next()
return True
except StopIteration:
return False
def next(_self):
if hasattr(_self, '_next'):
next = _self._next
del _self._next
else:
next = _self._iterator.next()
return next
return _iterator()
def lastIndexOf(self, obj):
i = len(self._lst)-1
while (i>=0):
if obj.equals(self._lst[i]):
break
i -= 1
return i
def listIterator(self, index=0):
return JavaListIterator(self._lst, index)
def remove(self, obj_or_index):
        if type(obj_or_index) is type(1):
            return self.removeAt(int(obj_or_index))
        return self.removeObject(obj_or_index)
def removeAt(self, pos):
"""
Removes the element at the specified position in this list.
Note: private method called from Java via remove(int index)
index is already checked (or IndexOutOfBoundsException thrown)
"""
try:
el = self._lst[pos]
del self._lst[pos]
return el
except IndexError:
# should not happen
return None
def removeObject(self, obj):
"""
Removes the first occurrence of the specified object
from this list, if it is present
"""
try:
self._lst.remove(obj)
return True
except ValueError:
return False
def removeAll(self, collection):
result = False
for obj in collection:
            if self.removeObject(obj):
result = True
return result
def retainAll(self, collection):
result = False
        for obj in list(self._lst):
            if obj not in collection and self.removeObject(obj):
result = True
return result
def size(self):
return len(self._lst)
def toArray(self):
return self._lst
def subListChecked(self, fromIndex, toIndex):
"""
Note: private method called from Java via subList()
from/to index are already checked (or IndexOutOfBoundsException thrown)
        also IllegalArgumentException is thrown if the endpoint indices
are out of order (fromIndex > toIndex)
"""
sublst = self._lst[fromIndex:toIndex]
return JavaList(sublst)
def set(self, index, obj):
if index < 0 or index >= self.size():
raise JavaError, IndexOutOfBoundsException(str(index))
self._lst[index] = obj
|
dguo-coursera/arcanist
|
refs/heads/master
|
externals/pep8/pep8.py
|
1
|
#!/usr/bin/env python
# pep8.py - Check Python source code formatting, according to PEP 8
# Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net>
# Copyright (C) 2009-2014 Florent Xicluna <florent.xicluna@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
r"""
Check Python source code formatting, according to PEP 8.
For usage and a list of options, try this:
$ python pep8.py -h
This program and its regression test suite live here:
http://github.com/jcrocholl/pep8
Groups of errors and warnings:
E errors
W warnings
100 indentation
200 whitespace
300 blank lines
400 imports
500 line length
600 deprecation
700 statements
900 syntax error
"""
from __future__ import with_statement
__version__ = '1.5.7'
import os
import sys
import re
import time
import inspect
import keyword
import tokenize
from optparse import OptionParser
from fnmatch import fnmatch
try:
from configparser import RawConfigParser
from io import TextIOWrapper
except ImportError:
from ConfigParser import RawConfigParser
DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__'
DEFAULT_IGNORE = 'E123,E226,E24'
if sys.platform == 'win32':
DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
else:
DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
os.path.expanduser('~/.config'), 'pep8')
PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite')
MAX_LINE_LENGTH = 79
REPORT_FORMAT = {
'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s',
'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s',
}
PyCF_ONLY_AST = 1024
SINGLETONS = frozenset(['False', 'None', 'True'])
KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS
UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-'])
ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-'])
WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%'])
WS_NEEDED_OPERATORS = frozenset([
'**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>',
'%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '='])
WHITESPACE = frozenset(' \t')
NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT])
# ERRORTOKEN is triggered by backticks in Python 3
SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN])
BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines']
INDENT_REGEX = re.compile(r'([ \t]*)')
RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,')
RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,.*,\s*\w+\s*$')
ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b')
DOCSTRING_REGEX = re.compile(r'u?r?["\']')
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?: |\t)')
COMPARE_SINGLETON_REGEX = re.compile(r'([=!]=)\s*(None|False|True)')
COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+[^[({ ]+\s+(in|is)\s')
COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s*type(?:s.\w+Type'
r'|\s*\(\s*([^)]*[^ )])\s*\))')
KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS))
OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)')
LAMBDA_REGEX = re.compile(r'\blambda\b')
HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$')
# Work around Python < 2.6 behaviour, which does not generate NL after
# a comment which is on a line by itself.
COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n'
##############################################################################
# Plugins (check functions) for physical lines
##############################################################################
def tabs_or_spaces(physical_line, indent_char):
r"""Never mix tabs and spaces.
The most popular way of indenting Python is with spaces only. The
second-most popular way is with tabs only. Code indented with a mixture
of tabs and spaces should be converted to using spaces exclusively. When
invoking the Python command line interpreter with the -t option, it issues
warnings about code that illegally mixes tabs and spaces. When using -tt
these warnings become errors. These options are highly recommended!
Okay: if a == 0:\n a = 1\n b = 1
E101: if a == 0:\n a = 1\n\tb = 1
"""
indent = INDENT_REGEX.match(physical_line).group(1)
for offset, char in enumerate(indent):
if char != indent_char:
return offset, "E101 indentation contains mixed spaces and tabs"
def tabs_obsolete(physical_line):
r"""For new projects, spaces-only are strongly recommended over tabs.
Okay: if True:\n return
W191: if True:\n\treturn
"""
indent = INDENT_REGEX.match(physical_line).group(1)
if '\t' in indent:
return indent.index('\t'), "W191 indentation contains tabs"
def trailing_whitespace(physical_line):
r"""Trailing whitespace is superfluous.
The warning returned varies on whether the line itself is blank, for easier
filtering for those who want to indent their blank lines.
Okay: spam(1)\n#
W291: spam(1) \n#
W293: class Foo(object):\n \n bang = 12
"""
physical_line = physical_line.rstrip('\n') # chr(10), newline
physical_line = physical_line.rstrip('\r') # chr(13), carriage return
physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L
stripped = physical_line.rstrip(' \t\v')
if physical_line != stripped:
if stripped:
return len(stripped), "W291 trailing whitespace"
else:
return 0, "W293 blank line contains whitespace"
def trailing_blank_lines(physical_line, lines, line_number, total_lines):
r"""Trailing blank lines are superfluous.
Okay: spam(1)
W391: spam(1)\n
However the last line should end with a new line (warning W292).
"""
if line_number == total_lines:
stripped_last_line = physical_line.rstrip()
if not stripped_last_line:
return 0, "W391 blank line at end of file"
if stripped_last_line == physical_line:
return len(physical_line), "W292 no newline at end of file"
def maximum_line_length(physical_line, max_line_length, multiline):
r"""Limit all lines to a maximum of 79 characters.
There are still many devices around that are limited to 80 character
lines; plus, limiting windows to 80 characters makes it possible to have
several windows side-by-side. The default wrapping on such devices looks
ugly. Therefore, please limit all lines to a maximum of 79 characters.
For flowing long blocks of text (docstrings or comments), limiting the
length to 72 characters is recommended.
Reports error E501.
"""
line = physical_line.rstrip()
length = len(line)
if length > max_line_length and not noqa(line):
# Special case for long URLs in multi-line docstrings or comments,
# but still report the error when the 72 first chars are whitespaces.
chunks = line.split()
if ((len(chunks) == 1 and multiline) or
(len(chunks) == 2 and chunks[0] == '#')) and \
len(line) - len(chunks[-1]) < max_line_length - 7:
return
if hasattr(line, 'decode'): # Python 2
# The line could contain multi-byte characters
try:
length = len(line.decode('utf-8'))
except UnicodeError:
pass
if length > max_line_length:
return (max_line_length, "E501 line too long "
"(%d > %d characters)" % (length, max_line_length))
##############################################################################
# Plugins (check functions) for logical lines
##############################################################################
def blank_lines(logical_line, blank_lines, indent_level, line_number,
blank_before, previous_logical, previous_indent_level):
r"""Separate top-level function and class definitions with two blank lines.
Method definitions inside a class are separated by a single blank line.
Extra blank lines may be used (sparingly) to separate groups of related
functions. Blank lines may be omitted between a bunch of related
one-liners (e.g. a set of dummy implementations).
Use blank lines in functions, sparingly, to indicate logical sections.
Okay: def a():\n pass\n\n\ndef b():\n pass
Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass
E301: class Foo:\n b = 0\n def bar():\n pass
E302: def a():\n pass\n\ndef b(n):\n pass
E303: def a():\n pass\n\n\n\ndef b(n):\n pass
E303: def a():\n\n\n\n pass
E304: @decorator\n\ndef a():\n pass
"""
if line_number < 3 and not previous_logical:
return # Don't expect blank lines before the first line
if previous_logical.startswith('@'):
if blank_lines:
yield 0, "E304 blank lines found after function decorator"
elif blank_lines > 2 or (indent_level and blank_lines == 2):
yield 0, "E303 too many blank lines (%d)" % blank_lines
elif logical_line.startswith(('def ', 'class ', '@')):
if indent_level:
if not (blank_before or previous_indent_level < indent_level or
DOCSTRING_REGEX.match(previous_logical)):
yield 0, "E301 expected 1 blank line, found 0"
elif blank_before != 2:
yield 0, "E302 expected 2 blank lines, found %d" % blank_before
def extraneous_whitespace(logical_line):
r"""Avoid extraneous whitespace.
Avoid extraneous whitespace in these situations:
- Immediately inside parentheses, brackets or braces.
- Immediately before a comma, semicolon, or colon.
Okay: spam(ham[1], {eggs: 2})
E201: spam( ham[1], {eggs: 2})
E201: spam(ham[ 1], {eggs: 2})
E201: spam(ham[1], { eggs: 2})
E202: spam(ham[1], {eggs: 2} )
E202: spam(ham[1 ], {eggs: 2})
E202: spam(ham[1], {eggs: 2 })
E203: if x == 4: print x, y; x, y = y , x
E203: if x == 4: print x, y ; x, y = y, x
E203: if x == 4 : print x, y; x, y = y, x
"""
line = logical_line
for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line):
text = match.group()
char = text.strip()
found = match.start()
if text == char + ' ':
# assert char in '([{'
yield found + 1, "E201 whitespace after '%s'" % char
elif line[found - 1] != ',':
code = ('E202' if char in '}])' else 'E203') # if char in ',;:'
yield found, "%s whitespace before '%s'" % (code, char)
def whitespace_around_keywords(logical_line):
r"""Avoid extraneous whitespace around keywords.
Okay: True and False
E271: True and False
E272: True and False
E273: True and\tFalse
E274: True\tand False
"""
for match in KEYWORD_REGEX.finditer(logical_line):
before, after = match.groups()
if '\t' in before:
yield match.start(1), "E274 tab before keyword"
elif len(before) > 1:
yield match.start(1), "E272 multiple spaces before keyword"
if '\t' in after:
yield match.start(2), "E273 tab after keyword"
elif len(after) > 1:
yield match.start(2), "E271 multiple spaces after keyword"
def missing_whitespace(logical_line):
r"""Each comma, semicolon or colon should be followed by whitespace.
Okay: [a, b]
Okay: (3,)
Okay: a[1:4]
Okay: a[:4]
Okay: a[1:]
Okay: a[1:4:2]
E231: ['a','b']
E231: foo(bar,baz)
E231: [{'a':'b'}]
"""
line = logical_line
for index in range(len(line) - 1):
char = line[index]
if char in ',;:' and line[index + 1] not in WHITESPACE:
before = line[:index]
if char == ':' and before.count('[') > before.count(']') and \
before.rfind('{') < before.rfind('['):
continue # Slice syntax, no space required
if char == ',' and line[index + 1] == ')':
continue # Allow tuple with only one element: (3,)
yield index, "E231 missing whitespace after '%s'" % char
def indentation(logical_line, previous_logical, indent_char,
indent_level, previous_indent_level):
r"""Use 4 spaces per indentation level.
For really old code that you don't want to mess up, you can continue to
use 8-space tabs.
Okay: a = 1
Okay: if a == 0:\n a = 1
E111: a = 1
Okay: for item in items:\n pass
E112: for item in items:\npass
Okay: a = 1\nb = 2
E113: a = 1\n b = 2
"""
if indent_char == ' ' and indent_level % 4:
yield 0, "E111 indentation is not a multiple of four"
indent_expect = previous_logical.endswith(':')
if indent_expect and indent_level <= previous_indent_level:
yield 0, "E112 expected an indented block"
if indent_level > previous_indent_level and not indent_expect:
yield 0, "E113 unexpected indentation"
def continued_indentation(logical_line, tokens, indent_level, hang_closing,
indent_char, noqa, verbose):
r"""Continuation lines indentation.
Continuation lines should align wrapped elements either vertically
using Python's implicit line joining inside parentheses, brackets
and braces, or using a hanging indent.
When using a hanging indent these considerations should be applied:
- there should be no arguments on the first line, and
- further indentation should be used to clearly distinguish itself as a
continuation line.
Okay: a = (\n)
E123: a = (\n )
Okay: a = (\n 42)
E121: a = (\n 42)
E122: a = (\n42)
E123: a = (\n 42\n )
E124: a = (24,\n 42\n)
E125: if (\n b):\n pass
E126: a = (\n 42)
E127: a = (24,\n 42)
E128: a = (24,\n 42)
E129: if (a or\n b):\n pass
E131: a = (\n 42\n 24)
"""
first_row = tokens[0][2][0]
nrows = 1 + tokens[-1][2][0] - first_row
if noqa or nrows == 1:
return
# indent_next tells us whether the next block is indented; assuming
# that it is indented by 4 spaces, then we should not allow 4-space
# indents on the final continuation line; in turn, some other
# indents are allowed to have an extra 4 spaces.
indent_next = logical_line.endswith(':')
row = depth = 0
valid_hangs = (4,) if indent_char != '\t' else (4, 8)
# remember how many brackets were opened on each line
parens = [0] * nrows
# relative indents of physical lines
rel_indent = [0] * nrows
# for each depth, collect a list of opening rows
open_rows = [[0]]
# for each depth, memorize the hanging indentation
hangs = [None]
# visual indents
indent_chances = {}
last_indent = tokens[0][2]
visual_indent = None
# for each depth, memorize the visual indent column
indent = [last_indent[1]]
if verbose >= 3:
print(">>> " + tokens[0][4].rstrip())
for token_type, text, start, end, line in tokens:
newline = row < start[0] - first_row
if newline:
row = start[0] - first_row
newline = not last_token_multiline and token_type not in NEWLINE
if newline:
# this is the beginning of a continuation line.
last_indent = start
if verbose >= 3:
print("... " + line.rstrip())
# record the initial indent.
rel_indent[row] = expand_indent(line) - indent_level
# identify closing bracket
close_bracket = (token_type == tokenize.OP and text in ']})')
# is the indent relative to an opening bracket line?
for open_row in reversed(open_rows[depth]):
hang = rel_indent[row] - rel_indent[open_row]
hanging_indent = hang in valid_hangs
if hanging_indent:
break
if hangs[depth]:
hanging_indent = (hang == hangs[depth])
# is there any chance of visual indent?
visual_indent = (not close_bracket and hang > 0 and
indent_chances.get(start[1]))
if close_bracket and indent[depth]:
# closing bracket for visual indent
if start[1] != indent[depth]:
yield (start, "E124 closing bracket does not match "
"visual indentation")
elif close_bracket and not hang:
# closing bracket matches indentation of opening bracket's line
if hang_closing:
yield start, "E133 closing bracket is missing indentation"
elif indent[depth] and start[1] < indent[depth]:
if visual_indent is not True:
# visual indent is broken
yield (start, "E128 continuation line "
"under-indented for visual indent")
elif hanging_indent or (indent_next and rel_indent[row] == 8):
# hanging indent is verified
if close_bracket and not hang_closing:
yield (start, "E123 closing bracket does not match "
"indentation of opening bracket's line")
hangs[depth] = hang
elif visual_indent is True:
# visual indent is verified
indent[depth] = start[1]
elif visual_indent in (text, str):
# ignore token lined up with matching one from a previous line
pass
else:
# indent is broken
if hang <= 0:
error = "E122", "missing indentation or outdented"
elif indent[depth]:
error = "E127", "over-indented for visual indent"
elif not close_bracket and hangs[depth]:
error = "E131", "unaligned for hanging indent"
else:
hangs[depth] = hang
if hang > 4:
error = "E126", "over-indented for hanging indent"
else:
error = "E121", "under-indented for hanging indent"
yield start, "%s continuation line %s" % error
# look for visual indenting
if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT)
and not indent[depth]):
indent[depth] = start[1]
indent_chances[start[1]] = True
if verbose >= 4:
print("bracket depth %s indent to %s" % (depth, start[1]))
# deal with implicit string concatenation
elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
text in ('u', 'ur', 'b', 'br')):
indent_chances[start[1]] = str
# special case for the "if" statement because len("if (") == 4
elif not indent_chances and not row and not depth and text == 'if':
indent_chances[end[1] + 1] = True
elif text == ':' and line[end[1]:].isspace():
open_rows[depth].append(row)
# keep track of bracket depth
if token_type == tokenize.OP:
if text in '([{':
depth += 1
indent.append(0)
hangs.append(None)
if len(open_rows) == depth:
open_rows.append([])
open_rows[depth].append(row)
parens[row] += 1
if verbose >= 4:
print("bracket depth %s seen, col %s, visual min = %s" %
(depth, start[1], indent[depth]))
elif text in ')]}' and depth > 0:
# parent indents should not be more than this one
prev_indent = indent.pop() or last_indent[1]
hangs.pop()
for d in range(depth):
if indent[d] > prev_indent:
indent[d] = 0
for ind in list(indent_chances):
if ind >= prev_indent:
del indent_chances[ind]
del open_rows[depth + 1:]
depth -= 1
if depth:
indent_chances[indent[depth]] = True
for idx in range(row, -1, -1):
if parens[idx]:
parens[idx] -= 1
break
assert len(indent) == depth + 1
if start[1] not in indent_chances:
                # allow lining up tokens
indent_chances[start[1]] = text
last_token_multiline = (start[0] != end[0])
if last_token_multiline:
rel_indent[end[0] - first_row] = rel_indent[row]
if indent_next and expand_indent(line) == indent_level + 4:
pos = (start[0], indent[0] + 4)
if visual_indent:
code = "E129 visually indented line"
else:
code = "E125 continuation line"
yield pos, "%s with same indent as next logical line" % code
def whitespace_before_parameters(logical_line, tokens):
r"""Avoid extraneous whitespace.
Avoid extraneous whitespace in the following situations:
- before the open parenthesis that starts the argument list of a
function call.
- before the open parenthesis that starts an indexing or slicing.
Okay: spam(1)
E211: spam (1)
Okay: dict['key'] = list[index]
E211: dict ['key'] = list[index]
E211: dict['key'] = list [index]
"""
prev_type, prev_text, __, prev_end, __ = tokens[0]
for index in range(1, len(tokens)):
token_type, text, start, end, __ = tokens[index]
if (token_type == tokenize.OP and
text in '([' and
start != prev_end and
(prev_type == tokenize.NAME or prev_text in '}])') and
# Syntax "class A (B):" is allowed, but avoid it
(index < 2 or tokens[index - 2][1] != 'class') and
# Allow "return (a.foo for a in range(5))"
not keyword.iskeyword(prev_text)):
yield prev_end, "E211 whitespace before '%s'" % text
prev_type = token_type
prev_text = text
prev_end = end
def whitespace_around_operator(logical_line):
r"""Avoid extraneous whitespace around an operator.
Okay: a = 12 + 3
    E221: a = 4  + 5
    E222: a = 4 +  5
E223: a = 4\t+ 5
E224: a = 4 +\t5
"""
for match in OPERATOR_REGEX.finditer(logical_line):
before, after = match.groups()
if '\t' in before:
yield match.start(1), "E223 tab before operator"
elif len(before) > 1:
yield match.start(1), "E221 multiple spaces before operator"
if '\t' in after:
yield match.start(2), "E224 tab after operator"
elif len(after) > 1:
yield match.start(2), "E222 multiple spaces after operator"
def missing_whitespace_around_operator(logical_line, tokens):
r"""Surround operators with a single space on either side.
- Always surround these binary operators with a single space on
either side: assignment (=), augmented assignment (+=, -= etc.),
comparisons (==, <, >, !=, <=, >=, in, not in, is, is not),
Booleans (and, or, not).
- If operators with different priorities are used, consider adding
whitespace around the operators with the lowest priorities.
Okay: i = i + 1
Okay: submitted += 1
Okay: x = x * 2 - 1
Okay: hypot2 = x * x + y * y
Okay: c = (a + b) * (a - b)
Okay: foo(bar, key='word', *args, **kwargs)
Okay: alpha[:-i]
E225: i=i+1
E225: submitted +=1
E225: x = x /2 - 1
E225: z = x **y
E226: c = (a+b) * (a-b)
E226: hypot2 = x*x + y*y
E227: c = a|b
E228: msg = fmt%(errno, errmsg)
"""
parens = 0
need_space = False
prev_type = tokenize.OP
prev_text = prev_end = None
for token_type, text, start, end, line in tokens:
if token_type in SKIP_COMMENTS:
continue
if text in ('(', 'lambda'):
parens += 1
elif text == ')':
parens -= 1
if need_space:
if start != prev_end:
# Found a (probably) needed space
if need_space is not True and not need_space[1]:
yield (need_space[0],
"E225 missing whitespace around operator")
need_space = False
elif text == '>' and prev_text in ('<', '-'):
# Tolerate the "<>" operator, even if running Python 3
# Deal with Python 3's annotated return value "->"
pass
else:
if need_space is True or need_space[1]:
# A needed trailing space was not found
yield prev_end, "E225 missing whitespace around operator"
else:
code, optype = 'E226', 'arithmetic'
if prev_text == '%':
code, optype = 'E228', 'modulo'
elif prev_text not in ARITHMETIC_OP:
code, optype = 'E227', 'bitwise or shift'
yield (need_space[0], "%s missing whitespace "
"around %s operator" % (code, optype))
need_space = False
elif token_type == tokenize.OP and prev_end is not None:
if text == '=' and parens:
# Allow keyword args or defaults: foo(bar=None).
pass
elif text in WS_NEEDED_OPERATORS:
need_space = True
elif text in UNARY_OPERATORS:
# Check if the operator is being used as a binary operator
# Allow unary operators: -123, -x, +1.
# Allow argument unpacking: foo(*args, **kwargs).
if (prev_text in '}])' if prev_type == tokenize.OP
else prev_text not in KEYWORDS):
need_space = None
elif text in WS_OPTIONAL_OPERATORS:
need_space = None
if need_space is None:
# Surrounding space is optional, but ensure that
# trailing space matches opening space
need_space = (prev_end, start != prev_end)
elif need_space and start == prev_end:
# A needed opening space was not found
yield prev_end, "E225 missing whitespace around operator"
need_space = False
prev_type = token_type
prev_text = text
prev_end = end
def whitespace_around_comma(logical_line):
r"""Avoid extraneous whitespace after a comma or a colon.
Note: these checks are disabled by default
Okay: a = (1, 2)
    E241: a = (1,  2)
E242: a = (1,\t2)
"""
line = logical_line
for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line):
found = m.start() + 1
if '\t' in m.group():
yield found, "E242 tab after '%s'" % m.group()[0]
else:
yield found, "E241 multiple spaces after '%s'" % m.group()[0]
def whitespace_around_named_parameter_equals(logical_line, tokens):
r"""Don't use spaces around the '=' sign in function arguments.
Don't use spaces around the '=' sign when used to indicate a
keyword argument or a default parameter value.
Okay: def complex(real, imag=0.0):
Okay: return magic(r=real, i=imag)
Okay: boolean(a == b)
Okay: boolean(a != b)
Okay: boolean(a <= b)
Okay: boolean(a >= b)
E251: def complex(real, imag = 0.0):
E251: return magic(r = real, i = imag)
"""
parens = 0
no_space = False
prev_end = None
message = "E251 unexpected spaces around keyword / parameter equals"
for token_type, text, start, end, line in tokens:
if token_type == tokenize.NL:
continue
if no_space:
no_space = False
if start != prev_end:
yield (prev_end, message)
elif token_type == tokenize.OP:
if text == '(':
parens += 1
elif text == ')':
parens -= 1
elif parens and text == '=':
no_space = True
if start != prev_end:
yield (prev_end, message)
prev_end = end
def whitespace_before_comment(logical_line, tokens):
r"""Separate inline comments by at least two spaces.
An inline comment is a comment on the same line as a statement. Inline
comments should be separated by at least two spaces from the statement.
They should start with a # and a single space.
Each line of a block comment starts with a # and a single space
(unless it is indented text inside the comment).
    Okay: x = x + 1  # Increment x
    Okay: x = x + 1    # Increment x
Okay: # Block comment
E261: x = x + 1 # Increment x
    E262: x = x + 1  #Increment x
    E262: x = x + 1  #  Increment x
E265: #Block comment
"""
prev_end = (0, 0)
for token_type, text, start, end, line in tokens:
if token_type == tokenize.COMMENT:
inline_comment = line[:start[1]].strip()
if inline_comment:
if prev_end[0] == start[0] and start[1] < prev_end[1] + 2:
yield (prev_end,
"E261 at least two spaces before inline comment")
symbol, sp, comment = text.partition(' ')
bad_prefix = symbol not in ('#', '#:')
if inline_comment:
if bad_prefix or comment[:1].isspace():
yield start, "E262 inline comment should start with '# '"
elif bad_prefix:
if text.rstrip('#') and (start[0] > 1 or symbol[1] != '!'):
yield start, "E265 block comment should start with '# '"
elif token_type != tokenize.NL:
prev_end = end
def imports_on_separate_lines(logical_line):
r"""Imports should usually be on separate lines.
Okay: import os\nimport sys
E401: import sys, os
Okay: from subprocess import Popen, PIPE
    Okay: from myclass import MyClass
Okay: from foo.bar.yourclass import YourClass
Okay: import myclass
Okay: import foo.bar.yourclass
"""
line = logical_line
if line.startswith('import '):
found = line.find(',')
if -1 < found and ';' not in line[:found]:
yield found, "E401 multiple imports on one line"
def compound_statements(logical_line):
r"""Compound statements (on the same line) are generally discouraged.
While sometimes it's okay to put an if/for/while with a small body
on the same line, never do this for multi-clause statements.
Also avoid folding such long lines!
Okay: if foo == 'blah':\n do_blah_thing()
Okay: do_one()
Okay: do_two()
Okay: do_three()
E701: if foo == 'blah': do_blah_thing()
E701: for x in lst: total += x
E701: while t < 10: t = delay()
E701: if foo == 'blah': do_blah_thing()
E701: else: do_non_blah_thing()
E701: try: something()
E701: finally: cleanup()
E701: if foo == 'blah': one(); two(); three()
E702: do_one(); do_two(); do_three()
E703: do_four(); # useless semicolon
"""
line = logical_line
last_char = len(line) - 1
found = line.find(':')
while -1 < found < last_char:
before = line[:found]
if (before.count('{') <= before.count('}') and # {'a': 1} (dict)
before.count('[') <= before.count(']') and # [1:2] (slice)
before.count('(') <= before.count(')') and # (Python 3 annotation)
not LAMBDA_REGEX.search(before)): # lambda x: x
yield found, "E701 multiple statements on one line (colon)"
found = line.find(':', found + 1)
found = line.find(';')
while -1 < found:
if found < last_char:
yield found, "E702 multiple statements on one line (semicolon)"
else:
yield found, "E703 statement ends with a semicolon"
found = line.find(';', found + 1)
def explicit_line_join(logical_line, tokens):
r"""Avoid explicit line join between brackets.
The preferred way of wrapping long lines is by using Python's implied line
continuation inside parentheses, brackets and braces. Long lines can be
broken over multiple lines by wrapping expressions in parentheses. These
should be used in preference to using a backslash for line continuation.
E502: aaa = [123, \\n 123]
E502: aaa = ("bbb " \\n "ccc")
Okay: aaa = [123,\n 123]
Okay: aaa = ("bbb "\n "ccc")
Okay: aaa = "bbb " \\n "ccc"
"""
prev_start = prev_end = parens = 0
for token_type, text, start, end, line in tokens:
if start[0] != prev_start and parens and backslash:
yield backslash, "E502 the backslash is redundant between brackets"
if end[0] != prev_end:
if line.rstrip('\r\n').endswith('\\'):
backslash = (end[0], len(line.splitlines()[-1]) - 1)
else:
backslash = None
prev_start = prev_end = end[0]
else:
prev_start = start[0]
if token_type == tokenize.OP:
if text in '([{':
parens += 1
elif text in ')]}':
parens -= 1
def comparison_to_singleton(logical_line, noqa):
r"""Comparison to singletons should use "is" or "is not".
Comparisons to singletons like None should always be done
with "is" or "is not", never the equality operators.
Okay: if arg is not None:
E711: if arg != None:
E712: if arg == True:
Also, beware of writing if x when you really mean if x is not None --
e.g. when testing whether a variable or argument that defaults to None was
set to some other value. The other value might have a type (such as a
container) that could be false in a boolean context!
"""
match = not noqa and COMPARE_SINGLETON_REGEX.search(logical_line)
if match:
same = (match.group(1) == '==')
singleton = match.group(2)
msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton)
if singleton in ('None',):
code = 'E711'
else:
code = 'E712'
nonzero = ((singleton == 'True' and same) or
(singleton == 'False' and not same))
msg += " or 'if %scond:'" % ('' if nonzero else 'not ')
yield match.start(1), ("%s comparison to %s should be %s" %
(code, singleton, msg))
def comparison_negative(logical_line):
r"""Negative comparison should be done using "not in" and "is not".
Okay: if x not in y:\n pass
Okay: assert (X in Y or X is Z)
Okay: if not (X in Y):\n pass
Okay: zz = x is not y
E713: Z = not X in Y
E713: if not X.B in Y:\n pass
E714: if not X is Y:\n pass
E714: Z = not X.B is Y
"""
match = COMPARE_NEGATIVE_REGEX.search(logical_line)
if match:
pos = match.start(1)
if match.group(2) == 'in':
yield pos, "E713 test for membership should be 'not in'"
else:
yield pos, "E714 test for object identity should be 'is not'"
def comparison_type(logical_line):
r"""Object type comparisons should always use isinstance().
Do not compare types directly.
Okay: if isinstance(obj, int):
E721: if type(obj) is type(1):
When checking if an object is a string, keep in mind that it might be a
unicode string too! In Python 2.3, str and unicode have a common base
class, basestring, so you can do:
Okay: if isinstance(obj, basestring):
Okay: if type(a1) is type(b1):
"""
match = COMPARE_TYPE_REGEX.search(logical_line)
if match:
inst = match.group(1)
if inst and isidentifier(inst) and inst not in SINGLETONS:
return # Allow comparison for types which are not obvious
yield match.start(), "E721 do not compare types, use 'isinstance()'"
def python_3000_has_key(logical_line, noqa):
r"""The {}.has_key() method is removed in Python 3: use the 'in' operator.
Okay: if "alph" in d:\n print d["alph"]
W601: assert d.has_key('alph')
"""
pos = logical_line.find('.has_key(')
if pos > -1 and not noqa:
yield pos, "W601 .has_key() is deprecated, use 'in'"
def python_3000_raise_comma(logical_line):
r"""When raising an exception, use "raise ValueError('message')".
The older form is removed in Python 3.
Okay: raise DummyError("Message")
W602: raise DummyError, "Message"
"""
match = RAISE_COMMA_REGEX.match(logical_line)
if match and not RERAISE_COMMA_REGEX.match(logical_line):
yield match.end() - 1, "W602 deprecated form of raising exception"
def python_3000_not_equal(logical_line):
r"""New code should always use != instead of <>.
The older syntax is removed in Python 3.
Okay: if a != 'no':
W603: if a <> 'no':
"""
pos = logical_line.find('<>')
if pos > -1:
yield pos, "W603 '<>' is deprecated, use '!='"
def python_3000_backticks(logical_line):
r"""Backticks are removed in Python 3: use repr() instead.
Okay: val = repr(1 + 2)
W604: val = `1 + 2`
"""
pos = logical_line.find('`')
if pos > -1:
yield pos, "W604 backticks are deprecated, use 'repr()'"
##############################################################################
# Helper functions
##############################################################################
if '' == ''.encode():
# Python 2: implicit encoding.
def readlines(filename):
"""Read the source code."""
with open(filename, 'rU') as f:
return f.readlines()
isidentifier = re.compile(r'[a-zA-Z_]\w*').match
stdin_get_value = sys.stdin.read
else:
# Python 3
def readlines(filename):
"""Read the source code."""
try:
with open(filename, 'rb') as f:
(coding, lines) = tokenize.detect_encoding(f.readline)
f = TextIOWrapper(f, coding, line_buffering=True)
return [l.decode(coding) for l in lines] + f.readlines()
except (LookupError, SyntaxError, UnicodeError):
# Fall back if file encoding is improperly declared
with open(filename, encoding='latin-1') as f:
return f.readlines()
isidentifier = str.isidentifier
def stdin_get_value():
return TextIOWrapper(sys.stdin.buffer, errors='ignore').read()
noqa = re.compile(r'# no(?:qa|pep8)\b', re.I).search
def expand_indent(line):
r"""Return the amount of indentation.
Tabs are expanded to the next multiple of 8.
    >>> expand_indent('    ')
    4
    >>> expand_indent('\t')
    8
    >>> expand_indent('       \t')
    8
    >>> expand_indent('        \t')
    16
"""
if '\t' not in line:
return len(line) - len(line.lstrip())
result = 0
for char in line:
if char == '\t':
result = result // 8 * 8 + 8
elif char == ' ':
result += 1
else:
break
return result
def mute_string(text):
"""Replace contents with 'xxx' to prevent syntax matching.
>>> mute_string('"abc"')
'"xxx"'
>>> mute_string("'''abc'''")
"'''xxx'''"
>>> mute_string("r'abc'")
"r'xxx'"
"""
# String modifiers (e.g. u or r)
start = text.index(text[-1]) + 1
end = len(text) - 1
# Triple quotes
if text[-3:] in ('"""', "'''"):
start += 2
end -= 2
return text[:start] + 'x' * (end - start) + text[end:]
def parse_udiff(diff, patterns=None, parent='.'):
"""Return a dictionary of matching lines."""
# For each file of the diff, the entry key is the filename,
# and the value is a set of row numbers to consider.
rv = {}
path = nrows = None
for line in diff.splitlines():
if nrows:
if line[:1] != '-':
nrows -= 1
continue
if line[:3] == '@@ ':
hunk_match = HUNK_REGEX.match(line)
(row, nrows) = [int(g or '1') for g in hunk_match.groups()]
rv[path].update(range(row, row + nrows))
elif line[:3] == '+++':
path = line[4:].split('\t', 1)[0]
if path[:2] == 'b/':
path = path[2:]
rv[path] = set()
return dict([(os.path.join(parent, path), rows)
for (path, rows) in rv.items()
if rows and filename_match(path, patterns)])
def normalize_paths(value, parent=os.curdir):
"""Parse a comma-separated list of paths.
Return a list of absolute paths.
"""
if not value or isinstance(value, list):
return value
paths = []
for path in value.split(','):
if '/' in path:
path = os.path.abspath(os.path.join(parent, path))
paths.append(path.rstrip('/'))
return paths
def filename_match(filename, patterns, default=True):
"""Check if patterns contains a pattern that matches filename.
If patterns is unspecified, this always returns True.
"""
if not patterns:
return default
return any(fnmatch(filename, pattern) for pattern in patterns)
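# For example, filename_match('pep8.py', ['*.py', '*.pyw']) is True, while
# filename_match('pep8.py', None) simply returns the 'default' argument.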
if COMMENT_WITH_NL:
def _is_eol_token(token):
return (token[0] in NEWLINE or
(token[0] == tokenize.COMMENT and token[1] == token[4]))
else:
def _is_eol_token(token):
return token[0] in NEWLINE
##############################################################################
# Framework to run all checks
##############################################################################
_checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}}
def register_check(check, codes=None):
"""Register a new check object."""
def _add_check(check, kind, codes, args):
if check in _checks[kind]:
_checks[kind][check][0].extend(codes or [])
else:
_checks[kind][check] = (codes or [''], args)
if inspect.isfunction(check):
args = inspect.getargspec(check)[0]
if args and args[0] in ('physical_line', 'logical_line'):
if codes is None:
codes = ERRORCODE_REGEX.findall(check.__doc__ or '')
_add_check(check, args[0], codes, args)
elif inspect.isclass(check):
if inspect.getargspec(check.__init__)[0][:2] == ['self', 'tree']:
_add_check(check, 'tree', codes, None)
def init_checks_registry():
"""Register all globally visible functions.
The first argument name is either 'physical_line' or 'logical_line'.
"""
mod = inspect.getmodule(register_check)
for (name, function) in inspect.getmembers(mod, inspect.isfunction):
register_check(function)
init_checks_registry()
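# A minimal sketch of how an extra check could be plugged into this registry
# (hypothetical code W999 and function name; register_check() only requires
# the first argument to be named 'logical_line' or 'physical_line'):
#
#     def check_todo_marker(logical_line):
#         if 'TODO' in logical_line:
#             yield logical_line.find('TODO'), "W999 TODO marker found"
#     register_check(check_todo_marker, codes=['W999'])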
class Checker(object):
"""Load a Python source file, tokenize it, check coding style."""
def __init__(self, filename=None, lines=None,
options=None, report=None, **kwargs):
if options is None:
options = StyleGuide(kwargs).options
else:
assert not kwargs
self._io_error = None
self._physical_checks = options.physical_checks
self._logical_checks = options.logical_checks
self._ast_checks = options.ast_checks
self.max_line_length = options.max_line_length
self.multiline = False # in a multiline string?
self.hang_closing = options.hang_closing
self.verbose = options.verbose
self.filename = filename
if filename is None:
self.filename = 'stdin'
self.lines = lines or []
elif filename == '-':
self.filename = 'stdin'
self.lines = stdin_get_value().splitlines(True)
elif lines is None:
try:
self.lines = readlines(filename)
except IOError:
(exc_type, exc) = sys.exc_info()[:2]
self._io_error = '%s: %s' % (exc_type.__name__, exc)
self.lines = []
else:
self.lines = lines
if self.lines:
ord0 = ord(self.lines[0][0])
if ord0 in (0xef, 0xfeff): # Strip the UTF-8 BOM
if ord0 == 0xfeff:
self.lines[0] = self.lines[0][1:]
elif self.lines[0][:3] == '\xef\xbb\xbf':
self.lines[0] = self.lines[0][3:]
self.report = report or options.report
self.report_error = self.report.error
def report_invalid_syntax(self):
"""Check if the syntax is valid."""
(exc_type, exc) = sys.exc_info()[:2]
if len(exc.args) > 1:
offset = exc.args[1]
if len(offset) > 2:
offset = offset[1:3]
else:
offset = (1, 0)
self.report_error(offset[0], offset[1] or 0,
'E901 %s: %s' % (exc_type.__name__, exc.args[0]),
self.report_invalid_syntax)
def readline(self):
"""Get the next line from the input buffer."""
if self.line_number >= self.total_lines:
return ''
line = self.lines[self.line_number]
self.line_number += 1
if self.indent_char is None and line[:1] in WHITESPACE:
self.indent_char = line[0]
return line
def run_check(self, check, argument_names):
"""Run a check plugin."""
arguments = []
for name in argument_names:
arguments.append(getattr(self, name))
return check(*arguments)
def check_physical(self, line):
"""Run all physical checks on a raw input line."""
self.physical_line = line
for name, check, argument_names in self._physical_checks:
result = self.run_check(check, argument_names)
if result is not None:
(offset, text) = result
self.report_error(self.line_number, offset, text, check)
if text[:4] == 'E101':
self.indent_char = line[0]
def build_tokens_line(self):
"""Build a logical line from tokens."""
logical = []
comments = []
length = 0
prev_row = prev_col = mapping = None
for token_type, text, start, end, line in self.tokens:
if token_type in SKIP_TOKENS:
continue
if not mapping:
mapping = [(0, start)]
if token_type == tokenize.COMMENT:
comments.append(text)
continue
if token_type == tokenize.STRING:
text = mute_string(text)
if prev_row:
(start_row, start_col) = start
if prev_row != start_row: # different row
prev_text = self.lines[prev_row - 1][prev_col - 1]
if prev_text == ',' or (prev_text not in '{[('
and text not in '}])'):
text = ' ' + text
elif prev_col != start_col: # different column
text = line[prev_col:start_col] + text
logical.append(text)
length += len(text)
mapping.append((length, end))
(prev_row, prev_col) = end
self.logical_line = ''.join(logical)
self.noqa = comments and noqa(''.join(comments))
return mapping
def check_logical(self):
"""Build a line from tokens and run all logical checks on it."""
self.report.increment_logical_line()
mapping = self.build_tokens_line()
(start_row, start_col) = mapping[0][1]
start_line = self.lines[start_row - 1]
self.indent_level = expand_indent(start_line[:start_col])
if self.blank_before < self.blank_lines:
self.blank_before = self.blank_lines
if self.verbose >= 2:
print(self.logical_line[:80].rstrip())
for name, check, argument_names in self._logical_checks:
if self.verbose >= 4:
print(' ' + name)
for offset, text in self.run_check(check, argument_names) or ():
if not isinstance(offset, tuple):
for token_offset, pos in mapping:
if offset <= token_offset:
break
offset = (pos[0], pos[1] + offset - token_offset)
self.report_error(offset[0], offset[1], text, check)
if self.logical_line:
self.previous_indent_level = self.indent_level
self.previous_logical = self.logical_line
self.blank_lines = 0
self.tokens = []
def check_ast(self):
"""Build the file's AST and run all AST checks."""
try:
tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
except (SyntaxError, TypeError):
return self.report_invalid_syntax()
for name, cls, __ in self._ast_checks:
checker = cls(tree, self.filename)
for lineno, offset, text, check in checker.run():
if not self.lines or not noqa(self.lines[lineno - 1]):
self.report_error(lineno, offset, text, check)
def generate_tokens(self):
"""Tokenize the file, run physical line checks and yield tokens."""
if self._io_error:
self.report_error(1, 0, 'E902 %s' % self._io_error, readlines)
tokengen = tokenize.generate_tokens(self.readline)
try:
for token in tokengen:
if token[2][0] > self.total_lines:
return
self.maybe_check_physical(token)
yield token
except (SyntaxError, tokenize.TokenError):
self.report_invalid_syntax()
def maybe_check_physical(self, token):
"""If appropriate (based on token), check current physical line(s)."""
# Called after every token, but act only on end of line.
if _is_eol_token(token):
# Obviously, a newline token ends a single physical line.
self.check_physical(token[4])
elif token[0] == tokenize.STRING and '\n' in token[1]:
# Less obviously, a string that contains newlines is a
# multiline string, either triple-quoted or with internal
# newlines backslash-escaped. Check every physical line in the
# string *except* for the last one: its newline is outside of
# the multiline string, so we consider it a regular physical
# line, and will check it like any other physical line.
#
# Subtleties:
# - we don't *completely* ignore the last line; if it contains
# the magical "# noqa" comment, we disable all physical
# checks for the entire multiline string
# - have to wind self.line_number back because initially it
# points to the last line of the string, and we want
# check_physical() to give accurate feedback
if noqa(token[4]):
return
self.multiline = True
self.line_number = token[2][0]
for line in token[1].split('\n')[:-1]:
self.check_physical(line + '\n')
self.line_number += 1
self.multiline = False
def check_all(self, expected=None, line_offset=0):
"""Run all checks on the input file."""
self.report.init_file(self.filename, self.lines, expected, line_offset)
self.total_lines = len(self.lines)
if self._ast_checks:
self.check_ast()
self.line_number = 0
self.indent_char = None
self.indent_level = self.previous_indent_level = 0
self.previous_logical = ''
self.tokens = []
self.blank_lines = self.blank_before = 0
parens = 0
for token in self.generate_tokens():
self.tokens.append(token)
token_type, text = token[0:2]
if self.verbose >= 3:
if token[2][0] == token[3][0]:
pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
else:
pos = 'l.%s' % token[3][0]
print('l.%s\t%s\t%s\t%r' %
(token[2][0], pos, tokenize.tok_name[token[0]], text))
if token_type == tokenize.OP:
if text in '([{':
parens += 1
elif text in '}])':
parens -= 1
elif not parens:
if token_type in NEWLINE:
if token_type == tokenize.NEWLINE:
self.check_logical()
self.blank_before = 0
elif len(self.tokens) == 1:
# The physical line contains only this token.
self.blank_lines += 1
del self.tokens[0]
else:
self.check_logical()
elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
if len(self.tokens) == 1:
# The comment also ends a physical line
token = list(token)
token[1] = text.rstrip('\r\n')
token[3] = (token[2][0], token[2][1] + len(token[1]))
self.tokens = [tuple(token)]
self.check_logical()
if self.tokens:
self.check_physical(self.lines[-1])
self.check_logical()
return self.report.get_file_results()
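# Illustrative sketch: a Checker can also be driven directly for one file,
# e.g. Checker('example.py', show_source=True).check_all() ('example.py' is a
# hypothetical path; keyword options are forwarded to a StyleGuide instance
# when no 'options' object is supplied).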
class BaseReport(object):
"""Collect the results of the checks."""
print_filename = False
def __init__(self, options):
self._benchmark_keys = options.benchmark_keys
self._ignore_code = options.ignore_code
# Results
self.elapsed = 0
self.total_errors = 0
self.counters = dict.fromkeys(self._benchmark_keys, 0)
self.messages = {}
def start(self):
"""Start the timer."""
self._start_time = time.time()
def stop(self):
"""Stop the timer."""
self.elapsed = time.time() - self._start_time
def init_file(self, filename, lines, expected, line_offset):
"""Signal a new file."""
self.filename = filename
self.lines = lines
self.expected = expected or ()
self.line_offset = line_offset
self.file_errors = 0
self.counters['files'] += 1
self.counters['physical lines'] += len(lines)
def increment_logical_line(self):
"""Signal a new logical line."""
self.counters['logical lines'] += 1
def error(self, line_number, offset, text, check):
"""Report an error, according to options."""
code = text[:4]
if self._ignore_code(code):
return
if code in self.counters:
self.counters[code] += 1
else:
self.counters[code] = 1
self.messages[code] = text[5:]
# Don't care about expected errors or warnings
if code in self.expected:
return
if self.print_filename and not self.file_errors:
print(self.filename)
self.file_errors += 1
self.total_errors += 1
return code
def get_file_results(self):
"""Return the count of errors and warnings for this file."""
return self.file_errors
def get_count(self, prefix=''):
"""Return the total count of errors and warnings."""
return sum([self.counters[key]
for key in self.messages if key.startswith(prefix)])
def get_statistics(self, prefix=''):
"""Get statistics for message codes that start with the prefix.
prefix='' matches all errors and warnings
prefix='E' matches all errors
prefix='W' matches all warnings
prefix='E4' matches all errors that have to do with imports
"""
return ['%-7s %s %s' % (self.counters[key], key, self.messages[key])
for key in sorted(self.messages) if key.startswith(prefix)]
def print_statistics(self, prefix=''):
"""Print overall statistics (number of errors and warnings)."""
for line in self.get_statistics(prefix):
print(line)
def print_benchmark(self):
"""Print benchmark numbers."""
print('%-7.2f %s' % (self.elapsed, 'seconds elapsed'))
if self.elapsed:
for key in self._benchmark_keys:
print('%-7d %s per second (%d total)' %
(self.counters[key] / self.elapsed, key,
self.counters[key]))
class FileReport(BaseReport):
"""Collect the results of the checks and print only the filenames."""
print_filename = True
class StandardReport(BaseReport):
"""Collect and print the results of the checks."""
def __init__(self, options):
super(StandardReport, self).__init__(options)
self._fmt = REPORT_FORMAT.get(options.format.lower(),
options.format)
self._repeat = options.repeat
self._show_source = options.show_source
self._show_pep8 = options.show_pep8
def init_file(self, filename, lines, expected, line_offset):
"""Signal a new file."""
self._deferred_print = []
return super(StandardReport, self).init_file(
filename, lines, expected, line_offset)
def error(self, line_number, offset, text, check):
"""Report an error, according to options."""
code = super(StandardReport, self).error(line_number, offset,
text, check)
if code and (self.counters[code] == 1 or self._repeat):
self._deferred_print.append(
(line_number, offset, code, text[5:], check.__doc__))
return code
def get_file_results(self):
"""Print the result and return the overall count for this file."""
self._deferred_print.sort()
for line_number, offset, code, text, doc in self._deferred_print:
print(self._fmt % {
'path': self.filename,
'row': self.line_offset + line_number, 'col': offset + 1,
'code': code, 'text': text,
})
if self._show_source:
if line_number > len(self.lines):
line = ''
else:
line = self.lines[line_number - 1]
print(line.rstrip())
print(re.sub(r'\S', ' ', line[:offset]) + '^')
if self._show_pep8 and doc:
print(' ' + doc.strip())
return self.file_errors
class DiffReport(StandardReport):
"""Collect and print the results for the changed lines only."""
def __init__(self, options):
super(DiffReport, self).__init__(options)
self._selected = options.selected_lines
def error(self, line_number, offset, text, check):
if line_number not in self._selected[self.filename]:
return
return super(DiffReport, self).error(line_number, offset, text, check)
class StyleGuide(object):
"""Initialize a PEP-8 instance with few options."""
def __init__(self, *args, **kwargs):
# build options from the command line
self.checker_class = kwargs.pop('checker_class', Checker)
parse_argv = kwargs.pop('parse_argv', False)
config_file = kwargs.pop('config_file', None)
parser = kwargs.pop('parser', None)
# build options from dict
options_dict = dict(*args, **kwargs)
arglist = None if parse_argv else options_dict.get('paths', None)
options, self.paths = process_options(
arglist, parse_argv, config_file, parser)
if options_dict:
options.__dict__.update(options_dict)
if 'paths' in options_dict:
self.paths = options_dict['paths']
self.runner = self.input_file
self.options = options
if not options.reporter:
options.reporter = BaseReport if options.quiet else StandardReport
options.select = tuple(options.select or ())
if not (options.select or options.ignore or
options.testsuite or options.doctest) and DEFAULT_IGNORE:
# The default choice: ignore controversial checks
options.ignore = tuple(DEFAULT_IGNORE.split(','))
else:
# Ignore all checks which are not explicitly selected
options.ignore = ('',) if options.select else tuple(options.ignore)
options.benchmark_keys = BENCHMARK_KEYS[:]
options.ignore_code = self.ignore_code
options.physical_checks = self.get_checks('physical_line')
options.logical_checks = self.get_checks('logical_line')
options.ast_checks = self.get_checks('tree')
self.init_report()
def init_report(self, reporter=None):
"""Initialize the report instance."""
self.options.report = (reporter or self.options.reporter)(self.options)
return self.options.report
def check_files(self, paths=None):
"""Run all checks on the paths."""
if paths is None:
paths = self.paths
report = self.options.report
runner = self.runner
report.start()
try:
for path in paths:
if os.path.isdir(path):
self.input_dir(path)
elif not self.excluded(path):
runner(path)
except KeyboardInterrupt:
print('... stopped')
report.stop()
return report
def input_file(self, filename, lines=None, expected=None, line_offset=0):
"""Run all checks on a Python source file."""
if self.options.verbose:
print('checking %s' % filename)
fchecker = self.checker_class(
filename, lines=lines, options=self.options)
return fchecker.check_all(expected=expected, line_offset=line_offset)
def input_dir(self, dirname):
"""Check all files in this directory and all subdirectories."""
dirname = dirname.rstrip('/')
if self.excluded(dirname):
return 0
counters = self.options.report.counters
verbose = self.options.verbose
filepatterns = self.options.filename
runner = self.runner
for root, dirs, files in os.walk(dirname):
if verbose:
print('directory ' + root)
counters['directories'] += 1
for subdir in sorted(dirs):
if self.excluded(subdir, root):
dirs.remove(subdir)
for filename in sorted(files):
# contain a pattern that matches?
if ((filename_match(filename, filepatterns) and
not self.excluded(filename, root))):
runner(os.path.join(root, filename))
def excluded(self, filename, parent=None):
"""Check if the file should be excluded.
Check if 'options.exclude' contains a pattern that matches filename.
"""
if not self.options.exclude:
return False
basename = os.path.basename(filename)
if filename_match(basename, self.options.exclude):
return True
if parent:
filename = os.path.join(parent, filename)
filename = os.path.abspath(filename)
return filename_match(filename, self.options.exclude)
def ignore_code(self, code):
"""Check if the error code should be ignored.
If 'options.select' contains a prefix of the error code,
return False. Else, if 'options.ignore' contains a prefix of
the error code, return True.
"""
if len(code) < 4 and any(s.startswith(code)
for s in self.options.select):
return False
return (code.startswith(self.options.ignore) and
not code.startswith(self.options.select))
def get_checks(self, argument_name):
"""Get all the checks for this category.
Find all globally visible functions where the first argument name
starts with argument_name and which contain selected tests.
"""
checks = []
for check, attrs in _checks[argument_name].items():
(codes, args) = attrs
if any(not (code and self.ignore_code(code)) for code in codes):
checks.append((check.__name__, check, args))
return sorted(checks)
def get_parser(prog='pep8', version=__version__):
parser = OptionParser(prog=prog, version=version,
usage="%prog [options] input ...")
parser.config_options = [
'exclude', 'filename', 'select', 'ignore', 'max-line-length',
'hang-closing', 'count', 'format', 'quiet', 'show-pep8',
'show-source', 'statistics', 'verbose']
parser.add_option('-v', '--verbose', default=0, action='count',
help="print status messages, or debug with -vv")
parser.add_option('-q', '--quiet', default=0, action='count',
help="report only file names, or nothing with -qq")
parser.add_option('-r', '--repeat', default=True, action='store_true',
help="(obsolete) show all occurrences of the same error")
parser.add_option('--first', action='store_false', dest='repeat',
help="show first occurrence of each error")
parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE,
help="exclude files or directories which match these "
"comma separated patterns (default: %default)")
parser.add_option('--filename', metavar='patterns', default='*.py',
help="when parsing directories, only check filenames "
"matching these comma separated patterns "
"(default: %default)")
parser.add_option('--select', metavar='errors', default='',
help="select errors and warnings (e.g. E,W6)")
parser.add_option('--ignore', metavar='errors', default='',
help="skip errors and warnings (e.g. E4,W)")
parser.add_option('--show-source', action='store_true',
help="show source code for each error")
parser.add_option('--show-pep8', action='store_true',
help="show text of PEP 8 for each error "
"(implies --first)")
parser.add_option('--statistics', action='store_true',
help="count errors and warnings")
parser.add_option('--count', action='store_true',
help="print total number of errors and warnings "
"to standard error and set exit code to 1 if "
"total is not null")
parser.add_option('--max-line-length', type='int', metavar='n',
default=MAX_LINE_LENGTH,
help="set maximum allowed line length "
"(default: %default)")
parser.add_option('--hang-closing', action='store_true',
help="hang closing bracket instead of matching "
"indentation of opening bracket's line")
parser.add_option('--format', metavar='format', default='default',
help="set the error format [default|pylint|<custom>]")
parser.add_option('--diff', action='store_true',
help="report only lines changed according to the "
"unified diff received on STDIN")
group = parser.add_option_group("Testing Options")
if os.path.exists(TESTSUITE_PATH):
group.add_option('--testsuite', metavar='dir',
help="run regression tests from dir")
group.add_option('--doctest', action='store_true',
help="run doctest on myself")
group.add_option('--benchmark', action='store_true',
help="measure processing speed")
return parser
def read_config(options, args, arglist, parser):
"""Read both user configuration and local configuration."""
config = RawConfigParser()
user_conf = options.config
if user_conf and os.path.isfile(user_conf):
if options.verbose:
print('user configuration: %s' % user_conf)
config.read(user_conf)
local_dir = os.curdir
parent = tail = args and os.path.abspath(os.path.commonprefix(args))
while tail:
if config.read([os.path.join(parent, fn) for fn in PROJECT_CONFIG]):
local_dir = parent
if options.verbose:
print('local configuration: in %s' % parent)
break
(parent, tail) = os.path.split(parent)
pep8_section = parser.prog
if config.has_section(pep8_section):
option_list = dict([(o.dest, o.type or o.action)
for o in parser.option_list])
# First, read the default values
(new_options, __) = parser.parse_args([])
# Second, parse the configuration
for opt in config.options(pep8_section):
if opt.replace('_', '-') not in parser.config_options:
print(" unknown option '%s' ignored" % opt)
continue
if options.verbose > 1:
print(" %s = %s" % (opt, config.get(pep8_section, opt)))
normalized_opt = opt.replace('-', '_')
opt_type = option_list[normalized_opt]
if opt_type in ('int', 'count'):
value = config.getint(pep8_section, opt)
elif opt_type == 'string':
value = config.get(pep8_section, opt)
if normalized_opt == 'exclude':
value = normalize_paths(value, local_dir)
else:
assert opt_type in ('store_true', 'store_false')
value = config.getboolean(pep8_section, opt)
setattr(new_options, normalized_opt, value)
# Third, overwrite with the command-line options
(options, __) = parser.parse_args(arglist, values=new_options)
options.doctest = options.testsuite = False
return options
def process_options(arglist=None, parse_argv=False, config_file=None,
parser=None):
"""Process options passed either via arglist or via command line args."""
if not parser:
parser = get_parser()
if not parser.has_option('--config'):
if config_file is True:
config_file = DEFAULT_CONFIG
group = parser.add_option_group("Configuration", description=(
"The project options are read from the [%s] section of the "
"tox.ini file or the setup.cfg file located in any parent folder "
"of the path(s) being processed. Allowed options are: %s." %
(parser.prog, ', '.join(parser.config_options))))
group.add_option('--config', metavar='path', default=config_file,
help="user config file location (default: %default)")
# Don't read the command line if the module is used as a library.
if not arglist and not parse_argv:
arglist = []
# If parse_argv is True and arglist is None, arguments are
# parsed from the command line (sys.argv)
(options, args) = parser.parse_args(arglist)
options.reporter = None
if options.ensure_value('testsuite', False):
args.append(options.testsuite)
elif not options.ensure_value('doctest', False):
if parse_argv and not args:
if options.diff or any(os.path.exists(name)
for name in PROJECT_CONFIG):
args = ['.']
else:
parser.error('input not specified')
options = read_config(options, args, arglist, parser)
options.reporter = parse_argv and options.quiet == 1 and FileReport
options.filename = options.filename and options.filename.split(',')
options.exclude = normalize_paths(options.exclude)
options.select = options.select and options.select.split(',')
options.ignore = options.ignore and options.ignore.split(',')
if options.diff:
options.reporter = DiffReport
stdin = stdin_get_value()
options.selected_lines = parse_udiff(stdin, options.filename, args[0])
args = sorted(options.selected_lines)
return options, args
def _main():
"""Parse options and run checks on Python source."""
import signal
# Handle "Broken pipe" gracefully
try:
signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1))
except AttributeError:
pass # not supported on Windows
pep8style = StyleGuide(parse_argv=True, config_file=True)
options = pep8style.options
if options.doctest or options.testsuite:
from testsuite.support import run_tests
report = run_tests(pep8style)
else:
report = pep8style.check_files()
if options.statistics:
report.print_statistics()
if options.benchmark:
report.print_benchmark()
if options.testsuite and not options.quiet:
report.print_results()
if report.total_errors:
if options.count:
sys.stderr.write(str(report.total_errors) + '\n')
sys.exit(1)
if __name__ == '__main__':
_main()
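# Minimal library-usage sketch (assumes this module is importable as 'pep8';
# 'example.py' is a hypothetical path):
#
#     import pep8
#     style = pep8.StyleGuide(quiet=True)
#     report = style.check_files(['example.py'])
#     print(report.total_errors)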
|
ishay2b/tensorflow
|
refs/heads/segnet
|
tensorflow/python/kernel_tests/argmax_op_test.py
|
56
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.argmax_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ArgMaxTest(test.TestCase):
def _testArg(self,
method,
x,
axis,
expected_values,
use_gpu=False,
expected_err_re=None):
with self.test_session(use_gpu=use_gpu):
ans = method(x, axis=axis)
if expected_err_re is None:
tf_ans = ans.eval()
# Defaults to int64 output.
self.assertEqual(np.int64, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
self.assertShapeEqual(expected_values, ans)
else:
with self.assertRaisesOpError(expected_err_re):
ans.eval()
def _testBothArg(self,
method,
x,
axis,
expected_values,
expected_err_re=None):
self._testArg(method, x, axis, expected_values, True, expected_err_re)
self._testArg(method, x, axis, expected_values, False, expected_err_re)
def _testBasic(self, dtype):
x = np.asarray(100 * np.random.randn(200), dtype=dtype)
# Check that argmin and argmax match numpy along the primary axis
self._testBothArg(math_ops.argmax, x, 0, x.argmax())
self._testBothArg(math_ops.argmin, x, 0, x.argmin())
def _testDim(self, dtype):
x = np.asarray(100 * np.random.randn(3, 2, 4, 5, 6), dtype=dtype)
# Check that argmin and argmax match numpy along all axes
for axis in range(-5, 5):
self._testBothArg(math_ops.argmax, x, axis, x.argmax(axis))
self._testBothArg(math_ops.argmin, x, axis, x.argmin(axis))
def testFloat(self):
self._testBasic(np.float32)
self._testDim(np.float32)
def testFloatInt32Output(self):
x = np.asarray(100 * np.random.randn(200), dtype=np.float32)
expected_values = x.argmax()
with self.test_session(use_gpu=True):
ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)
tf_ans = ans.eval()
self.assertEqual(np.int32, tf_ans.dtype)
      # The values are equal when comparing int32 to int64 because
      # the indices fit comfortably within the range of a 32-bit integer.
self.assertAllEqual(tf_ans, expected_values)
expected_values = x.argmin()
with self.test_session(use_gpu=True):
ans = math_ops.argmin(x, axis=0, output_type=dtypes.int32)
tf_ans = ans.eval()
self.assertEqual(np.int32, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
def testDouble(self):
self._testBasic(np.float64)
self._testDim(np.float64)
def testInt32(self):
self._testBasic(np.int32)
self._testDim(np.int32)
def testInt64(self):
self._testBasic(np.int64)
self._testDim(np.int64)
def testEmpty(self):
with self.test_session():
for op in math_ops.argmin, math_ops.argmax:
with self.assertRaisesOpError(
r"Reduction axis 0 is empty in shape \[0\]"):
op([], 0).eval()
def testDefaultAxis(self):
with self.test_session():
for op in math_ops.argmin, math_ops.argmax:
ans = op([1]).eval()
self.assertAllEqual(ans, 0)
if __name__ == "__main__":
test.main()
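# Illustrative sketch of the behaviour exercised above (plain NumPy, outside
# the test harness): argmax/argmin return the index of the extreme value
# along the chosen axis, as int64 by default.
#
#     import numpy as np
#     x = np.array([[1, 9, 3], [7, 2, 5]])
#     x.argmax(axis=1)  # -> array([1, 0]); math_ops.argmax is compared
#                       # against exactly this NumPy result.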
|
altairpearl/scikit-learn
|
refs/heads/master
|
examples/mixture/plot_bayesian_gaussian_mixture.py
|
2
|
"""
======================================================
Bayesian Gaussian Mixture Concentration Prior Analysis
======================================================
Plot the resulting ellipsoids of a mixture of three Gaussians with
variational Bayesian Gaussian Mixture for three different values of the
prior on the Dirichlet concentration.
For all models, the variational Bayesian Gaussian Mixture adapts its number of
mixture components automatically. The parameter `dirichlet_concentration_prior` has a
direct link with the resulting number of components. Specifying a high value of
`dirichlet_concentration_prior` leads more often to uniformly-sized mixture
components, while specifying small (under 0.1) values will lead to some mixture
components getting almost all the points while most mixture components will be
centered on just a few of the remaining points.
"""
# Author: Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn.mixture import BayesianGaussianMixture
print(__doc__)
def plot_ellipses(ax, weights, means, covars):
for n in range(means.shape[0]):
v, w = np.linalg.eigh(covars[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2 * np.sqrt(2) * np.sqrt(v)
ell = mpl.patches.Ellipse(means[n, :2], v[0], v[1], 180 + angle)
ell.set_clip_box(ax.bbox)
ell.set_alpha(weights[n])
ax.add_artist(ell)
def plot_results(ax1, ax2, estimator, dirichlet_concentration_prior, X, y, plot_title=False):
estimator.dirichlet_concentration_prior = dirichlet_concentration_prior
estimator.fit(X)
ax1.set_title("Bayesian Gaussian Mixture for "
r"$dc_0=%.1e$" % dirichlet_concentration_prior)
# ax1.axis('equal')
ax1.scatter(X[:, 0], X[:, 1], s=5, marker='o', color=colors[y], alpha=0.8)
ax1.set_xlim(-2., 2.)
ax1.set_ylim(-3., 3.)
ax1.set_xticks(())
ax1.set_yticks(())
plot_ellipses(ax1, estimator.weights_, estimator.means_,
estimator.covariances_)
ax2.get_xaxis().set_tick_params(direction='out')
ax2.yaxis.grid(True, alpha=0.7)
for k, w in enumerate(estimator.weights_):
ax2.bar(k - .45, w, width=0.9, color='royalblue', zorder=3)
ax2.text(k, w + 0.007, "%.1f%%" % (w * 100.),
horizontalalignment='center')
ax2.set_xlim(-.6, 2 * n_components - .4)
ax2.set_ylim(0., 1.1)
ax2.tick_params(axis='y', which='both', left='off',
right='off', labelleft='off')
ax2.tick_params(axis='x', which='both', top='off')
if plot_title:
ax1.set_ylabel('Estimated Mixtures')
ax2.set_ylabel('Weight of each component')
# Parameters
random_state = 2
n_components, n_features = 3, 2
colors = np.array(['mediumseagreen', 'royalblue', 'r', 'gold',
'orchid', 'indigo', 'darkcyan', 'tomato'])
dirichlet_concentration_prior = np.logspace(-3, 3, 3)
covars = np.array([[[.7, .0], [.0, .1]],
[[.5, .0], [.0, .1]],
[[.5, .0], [.0, .1]]])
samples = np.array([200, 500, 200])
means = np.array([[.0, -.70],
[.0, .0],
[.0, .70]])
# Here we set mean_precision_prior to 0.8 to minimize the influence of the
# prior mean for this dataset
estimator = BayesianGaussianMixture(n_components=2 * n_components,
init_params='random', max_iter=1500,
mean_precision_prior=.8, tol=1e-9,
random_state=random_state)
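# Note: dirichlet_concentration_prior is deliberately left unset here; it is
# assigned per column of subplots in plot_results() for each value of
# np.logspace(-3, 3, 3), i.e. roughly 0.001, 1 and 1000.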
# Generate data
rng = np.random.RandomState(random_state)
X = np.vstack([
rng.multivariate_normal(means[j], covars[j], samples[j])
for j in range(n_components)])
y = np.concatenate([j * np.ones(samples[j], dtype=int)
for j in range(n_components)])
# Plot Results
plt.figure(figsize=(4.7 * 3, 8))
plt.subplots_adjust(bottom=.04, top=0.95, hspace=.05, wspace=.05,
left=.03, right=.97)
gs = gridspec.GridSpec(3, len(dirichlet_concentration_prior))
for k, dc in enumerate(dirichlet_concentration_prior):
plot_results(plt.subplot(gs[0:2, k]), plt.subplot(gs[2, k]),
estimator, dc, X, y, plot_title=k == 0)
plt.show()
|
dyyi/moneybook
|
refs/heads/master
|
venv/Lib/site-packages/pkg_resources/_vendor/packaging/version.py
|
1151
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = [
"parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
]
_Version = collections.namedtuple(
"_Version",
["epoch", "release", "dev", "pre", "post", "local"],
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
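# Illustrative usage sketch (the return values shown are what PEP 440 parsing
# is expected to produce, not captured output):
#
#     parse("1.0.3")     # -> Version('1.0.3')
#     parse("foobar")    # -> LegacyVersion('foobar'), not valid PEP 440
#     parse("1.0.post1") > parse("1.0")   # -> True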
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
_legacy_version_component_re = re.compile(
r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
)
_legacy_version_replacement_map = {
"pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
    # We hardcode an epoch of -1 here. A PEP 440 version can only have an
    # epoch greater than or equal to 0. This will effectively sort the
    # LegacyVersion, which uses the de facto standard originally implemented
    # by setuptools, before all PEP 440 versions.
epoch = -1
    # This scheme is taken from setuptools' pkg_resources.parse_version, prior
    # to its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(
r"^\s*" + VERSION_PATTERN + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(
match.group("pre_l"),
match.group("pre_n"),
),
post=_parse_letter_version(
match.group("post_l"),
match.group("post_n1") or match.group("post_n2"),
),
dev=_parse_letter_version(
match.group("dev_l"),
match.group("dev_n"),
),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
# Pre-release
if self._version.pre is not None:
parts.append("".join(str(x) for x in self._version.pre))
# Post-release
if self._version.post is not None:
parts.append(".post{0}".format(self._version.post[1]))
# Development release
if self._version.dev is not None:
parts.append(".dev{0}".format(self._version.dev[1]))
# Local version segment
if self._version.local is not None:
parts.append(
"+{0}".format(".".join(str(x) for x in self._version.local))
)
return "".join(parts)
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
return "".join(parts)
@property
def local(self):
version_string = str(self)
if "+" in version_string:
return version_string.split("+", 1)[1]
@property
def is_prerelease(self):
return bool(self._version.dev or self._version.pre)
@property
def is_postrelease(self):
return bool(self._version.post)
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll reverse the list, drop all of the now
    # leading zeros until we come to something non-zero, re-reverse the rest
    # back into the correct order, and use that tuple as our sorting key.
release = tuple(
reversed(list(
itertools.dropwhile(
lambda x: x == 0,
reversed(release),
)
))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple(
(i, "") if isinstance(i, int) else (-Infinity, i)
for i in local
)
return epoch, release, pre, post, dev, local
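# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch for the API above. It is illustrative
# only and assumes the equivalent public "packaging" distribution is installed
# (the vendored module here exposes the same names); recent packaging releases
# dropped LegacyVersion, so only PEP 440 comparisons are shown.
# ---------------------------------------------------------------------------
def _sketch_version_ordering():
    from packaging.version import parse  # stand-in for this vendored module
    # PEP 440 ordering: dev release < pre-release < final < post-release.
    assert parse("1.0.dev0") < parse("1.0a1") < parse("1.0") < parse("1.0.post1")
    # A local version segment sorts after the same public version.
    assert parse("1.0+local.1") > parse("1.0")
    # Trailing zeros in the release segment do not affect comparison.
    assert parse("1.0.0") == parse("1.0")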
|
StanleyRoboticsInc/vector
|
refs/heads/master
|
vector_common/vector_third_party/ds4drv/src/ds4drv/utils.py
|
5
|
import sys
from .device import DS4Report
VALID_BUTTONS = DS4Report.__slots__
def iter_except(func, exception, first=None):
"""Call a function repeatedly until an exception is raised.
Converts a call-until-exception interface to an iterator interface.
Like __builtin__.iter(func, sentinel) but uses an exception instead
of a sentinel to end the loop.
"""
try:
if first is not None:
yield first()
while True:
yield func()
except exception:
pass
def parse_button_combo(combo, sep="+"):
def button_prefix(button):
button = button.strip()
if button in ("up", "down", "left", "right"):
prefix = "dpad_"
else:
prefix = "button_"
if prefix + button not in VALID_BUTTONS:
raise ValueError("Invalid button: {0}".format(button))
return prefix + button
return tuple(map(button_prefix, combo.lower().split(sep)))
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})
def zero_copy_slice(buf, start=None, end=None):
# No need for an extra copy on Python 3.3+
if sys.version_info[0] == 3 and sys.version_info[1] >= 3:
buf = memoryview(buf)
return buf[start:end]
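# ---------------------------------------------------------------------------
# Editor's note: a small sketch of how the helpers above are typically used.
# queue.Queue stands in for a real event source and is an assumption of this
# example, not something ds4drv itself requires.
# ---------------------------------------------------------------------------
def _sketch_utils_usage():
    try:
        from queue import Queue, Empty  # Python 3
    except ImportError:
        from Queue import Queue, Empty  # Python 2
    q = Queue()
    for item in ("a", "b", "c"):
        q.put(item)
    # Drain the queue until Empty is raised, with no explicit sentinel value.
    assert list(iter_except(q.get_nowait, Empty)) == ["a", "b", "c"]
    # Button combos are normalised to the DS4Report slot names; "cross" is
    # assumed to be a valid DS4 button name here.
    return parse_button_combo("Cross+Up")  # -> ("button_cross", "dpad_up")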
|
mgadi/naemonbox
|
refs/heads/master
|
sources/psdash/Werkzeug-0.7/tests/test_compat.py
|
2
|
# -*- coding: utf-8 -*-
import sys
import warnings
from subprocess import Popen, PIPE
from werkzeug.test import create_environ
import_code = '''\
import sys
sys.path.insert(0, '..')
import werkzeug.%s
print ':'.join([k[9:] for k, v in sys.modules.iteritems()
if v is not None and k.startswith('werkzeug.')])
'''
def perform_import(module, allowed):
client = Popen([sys.executable, '-c', import_code % module],
stdout=PIPE)
imported = set(client.communicate()[0].strip().split(':'))
rv = imported - allowed - set([module])
print 'leftovers from %r import: %s' % (module, rv)
return rv
def test_old_imports():
"""Make sure everything imports from old places"""
    from werkzeug.utils import Headers, MultiDict, CombinedMultiDict, \
         EnvironHeaders
from werkzeug.http import Accept, MIMEAccept, CharsetAccept, \
LanguageAccept, ETags, HeaderSet, WWWAuthenticate, \
Authorization
def test_exposed_werkzeug_mod():
"""Make sure all things are importable."""
import werkzeug
for key in werkzeug.__all__:
getattr(werkzeug, key)
def test_demand_import():
"""Make sure that we're not importing too much."""
allowed_imports = set(['_internal', 'utils', 'http', 'exceptions',
'datastructures'])
assert perform_import('http', allowed_imports) == set()
assert perform_import('utils', allowed_imports) == set()
allowed_imports.update(('urls', 'formparser', 'wsgi'))
assert perform_import('wrappers', allowed_imports) == set()
allowed_imports.add('wrappers')
assert perform_import('useragents', allowed_imports) == set()
assert perform_import('test', allowed_imports) == set()
assert perform_import('serving', allowed_imports) == set()
def test_fix_headers_in_response():
"""Make sure fix_headers still works for backwards compatibility"""
# ignore some warnings werkzeug emits for backwards compat
for msg in ['called into deprecated fix_headers',
'fix_headers changed behavior']:
warnings.filterwarnings('ignore', message=msg,
category=DeprecationWarning)
from werkzeug.wrappers import Response
class MyResponse(Response):
def fix_headers(self, environ):
Response.fix_headers(self, environ)
self.headers['x-foo'] = "meh"
myresp = MyResponse('Foo')
resp = Response.from_app(myresp, create_environ(method='GET'))
assert resp.headers['x-foo'] == 'meh'
assert resp.data == 'Foo'
warnings.resetwarnings()
|
dcrosta/synk
|
refs/heads/master
|
synk/web.py
|
1
|
from synk.models import User
from synk.forms import UserForm
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from google.appengine.ext.webapp import template
# utilities
def render(request, template_filename, **kwargs):
template_filename = 'synk/templates/' + template_filename
return HttpResponse(template.render(template_filename, kwargs))
# view methods
def index(request):
return render(request, 'index.html')
def register(request):
form = UserForm()
if request.method == 'POST':
form = UserForm(request.POST)
if form.is_valid():
u = User()
u.username = request.POST['username']
u.set_password(request.POST['password'])
u.put()
return HttpResponseRedirect('/')
return render(request, 'register.html',
form=form,
form_action=reverse('synk.web.register'),
submit_button='Register',
)
def dev(request):
return render(request, 'dev.html')
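# ---------------------------------------------------------------------------
# Editor's note: a hypothetical extra view showing the keyword-argument style
# of the render() helper above; 'about.html' and the context key are assumed
# names, not templates that actually ship with this project.
# ---------------------------------------------------------------------------
def _sketch_about(request):
    return render(request, 'about.html', page_title='About Synk')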
|
2013Commons/HUE-SHARK
|
refs/heads/master
|
apps/oozie/src/oozie/migrations/0016_auto__add_field_coordinator_job_properties.py
|
1
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Coordinator.job_properties'
db.add_column('oozie_coordinator', 'job_properties', self.gf('django.db.models.fields.TextField')(default='[]'), keep_default=False)
def backwards(self, orm):
# Deleting field 'Coordinator.job_properties'
db.delete_column('oozie_coordinator', 'job_properties')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'oozie.coordinator': {
'Meta': {'object_name': 'Coordinator', '_ormbases': ['oozie.Job']},
'concurrency': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 16, 9, 46, 22, 231292)'}),
'execution': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 13, 9, 46, 22, 231260)'}),
'throttle': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timeout': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']", 'null': 'True'})
},
'oozie.datainput': {
'Meta': {'object_name': 'DataInput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataoutput': {
'Meta': {'object_name': 'DataOutput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataset': {
'Meta': {'object_name': 'Dataset'},
'advanced_end_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128', 'blank': 'True'}),
'advanced_start_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128'}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'done_flag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_choice': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 13, 9, 46, 22, 232054)'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'uri': ('django.db.models.fields.CharField', [], {'default': "'/data/${YEAR}${MONTH}${DAY}'", 'max_length': '1024'})
},
'oozie.decision': {
'Meta': {'object_name': 'Decision'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.decisionend': {
'Meta': {'object_name': 'DecisionEnd'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.distcp': {
'Meta': {'object_name': 'DistCp'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.email': {
'Meta': {'object_name': 'Email'},
'body': ('django.db.models.fields.TextField', [], {'default': "''"}),
'cc': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'subject': ('django.db.models.fields.TextField', [], {'default': "''"}),
'to': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.end': {
'Meta': {'object_name': 'End'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fork': {
'Meta': {'object_name': 'Fork'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fs': {
'Meta': {'object_name': 'Fs'},
'chmods': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'deletes': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'mkdirs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'moves': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'touchzs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'})
},
'oozie.generic': {
'Meta': {'object_name': 'Generic'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.history': {
'Meta': {'object_name': 'History'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Job']"}),
'oozie_job_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'properties': ('django.db.models.fields.TextField', [], {}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'oozie.hive': {
'Meta': {'object_name': 'Hive'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.hive.defaults","value":"hive-site.xml"}]\''}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.java': {
'Meta': {'object_name': 'Java'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'args': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'blank': 'True'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'java_opts': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'main_class': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.job': {
'Meta': {'object_name': 'Job'},
'deployment_dir': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_shared': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
'schema_version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'oozie.join': {
'Meta': {'object_name': 'Join'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.kill': {
'Meta': {'object_name': 'Kill'},
'message': ('django.db.models.fields.CharField', [], {'default': "'Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]'", 'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.link': {
'Meta': {'object_name': 'Link'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_node'", 'to': "orm['oozie.Node']"}),
'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_node'", 'to': "orm['oozie.Node']"})
},
'oozie.mapreduce': {
'Meta': {'object_name': 'Mapreduce'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.node': {
'Meta': {'object_name': 'Node'},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents'", 'symmetrical': 'False', 'through': "orm['oozie.Link']", 'to': "orm['oozie.Node']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'node_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.pig': {
'Meta': {'object_name': 'Pig'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.shell': {
'Meta': {'object_name': 'Shell'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.sqoop': {
'Meta': {'object_name': 'Sqoop'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'oozie.ssh': {
'Meta': {'object_name': 'Ssh'},
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'oozie.start': {
'Meta': {'object_name': 'Start'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'})
},
'oozie.streaming': {
'Meta': {'object_name': 'Streaming'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'mapper': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'reducer': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'oozie.subworkflow': {
'Meta': {'object_name': 'SubWorkflow'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'propagate_configuration': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'sub_workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.workflow': {
'Meta': {'object_name': 'Workflow', '_ormbases': ['oozie.Job']},
'end': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'end_workflow'", 'null': 'True', 'to': "orm['oozie.End']"}),
'is_single': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'start': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'start_workflow'", 'null': 'True', 'to': "orm['oozie.Start']"})
}
}
complete_apps = ['oozie']
|
turbidsoul/tsutil
|
refs/heads/master
|
__test__/util_test.py
|
1
|
# -*- coding: utf-8 -*-
from tsutil.util import singleton, singleton_fun
from unittest import TestCase, main
@singleton
class SingleSample(object):
pass
@singleton_fun
def single_sample(a, b):
return a + b
class SingleTest(TestCase):
def test_singleclass(self):
s1 = SingleSample()
s2 = SingleSample()
self.assertEqual(s1, s2)
def test_singlefunc(self):
s1 = single_sample(1, 2)
s2 = single_sample(1, 2)
self.assertEqual(s1, s2)
def test_singlefunc2(self):
s1 = single_sample(1, 2)
s2 = single_sample(1, 2)
self.assertIs(s1, s2)
def test_singlefunc3(self):
s1 = single_sample(2, 2)
s2 = single_sample(2, 3)
self.assertIsNot(s1, s2)
def test_singlefunc4(self):
s1 = single_sample(2, 2)
s2 = single_sample(2, 3)
self.assertIsNot(hash(s1), hash(s2))
if __name__ == "__main__":
main()
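# ---------------------------------------------------------------------------
# Editor's note: tsutil.util is not included in this dump, so here is a
# minimal sketch of decorators with the behaviour these tests assume (one
# shared instance per class, one cached result per argument tuple). The
# "_sketch_" names are hypothetical; the project's real code may differ.
# ---------------------------------------------------------------------------
def _sketch_singleton(cls):
    instances = {}
    def get_instance(*args, **kwargs):
        # Create the instance once and hand the same object back afterwards.
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        return instances[cls]
    return get_instance

def _sketch_singleton_fun(fn):
    cache = {}
    def wrapper(*args):
        # Equal argument tuples share one cached result object.
        if args not in cache:
            cache[args] = fn(*args)
        return cache[args]
    return wrapper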
|
hachard/Cra-Magnet
|
refs/heads/master
|
flask/lib/python3.5/site-packages/flask/cli.py
|
42
|
# -*- coding: utf-8 -*-
"""
flask.cli
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from threading import Lock, Thread
from functools import update_wrapper
import click
from ._compat import iteritems, reraise
from .helpers import get_debug_flag
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in 'app', 'application':
app = getattr(module, attr_name, None)
if app is not None and isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [v for k, v in iteritems(module.__dict__)
if isinstance(v, Flask)]
if len(matches) == 1:
return matches[0]
raise NoAppException('Failed to find application in module "%s". Are '
'you sure it contains a Flask application? Maybe '
'you wrapped it in a WSGI middleware or you are '
'using a factory function.' % module.__name__)
def prepare_exec_for_file(filename):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
module = []
# Chop off file extensions or package markers
if os.path.split(filename)[1] == '__init__.py':
filename = os.path.dirname(filename)
elif filename.endswith('.py'):
filename = filename[:-3]
else:
raise NoAppException('The file provided (%s) does exist but is not a '
'valid Python file. This means that it cannot '
                             'be used as an application. Please change the '
'extension to .py' % filename)
filename = os.path.realpath(filename)
dirpath = filename
while 1:
dirpath, extra = os.path.split(dirpath)
module.append(extra)
if not os.path.isfile(os.path.join(dirpath, '__init__.py')):
break
sys.path.insert(0, dirpath)
return '.'.join(module[::-1])
def locate_app(app_id):
"""Attempts to locate the application."""
__traceback_hide__ = True
if ':' in app_id:
module, app_obj = app_id.split(':', 1)
else:
module = app_id
app_obj = None
__import__(module)
mod = sys.modules[module]
if app_obj is None:
app = find_best_app(mod)
else:
app = getattr(mod, app_obj, None)
if app is None:
raise RuntimeError('Failed to find application in module "%s"'
% module)
return app
def find_default_import_path():
app = os.environ.get('FLASK_APP')
if app is None:
return
if os.path.isfile(app):
return prepare_exec_for_file(app)
return app
class DispatchingApp(object):
"""Special application that dispatches to a flask application which
is imported by name in a background thread. If an error happens
    it is recorded and shown as part of the WSGI handling, which in case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=False):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc_info = None
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True
with self._lock:
try:
self._load_unlocked()
except Exception:
self._bg_loading_exc_info = sys.exc_info()
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True
exc_info = self._bg_loading_exc_info
if exc_info is not None:
self._bg_loading_exc_info = None
reraise(*exc_info)
def _load_unlocked(self):
__traceback_hide__ = True
self._app = rv = self.loader()
self._bg_loading_exc_info = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
class ScriptInfo(object):
"""Help object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click. In future versions of Flask this object will most likely play
a bigger role. Typically it's created automatically by the
:class:`FlaskGroup` but you can also manually create it and pass it
    onwards as a click object.
"""
def __init__(self, app_import_path=None, create_app=None):
if create_app is None:
if app_import_path is None:
app_import_path = find_default_import_path()
self.app_import_path = app_import_path
else:
app_import_path = None
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
        this multiple times will just result in the already loaded app being
        returned.
"""
__traceback_hide__ = True
if self._loaded_app is not None:
return self._loaded_app
if self.create_app is not None:
rv = self.create_app(self)
else:
if not self.app_import_path:
raise NoAppException(
'Could not locate Flask application. You did not provide '
'the FLASK_APP environment variable.\n\nFor more '
'information see '
'http://flask.pocoo.org/docs/latest/quickstart/')
rv = locate_app(self.app_import_path)
debug = get_debug_flag()
if debug is not None:
rv.debug = debug
self._loaded_app = rv
return rv
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop('with_appcontext', True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault('cls', AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this.
For information as of why this is useful see :ref:`custom-scripts`.
:param add_default_commands: if this is True then the default run and
        shell commands will be added.
:param create_app: an optional callback that is passed the script info
and returns the loaded app.
"""
def __init__(self, add_default_commands=True, create_app=None, **extra):
AppGroup.__init__(self, **extra)
self.create_app = create_app
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self):
if self._loaded_plugin_commands:
return
try:
import pkg_resources
except ImportError:
self._loaded_plugin_commands = True
return
for ep in pkg_resources.iter_entry_points('flask.commands'):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx, name):
self._load_plugin_commands()
# We load built-in commands first as these should always be the
# same no matter what the app does. If the app does want to
# override this it needs to make a custom instance of this group
# and not attach the default commands.
#
# This also means that the script stays functional in case the
# application completely fails.
rv = AppGroup.get_command(self, ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
try:
rv = info.load_app().cli.get_command(ctx, name)
if rv is not None:
return rv
except NoAppException:
pass
def list_commands(self, ctx):
self._load_plugin_commands()
# The commands available is the list of both the application (if
# available) plus the builtin commands.
rv = set(click.Group.list_commands(self, ctx))
info = ctx.ensure_object(ScriptInfo)
try:
rv.update(info.load_app().cli.list_commands(ctx))
except Exception:
# Here we intentionally swallow all exceptions as we don't
# want the help page to break if the app does not exist.
# If someone attempts to use the command we try to create
# the app again and this will give us the error.
pass
return sorted(rv)
def main(self, *args, **kwargs):
obj = kwargs.get('obj')
if obj is None:
obj = ScriptInfo(create_app=self.create_app)
kwargs['obj'] = obj
kwargs.setdefault('auto_envvar_prefix', 'FLASK')
return AppGroup.main(self, *args, **kwargs)
@click.command('run', short_help='Runs a development server.')
@click.option('--host', '-h', default='127.0.0.1',
help='The interface to bind to.')
@click.option('--port', '-p', default=5000,
help='The port to bind to.')
@click.option('--reload/--no-reload', default=None,
help='Enable or disable the reloader. By default the reloader '
'is active if debug is enabled.')
@click.option('--debugger/--no-debugger', default=None,
help='Enable or disable the debugger. By default the debugger '
'is active if debug is enabled.')
@click.option('--eager-loading/--lazy-loader', default=None,
help='Enable or disable eager loading. By default eager '
'loading is enabled if the reloader is disabled.')
@click.option('--with-threads/--without-threads', default=False,
help='Enable or disable multithreading.')
@pass_script_info
def run_command(info, host, port, reload, debugger, eager_loading,
with_threads):
"""Runs a local development server for the Flask application.
This local server is recommended for development purposes only but it
can also be used for simple intranet deployments. By default it will
not support any sort of concurrency at all to simplify debugging. This
can be changed with the --with-threads option which will enable basic
multithreading.
The reloader and debugger are by default enabled if the debug flag of
Flask is enabled and disabled otherwise.
"""
from werkzeug.serving import run_simple
debug = get_debug_flag()
if reload is None:
reload = bool(debug)
if debugger is None:
debugger = bool(debug)
if eager_loading is None:
eager_loading = not reload
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
    # Extra startup messages. This depends a bit on Werkzeug internals to
# not double execute when the reloader kicks in.
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
# If we have an import path we can print it out now which can help
# people understand what's being served. If we do not have an
# import path because the app was loaded through a callback then
# we won't print anything.
if info.app_import_path is not None:
print(' * Serving Flask app "%s"' % info.app_import_path)
if debug is not None:
print(' * Forcing debug mode %s' % (debug and 'on' or 'off'))
run_simple(host, port, app, use_reloader=reload,
use_debugger=debugger, threaded=with_threads)
@click.command('shell', short_help='Runs a shell in the app context.')
@with_appcontext
def shell_command():
"""Runs an interactive Python shell in the context of a given
Flask application. The application will populate the default
    namespace of this shell according to its configuration.
This is useful for executing small snippets of management code
    without having to manually configure the application.
"""
import code
from flask.globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = 'Python %s on %s\nApp: %s%s\nInstance: %s' % (
sys.version,
sys.platform,
app.import_name,
app.debug and ' [debug]' or '',
app.instance_path,
)
ctx = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get('PYTHONSTARTUP')
if startup and os.path.isfile(startup):
with open(startup, 'r') as f:
eval(compile(f.read(), startup, 'exec'), ctx)
ctx.update(app.make_shell_context())
code.interact(banner=banner, local=ctx)
cli = FlaskGroup(help="""\
This shell command acts as a general utility script for Flask applications.
It loads the application configured through the FLASK_APP environment
variable and then provides commands provided either by the application or by
Flask itself.
The most useful commands are the "run" and "shell" commands.
Example usage:
\b
%(prefix)s%(cmd)s FLASK_APP=hello
%(prefix)s%(cmd)s FLASK_DEBUG=1
%(prefix)sflask run
""" % {
'cmd': os.name == 'posix' and 'export' or 'set',
'prefix': os.name == 'posix' and '$ ' or '',
})
def main(as_module=False):
this_module = __package__ + '.cli'
args = sys.argv[1:]
if as_module:
if sys.version_info >= (2, 7):
name = 'python -m ' + this_module.rsplit('.', 1)[0]
else:
name = 'python -m ' + this_module
# This module is always executed as "python -m flask.run" and as such
# we need to ensure that we restore the actual command line so that
# the reloader can properly operate.
sys.argv = ['-m', this_module] + sys.argv[1:]
else:
name = None
cli.main(args=args, prog_name=name)
if __name__ == '__main__':
main(as_module=True)
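# ---------------------------------------------------------------------------
# Editor's note: a minimal sketch of the "custom scripts" use case that the
# FlaskGroup docstring refers to. The "wiki" application and create_app()
# factory are hypothetical names used only for illustration; this is not part
# of Flask's public surface.
# ---------------------------------------------------------------------------
def _sketch_custom_cli():
    """Build a FlaskGroup-based management CLI for a hypothetical app."""
    from flask import Flask

    def create_app(info):
        # `info` is the ScriptInfo instance described above.
        return Flask('wiki')

    return FlaskGroup(create_app=create_app,
                      help='Management script for the hypothetical wiki app.')
# Example (would print the group help, including app-provided commands):
#   _sketch_custom_cli().main(args=['--help'], prog_name='wiki')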
|
mikegrima/botor
|
refs/heads/master
|
botor/aws/iam.py
|
1
|
from botor.aws.sts import sts_conn
from botor.decorators import rate_limited
from joblib import Parallel, delayed
@sts_conn('iam')
@rate_limited()
def list_roles(**kwargs):
client = kwargs['client']
roles = []
marker = {}
while True:
response = client.list_roles(**marker)
roles.extend(response['Roles'])
if response['IsTruncated']:
marker['Marker'] = response['Marker']
else:
return roles
@rate_limited()
@sts_conn('iam', service_type='client')
def get_role_inline_policy_names(role, client=None, **kwargs):
marker = {}
inline_policies = []
while True:
response = client.list_role_policies(
RoleName=role['RoleName'],
**marker
)
inline_policies.extend(response['PolicyNames'])
if response['IsTruncated']:
marker['Marker'] = response['Marker']
else:
return inline_policies
def get_role_inline_policies(role, **kwargs):
policy_names = get_role_inline_policy_names(role, **kwargs)
policies = zip(
policy_names,
Parallel(n_jobs=20, backend="threading")(
delayed(get_role_inline_policy_document)
(role, policy_name, **kwargs) for policy_name in policy_names
)
)
policies = dict(policies)
return policies
@sts_conn('iam', service_type='client')
@rate_limited()
def get_role_inline_policy_document(role, policy_name, client=None, **kwargs):
response = client.get_role_policy(
RoleName=role['RoleName'],
PolicyName=policy_name
)
return response.get('PolicyDocument')
@sts_conn('iam', service_type='client')
@rate_limited()
def get_role_instance_profiles(role, client=None, **kwargs):
marker = {}
instance_profiles = []
while True:
response = client.list_instance_profiles_for_role(
RoleName=role['RoleName'],
**marker
)
instance_profiles.extend(response['InstanceProfiles'])
if response['IsTruncated']:
marker['Marker'] = response['Marker']
else:
break
return [
{
'path': ip['Path'],
'instance_profile_name': ip['InstanceProfileName'],
'create_date': ip['CreateDate'].strftime('%Y-%m-%dT%H:%M:%SZ'),
'instance_profile_id': ip['InstanceProfileId'],
'arn': ip['Arn']
} for ip in instance_profiles
]
@sts_conn('iam', service_type='client')
@rate_limited()
def get_role_managed_policies(role, client=None, **kwargs):
marker = {}
policies = []
while True:
response = client.list_attached_role_policies(
RoleName=role['RoleName'],
**marker
)
policies.extend(response['AttachedPolicies'])
if response['IsTruncated']:
marker['Marker'] = response['Marker']
else:
break
return [{'name': p['PolicyName'], 'arn': p['PolicyArn']} for p in policies]
@sts_conn('iam', service_type='resource')
@rate_limited()
def all_managed_policies(resource=None, **kwargs):
managed_policies = {}
for policy in resource.policies.all():
for attached_role in policy.attached_roles.all():
policy_dict = {
"name": policy.policy_name,
"arn": policy.arn,
"version": policy.default_version_id
}
if attached_role.arn not in managed_policies:
managed_policies[attached_role.arn] = [policy_dict]
else:
managed_policies[attached_role.arn].append(policy_dict)
return managed_policies
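# ---------------------------------------------------------------------------
# Editor's note: the fan-out used in get_role_inline_policies() above is just
# joblib's Parallel/delayed zipped back into a dict. A self-contained sketch
# of that pattern with no AWS calls; `fetch` is any one-argument callable.
# ---------------------------------------------------------------------------
def _sketch_parallel_fetch(keys, fetch, n_jobs=20):
    """Run fetch(key) for every key on a thread pool and return {key: value}."""
    values = Parallel(n_jobs=n_jobs, backend="threading")(
        delayed(fetch)(key) for key in keys
    )
    return dict(zip(keys, values))
# Example: _sketch_parallel_fetch(["a", "b"], str.upper) -> {"a": "A", "b": "B"}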
|
LukeHoersten/ansible-modules-core
|
refs/heads/devel
|
cloud/amazon/iam_cert.py
|
102
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: iam_cert
short_description: Manage server certificates for use on ELBs and CloudFront
description:
- Allows for the management of server certificates
version_added: "2.0"
options:
name:
description:
- Name of certificate to add, update or remove.
required: true
aliases: []
new_name:
description:
- When present, this will update the name of the cert with the value passed here.
required: false
aliases: []
new_path:
description:
- When present, this will update the path of the cert with the value passed here.
required: false
aliases: []
state:
description:
      - Whether to create or delete the certificate. When "present" is specified, it will attempt an update if new_path or new_name is specified.
required: true
default: null
choices: [ "present", "absent" ]
aliases: []
path:
description:
- When creating or updating, specify the desired path of the certificate
required: false
default: "/"
aliases: []
cert_chain:
description:
- The path to the CA certificate chain in PEM encoded format.
required: false
default: null
aliases: []
cert:
description:
- The path to the certificate body in PEM encoded format.
required: false
aliases: []
key:
description:
- The path to the private key of the certificate in PEM encoded format.
dup_ok:
description:
      - By default the module will not upload a certificate that is already uploaded into AWS. If set to True, it will upload the certificate as long as the name is unique.
required: false
default: False
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
requirements: [ "boto" ]
author: Jonathan I. Davila
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Basic server certificate upload
tasks:
- name: Upload Certificate
iam_cert:
name: very_ssl
state: present
cert: somecert.pem
key: privcertkey
cert_chain: myverytrustedchain
'''
import json
import sys
try:
import boto
import boto.iam
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def cert_meta(iam, name):
    # Fetch the certificate once and read all of the metadata fields from the
    # single result, instead of issuing one API call per field.
    result = iam.get_server_certificate(name).\
        get_server_certificate_result.server_certificate
    meta = result.server_certificate_metadata
    opath = meta.path
    ocert = result.certificate_body
    ocert_id = meta.server_certificate_id
    upload_date = meta.upload_date
    exp = meta.expiration
    return opath, ocert, ocert_id, upload_date, exp
def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):
update=False
if any(ct in orig_cert_names for ct in [name, new_name]):
for i_name in [name, new_name]:
if i_name is None:
continue
if cert is not None:
try:
c_index=orig_cert_names.index(i_name)
except NameError:
continue
else:
if orig_cert_bodies[c_index] == cert:
update=True
break
elif orig_cert_bodies[c_index] != cert:
module.fail_json(changed=False, msg='A cert with the name %s already exists and'
' has a different certificate body associated'
                                      ' with it. Certificates cannot have the same name.' % i_name)
else:
update=True
break
elif cert in orig_cert_bodies and not dup_ok:
for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
if crt_body == cert:
module.fail_json(changed=False, msg='This certificate already'
' exists under the name %s' % crt_name)
return update
def cert_action(module, iam, name, cpath, new_name, new_path, state,
cert, key, chain, orig_cert_names, orig_cert_bodies, dup_ok):
if state == 'present':
update = dup_check(module, iam, name, new_name, cert, orig_cert_names,
orig_cert_bodies, dup_ok)
if update:
opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
changed=True
if new_name and new_path:
iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)
module.exit_json(changed=changed, original_name=name, new_name=new_name,
original_path=opath, new_path=new_path, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif new_name and not new_path:
iam.update_server_cert(name, new_cert_name=new_name)
module.exit_json(changed=changed, original_name=name, new_name=new_name,
cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif not new_name and new_path:
iam.update_server_cert(name, new_path=new_path)
module.exit_json(changed=changed, name=new_name,
original_path=opath, new_path=new_path, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
else:
changed=False
module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp,
msg='No new path or name specified. No changes made')
else:
changed=True
iam.upload_server_cert(name, cert, key, cert_chain=chain, path=cpath)
opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif state == 'absent':
if name in orig_cert_names:
changed=True
iam.delete_server_cert(name)
module.exit_json(changed=changed, deleted_cert=name)
else:
changed=False
            module.exit_json(changed=changed, msg='Certificate with the name %s is already absent' % name)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(
default=None, required=True, choices=['present', 'absent']),
name=dict(default=None, required=False),
cert=dict(default=None, required=False),
key=dict(default=None, required=False),
cert_chain=dict(default=None, required=False),
new_name=dict(default=None, required=False),
path=dict(default='/', required=False),
new_path=dict(default=None, required=False),
dup_ok=dict(default=False, required=False, choices=[False, True])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[],
)
if not HAS_BOTO:
module.fail_json(msg="Boto is required for this module")
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
try:
iam = boto.iam.connection.IAMConnection(
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
state = module.params.get('state')
name = module.params.get('name')
path = module.params.get('path')
new_name = module.params.get('new_name')
new_path = module.params.get('new_path')
cert_chain = module.params.get('cert_chain')
dup_ok = module.params.get('dup_ok')
if state == 'present':
cert = open(module.params.get('cert'), 'r').read().rstrip()
key = open(module.params.get('key'), 'r').read().rstrip()
if cert_chain is not None:
cert_chain = open(module.params.get('cert_chain'), 'r').read()
else:
key = cert = cert_chain = None
orig_certs = [ctb['server_certificate_name'] for ctb in \
iam.get_all_server_certs().\
list_server_certificates_result.\
server_certificate_metadata_list]
orig_bodies = [iam.get_server_certificate(thing).\
get_server_certificate_result.\
certificate_body \
for thing in orig_certs]
if new_name == name:
new_name = None
if new_path == path:
new_path = None
changed = False
try:
cert_action(module, iam, name, path, new_name, new_path, state,
cert, key, cert_chain, orig_certs, orig_bodies, dup_ok)
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err), debug=[cert,key])
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
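# Illustrative playbook usage (hypothetical task names and file paths; the
# module name below assumes this file is registered as `iam_cert`):
#
# - name: Upload a server certificate
#   iam_cert:
#     name: my_cert
#     state: present
#     cert: /path/to/cert.pem
#     key: /path/to/key.pem
#     cert_chain: /path/to/chain.pem
#     dup_ok: True
#
# - name: Remove the certificate again
#   iam_cert:
#     name: my_cert
#     state: absent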
|
dimdung/boto
|
refs/heads/develop
|
tests/integration/datapipeline/test_layer1.py
|
136
|
#!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import time
from tests.unit import unittest
from boto.datapipeline import layer1
class TestDataPipeline(unittest.TestCase):
datapipeline = True
def setUp(self):
self.connection = layer1.DataPipelineConnection()
self.sample_pipeline_objects = [
{'fields': [
{'key': 'workerGroup', 'stringValue': 'MyworkerGroup'}],
'id': 'Default',
'name': 'Default'},
{'fields': [
{'key': 'startDateTime', 'stringValue': '2012-09-25T17:00:00'},
{'key': 'type', 'stringValue': 'Schedule'},
{'key': 'period', 'stringValue': '1 hour'},
{'key': 'endDateTime', 'stringValue': '2012-09-25T18:00:00'}],
'id': 'Schedule',
'name': 'Schedule'},
{'fields': [
{'key': 'type', 'stringValue': 'ShellCommandActivity'},
{'key': 'command', 'stringValue': 'echo hello'},
{'key': 'parent', 'refValue': 'Default'},
{'key': 'schedule', 'refValue': 'Schedule'}],
'id': 'SayHello',
'name': 'SayHello'}
]
self.connection.auth_service_name = 'datapipeline'
def create_pipeline(self, name, unique_id, description=None):
response = self.connection.create_pipeline(name, unique_id,
description)
pipeline_id = response['pipelineId']
self.addCleanup(self.connection.delete_pipeline, pipeline_id)
return pipeline_id
def get_pipeline_state(self, pipeline_id):
response = self.connection.describe_pipelines([pipeline_id])
for attr in response['pipelineDescriptionList'][0]['fields']:
if attr['key'] == '@pipelineState':
return attr['stringValue']
def test_can_create_and_delete_a_pipeline(self):
response = self.connection.create_pipeline('name', 'unique_id',
'description')
self.connection.delete_pipeline(response['pipelineId'])
def test_validate_pipeline(self):
pipeline_id = self.create_pipeline('name2', 'unique_id2')
self.connection.validate_pipeline_definition(
self.sample_pipeline_objects, pipeline_id)
def test_put_pipeline_definition(self):
pipeline_id = self.create_pipeline('name3', 'unique_id3')
self.connection.put_pipeline_definition(self.sample_pipeline_objects,
pipeline_id)
# We should now be able to get the pipeline definition and see
# that it matches what we put.
response = self.connection.get_pipeline_definition(pipeline_id)
objects = response['pipelineObjects']
self.assertEqual(len(objects), 3)
self.assertEqual(objects[0]['id'], 'Default')
self.assertEqual(objects[0]['name'], 'Default')
self.assertEqual(objects[0]['fields'],
[{'key': 'workerGroup', 'stringValue': 'MyworkerGroup'}])
def test_activate_pipeline(self):
pipeline_id = self.create_pipeline('name4', 'unique_id4')
self.connection.put_pipeline_definition(self.sample_pipeline_objects,
pipeline_id)
self.connection.activate_pipeline(pipeline_id)
attempts = 0
state = self.get_pipeline_state(pipeline_id)
while state != 'SCHEDULED' and attempts < 10:
time.sleep(10)
attempts += 1
state = self.get_pipeline_state(pipeline_id)
if state != 'SCHEDULED':
self.fail("Pipeline did not become scheduled "
"after 10 attempts.")
objects = self.connection.describe_objects(['Default'], pipeline_id)
field = objects['pipelineObjects'][0]['fields'][0]
self.assertDictEqual(field, {'stringValue': 'COMPONENT', 'key': '@sphere'})
def test_list_pipelines(self):
pipeline_id = self.create_pipeline('name5', 'unique_id5')
pipeline_id_list = [p['id'] for p in
self.connection.list_pipelines()['pipelineIdList']]
self.assertTrue(pipeline_id in pipeline_id_list)
if __name__ == '__main__':
unittest.main()
|
libracore/erpnext
|
refs/heads/v12
|
erpnext/patches/v8_10/change_default_customer_credit_days.py
|
11
|
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc("selling", "doctype", "customer")
frappe.reload_doc("buying", "doctype", "supplier")
frappe.reload_doc("setup", "doctype", "supplier_type")
frappe.reload_doc("accounts", "doctype", "payment_term")
frappe.reload_doc("accounts", "doctype", "payment_terms_template_detail")
frappe.reload_doc("accounts", "doctype", "payment_terms_template")
payment_terms = []
records = []
for doctype in ("Customer", "Supplier", "Supplier Type"):
credit_days = frappe.db.sql("""
SELECT DISTINCT `credit_days`, `credit_days_based_on`, `name`
from `tab{0}`
where
((credit_days_based_on='Fixed Days' or credit_days_based_on is null)
and credit_days is not null)
or credit_days_based_on='Last Day of the Next Month'
""".format(doctype))
credit_records = ((record[0], record[1], record[2]) for record in credit_days)
for days, based_on, party_name in credit_records:
if based_on == "Fixed Days":
pyt_template_name = 'Default Payment Term - N{0}'.format(days)
else:
pyt_template_name = 'Default Payment Term - EO2M'
if not frappe.db.exists("Payment Terms Template", pyt_template_name):
payment_term = make_payment_term(days, based_on)
template = make_template(payment_term)
else:
template = frappe.get_doc("Payment Terms Template", pyt_template_name)
payment_terms.append('WHEN `name`={0} THEN {1}'.format(frappe.db.escape(party_name), frappe.db.escape(template.template_name)))
records.append(frappe.db.escape(party_name))
begin_query_str = "UPDATE `tab{0}` SET `payment_terms` = CASE ".format(doctype)
value_query_str = " ".join(payment_terms)
cond_query_str = " ELSE `payment_terms` END WHERE "
if records:
frappe.db.sql(
begin_query_str + value_query_str + cond_query_str + '`name` IN %s',
(records,)
)
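# Illustrative shape of the statement built above (hypothetical names/values):
# UPDATE `tabCustomer` SET `payment_terms` = CASE
#     WHEN `name`='CUST-0001' THEN 'Default Payment Term - N30'
#     WHEN `name`='CUST-0002' THEN 'Default Payment Term - EO2M'
#     ELSE `payment_terms` END
# WHERE `name` IN ('CUST-0001', 'CUST-0002')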
def make_template(payment_term):
doc = frappe.new_doc('Payment Terms Template Detail')
doc.payment_term = payment_term.payment_term_name
doc.due_date_based_on = payment_term.due_date_based_on
doc.invoice_portion = payment_term.invoice_portion
doc.description = payment_term.description
doc.credit_days = payment_term.credit_days
doc.credit_months = payment_term.credit_months
template = frappe.new_doc('Payment Terms Template')
template.template_name = 'Default Payment Term - {0}'.format(payment_term.payment_term_name)
template.append('terms', doc)
template.save()
return template
def make_payment_term(days, based_on):
based_on_map = {
'Fixed Days': 'Day(s) after invoice date',
'Last Day of the Next Month': 'Month(s) after the end of the invoice month'
}
doc = frappe.new_doc('Payment Term')
doc.due_date_based_on = based_on_map.get(based_on)
doc.invoice_portion = 100
if based_on == 'Fixed Days':
doc.credit_days = days
doc.description = 'Net payable within {0} days'.format(days)
doc.payment_term_name = 'N{0}'.format(days)
else:
doc.credit_months = 1
doc.description = 'Net payable by the end of next month'
doc.payment_term_name = 'EO2M'
doc.save()
return doc
|
aleida/django
|
refs/heads/master
|
django/contrib/localflavor/at/forms.py
|
109
|
"""
AT-specific Form helpers
"""
from __future__ import unicode_literals
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _
re_ssn = re.compile(r'^\d{4} \d{6}')
class ATZipCodeField(RegexField):
"""
A form field that validates that its input is an Austrian postcode.
Accepts 4 digits (the first digit must be greater than 0).
"""
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(ATZipCodeField, self).__init__(r'^[1-9]{1}\d{3}$',
max_length, min_length, *args, **kwargs)
class ATStateSelect(Select):
"""
A Select widget that uses a list of AT states as its choices.
"""
def __init__(self, attrs=None):
from django.contrib.localflavor.at.at_states import STATE_CHOICES
super(ATStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
class ATSocialSecurityNumberField(Field):
"""
Austrian Social Security numbers are composed of a 4-digit and a 6-digit
field. The latter usually represents the person's birthdate, while the
first 4 digits consist of a 3-digit counter and a one-digit checksum.
The 6-digit field can differ from the person's birthdate if the
3-digit counter overflowed.
This code is based on information available on
http://de.wikipedia.org/wiki/Sozialversicherungsnummer#.C3.96sterreich
"""
default_error_messages = {
'invalid': _('Enter a valid Austrian Social Security Number in XXXX XXXXXX format.'),
}
def clean(self, value):
value = super(ATSocialSecurityNumberField, self).clean(value)
if value in EMPTY_VALUES:
return ""
if not re_ssn.search(value):
raise ValidationError(self.error_messages['invalid'])
sqnr, date = value.split(" ")
sqnr, check = (sqnr[:3], (sqnr[3]))
if int(sqnr) < 100:
raise ValidationError(self.error_messages['invalid'])
res = int(sqnr[0])*3 + int(sqnr[1])*7 + int(sqnr[2])*9 \
+ int(date[0])*5 + int(date[1])*8 + int(date[2])*4 \
+ int(date[3])*2 + int(date[4])*1 + int(date[5])*6
res = res % 11
if res != int(check):
raise ValidationError(self.error_messages['invalid'])
return '%s%s %s'%(sqnr, check, date,)
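# Worked example of the checksum above (illustrative, not part of Django):
# for the hypothetical value '1237 010180', sqnr='123', check='7', date='010180',
# and 1*3 + 2*7 + 3*9 + 0*5 + 1*8 + 0*4 + 1*2 + 8*1 + 0*6 = 62, with 62 % 11 == 7,
# so the value validates and clean() returns '1237 010180'.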
|
qiang437587687/pythonBrother
|
refs/heads/master
|
docopt-master-zhangTest/examples/git/git_branch.py
|
17
|
"""
usage: git branch [options] [-r | -a] [--merged=<commit> | --no-merged=<commit>]
git branch [options] [-l] [-f] <branchname> [<start-point>]
git branch [options] [-r] (-d | -D) <branchname>
git branch [options] (-m | -M) [<oldbranch>] <newbranch>
Generic options
-h, --help
-v, --verbose show hash and subject, give twice for upstream branch
-t, --track set up tracking mode (see git-pull(1))
--set-upstream change upstream info
--color=<when> use colored output
-r act on remote-tracking branches
--contains=<commit> print only branches that contain the commit
--abbrev=<n> use <n> digits to display SHA-1s
Specific git-branch actions:
-a list both remote-tracking and local branches
-d delete fully merged branch
-D delete branch (even if not merged)
-m move/rename a branch and its reflog
-M move/rename a branch, even if target exists
-l create the branch's reflog
-f, --force force creation (when already exists)
--no-merged=<commit> print only not merged branches
--merged=<commit> print only merged branches
"""
from docopt import docopt
if __name__ == '__main__':
print(docopt(__doc__))
|
yichaoS/cbioportal
|
refs/heads/master
|
core/src/main/scripts/flagStudyForProductionPortalDeployment.py
|
25
|
#! /usr/bin/env python
# ------------------------------------------------------------------------------
# Script which flags a study within the triage portal for deployment into
# production portal.
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# imports
import sys
import getopt
import gdata.docs.client
import gdata.docs.service
import gdata.spreadsheet.service
import httplib2
from oauth2client import client
from oauth2client.file import Storage
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run_flow, argparser
# ------------------------------------------------------------------------------
# globals
# some file descriptors
ERROR_FILE = sys.stderr
OUTPUT_FILE = sys.stdout
# column constants on google spreadsheet
TRIAGE_PORTAL_KEY = "triage-portal"
MSK_AUTOMATION_PORTAL_KEY = "msk-automation-portal"
# ------------------------------------------------------------------------------
# subroutines
# ------------------------------------------------------------------------------
# logs into google spreadsheet client
def get_gdata_credentials(secrets, creds, scope, force=False):
storage = Storage(creds)
credentials = storage.get()
if credentials is None or credentials.invalid or force:
credentials = run_flow(flow_from_clientsecrets(secrets, scope=scope), storage, argparser.parse_args([]))
if credentials.access_token_expired:
credentials.refresh(httplib2.Http())
return credentials
def google_login(secrets, creds, user, pw, app_name):
credentials = get_gdata_credentials(secrets, creds, ["https://spreadsheets.google.com/feeds"], False)
client = gdata.spreadsheet.service.SpreadsheetsService(additional_headers={'Authorization' : 'Bearer %s' % credentials.access_token})
# google spreadsheet
client.email = user
client.password = pw
client.source = app_name
client.ProgrammaticLogin()
return client
# ------------------------------------------------------------------------------
# given a feed & feed name, returns its id
def get_feed_id(feed, name):
to_return = ''
for entry in feed.entry:
if entry.title.text.strip() == name:
id_parts = entry.id.text.split('/')
to_return = id_parts[len(id_parts) - 1]
return to_return
# ------------------------------------------------------------------------------
# gets a worksheet feed
def get_worksheet_feed(client, ss, ws):
ss_id = get_feed_id(client.GetSpreadsheetsFeed(), ss)
ws_id = get_feed_id(client.GetWorksheetsFeed(ss_id), ws)
return client.GetListFeed(ss_id, ws_id)
# ------------------------------------------------------------------------------
# Flags a study on the given worksheet
# for deployment into the msk automation portal.
def flag_study_for_production_portal_deployment(client, worksheet_feed, cancer_study_id, remove_from_triage):
for entry in worksheet_feed.entry:
for key in entry.custom:
if entry.custom[key].text == cancer_study_id:
client.UpdateRow(entry, get_row_data(entry, remove_from_triage))
return
# ------------------------------------------------------------------------------
# constructs new row entry
def get_row_data(entry, remove_from_triage):
dict = {}
for key in entry.custom:
if key == TRIAGE_PORTAL_KEY:
if remove_from_triage == 't':
dict[key] = 'r'
else:
dict[key] = ''
elif key == MSK_AUTOMATION_PORTAL_KEY:
dict[key] = 'x'
else:
dict[key] = entry.custom[key].text
return dict
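# For example (illustrative), with remove_from_triage == 't' a row containing
# {'triage-portal': 'x', 'msk-automation-portal': ''} is rewritten to
# {'triage-portal': 'r', 'msk-automation-portal': 'x'}; any other flag value
# clears the triage-portal column instead, and all remaining columns are copied.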
# ------------------------------------------------------------------------------
# displays program usage (invalid args)
def usage():
print >> OUTPUT_FILE, ('flagStudyForProductionPortalDeployment.py --secrets-file [google secrets.json] --creds-file [oauth creds filename] --google-id --google-password ' +
'--google-spreadsheet --google-worksheet --cancer-study-id [STABLE_ID] [--remove-from-triage [t/f]]')
# ------------------------------------------------------------------------------
# the big deal main.
def main():
# process command line options
try:
opts, args = getopt.getopt(sys.argv[1:], '',
['secrets-file=', 'creds-file=', 'google-id=', 'google-password=',
'google-spreadsheet=', 'google-worksheet=',
'cancer-study-id=', 'remove-from-triage='])
except getopt.error, msg:
print >> ERROR_FILE, msg
usage()
sys.exit(2)
secrets_filename = ''
creds_filename = ''
google_id = ''
google_password = ''
google_spreadsheet = ''
google_worksheet = ''
cancer_study_id = ''
remove_from_triage = ''
for o, a in opts:
if o == '--secrets-file':
secrets_filename = a
elif o == '--creds-file':
creds_filename = a
elif o == '--google-id':
google_id = a
elif o == '--google-password':
google_password = a
elif o == '--google-spreadsheet':
google_spreadsheet = a
elif o == '--google-worksheet':
google_worksheet = a
elif o == '--cancer-study-id':
cancer_study_id = a
elif o == '--remove-from-triage':
remove_from_triage = a
if (secrets_filename == '' or creds_filename == '' or google_id == '' or google_password == '' or
google_spreadsheet == '' or google_worksheet == '' or cancer_study_id == ''):
usage()
sys.exit(2)
# the point of the script
client = google_login(secrets_filename, creds_filename, google_id, google_password, sys.argv[0])
worksheet_feed = get_worksheet_feed(client, google_spreadsheet, google_worksheet)
flag_study_for_production_portal_deployment(client, worksheet_feed, cancer_study_id, remove_from_triage)
# ------------------------------------------------------------------------------
# ready to roll
if __name__ == '__main__':
main()
|
da1z/intellij-community
|
refs/heads/master
|
python/testData/typeshed/conftest.py
|
22
|
import re
import os
import pip
import pytest
@pytest.fixture(scope='module')
def requirements(request):
requirements_path = re.sub(r'(.*)_test\.py', r'\1_requirements.txt',
request.module.__file__)
if os.path.exists(requirements_path):
pip.main(['install', '-r', requirements_path])
yield
# We could uninstall everything here after the module tests finish
|
adamdempsey90/fargo3d
|
refs/heads/master
|
utils/python/streamfunction.py
|
1
|
import matplotlib.pyplot as plt
import numpy as np
import scipy.integrate as integrate
def streamfunction(u,v,dx,dy):
term1 = np.ndarray([u.shape[0],u.shape[1]])
term2 = np.ndarray([u.shape[0],u.shape[1]])
for i in range(u.shape[1]):
for j in range(u.shape[0]):
# integral in x for fixed y0
term2[j,i] = integrate.trapz(v[j,0:i],dx=dx)
term1[j,i] = integrate.trapz(u[0:j,i],dx=dy)
print np.shape(term1-term2)
return term1 - term2
# The level curves of this function are the streamlines.
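# Minimal usage sketch (illustrative; assumes a uniform grid and 2D velocity fields):
# ny, nx = 64, 128
# u = np.ones((ny, nx))   # uniform flow in x
# v = np.zeros((ny, nx))
# psi = streamfunction(u, v, dx=0.1, dy=0.1)
# plt.contour(psi)        # contours of psi trace the streamlines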
|
ftkghost/SuperSaver
|
refs/heads/master
|
supersaver/core/middleware/minidetector/tests/__init__.py
|
7
|
from unittest import TestSuite, TestCase, TextTestRunner, TestLoader
import minidetector
import os.path
class DummyRequest(object):
def __init__(self, useragent):
self.META = {'HTTP_USER_AGENT': useragent}
class TestHTTPHeaders(TestCase):
"""Everything that Isn't a User-Agent Header"""
def test_wap(self):
request = DummyRequest("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8b5) Gecko/20051019 Flock/0.4 Firefox/1.0+")
request.META['HTTP_ACCEPT'] = 'application/vnd.wap.xhtml+xml'
minidetector.Middleware.process_request(request)
self.assert_(request.mobile, "WAP not Detected")
def test_opera_mini(self):
request = DummyRequest("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8b5) Gecko/20051019 Flock/0.4 Firefox/1.0+")
request.META['HTTP_X_OPERAMINI_FEATURES'] = 'secure'
minidetector.Middleware.process_request(request)
self.assert_(request.mobile, "Opera Mini not Detected")
def MobileDetectionFactory(uas, expected):
class MobileDetection(TestCase):
def testUA(self, ua):
request = DummyRequest(ua)
minidetector.Middleware.process_request(request)
if self.expected:
self.assert_(request.mobile,
"Mobile Not Detected: %s" % ua)
else:
self.assert_(not request.mobile,
"Mobile Falsely Detected: %s" % ua)
def testnum(num):
def test(self):
return self.testUA(self.uas[num])
return test
MobileDetection.uas = uas
MobileDetection.expected = expected
suite = TestSuite()
for x in range(len(uas)):
if not uas[x].startswith('#'):
setattr(MobileDetection, 'test%s'%x, testnum(x))
suite.addTest(MobileDetection('test%s' % x))
return suite
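# For example (illustrative), MobileDetectionFactory(uas=['SomeMobileUA/1.0'], expected=True)
# returns a suite containing a single generated 'test0' method that runs the
# middleware against that user agent and asserts request.mobile is set.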
def suite_from_file(filename, expected):
f = None
try:
f = open(os.path.join(os.path.dirname(__file__), filename))
uas = f.readlines()
finally:
if f:
f.close()
suite = MobileDetectionFactory(uas=uas, expected=expected)
return suite
def gen_suite():
suite = TestSuite()
suite.addTest(suite_from_file('mobile_useragents.txt', True))
suite.addTest(suite_from_file('other_useragents.txt', False))
suite.addTests(TestLoader().loadTestsFromTestCase(TestHTTPHeaders))
return suite
suite = gen_suite()
if __name__ == "__main__":
TextTestRunner().run(suite)
|
chippey/gaffer
|
refs/heads/master
|
python/GafferUI/LayoutMenu.py
|
4
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2012-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import re
import IECore
import GafferUI
## Appends a submenu of the given name to the specified IECore.MenuDefinition. The submenu
# contains commands to facilitate the administration of different UI layouts.
def appendDefinitions( menuDefinition, name="" ) :
menuDefinition.append( name, { "subMenu" : layoutMenuCallable } )
## A function suitable as the command for a Layout/Name menu item which restores a named layout.
# It must be invoked from a menu which has a ScriptWindow in its ancestry.
def restore( menu, name ) :
scriptWindow, layouts = __scriptWindowAndLayouts( menu )
layout = layouts.create( name, scriptWindow.scriptNode() )
scriptWindow.setLayout( layout )
## A function suitable as the command for a Layout/Delete/LayoutName menu item.
def delete( name, menu ) :
scriptWindow, layouts = __scriptWindowAndLayouts( menu )
layouts.remove( name )
__saveLayouts( scriptWindow.scriptNode().applicationRoot() )
## A function suitable as the command for a Layout/Save... menu item. It must be invoked from
# a menu which has a ScriptWindow in its ancestry.
def save( menu ) :
scriptWindow, layouts = __scriptWindowAndLayouts( menu )
layoutNames = layouts.names()
i = 1
while True :
layoutName = "Layout " + str( i )
i += 1
if "user:" + layoutName not in layoutNames :
break
d = GafferUI.TextInputDialogue( initialText=layoutName, title="Save Layout", confirmLabel="Save" )
t = d.waitForText( parentWindow = scriptWindow )
d.setVisible( False )
if t is None :
return
layout = scriptWindow.getLayout()
layouts.add( "user:" + t, layout )
__saveLayouts( scriptWindow.scriptNode().applicationRoot() )
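# For example (illustrative), if "user:Layout 1" and "user:Layout 2" already exist,
# the dialogue above is pre-filled with "Layout 3" as the suggested name.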
def __saveLayouts( applicationRoot ) :
f = open( os.path.join( applicationRoot.preferencesLocation(), "layouts.py" ), "w" )
f.write( "# This file was automatically generated by Gaffer.\n" )
f.write( "# Do not edit this file - it will be overwritten.\n\n" )
GafferUI.Layouts.acquire( applicationRoot ).save( f, re.compile( "user:.*" ) )
def fullScreen( menu, checkBox ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
scriptWindow.setFullScreen( checkBox )
def fullScreenCheckBox( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
return scriptWindow.getFullScreen()
## The callable used to generate the submenu created by appendDefinitions().
# This is exposed publicly so it can be called by alternative submenus and
# the result edited before being given to a Menu.
def layoutMenuCallable( menu ) :
scriptWindow, layouts = __scriptWindowAndLayouts( menu )
menuDefinition = IECore.MenuDefinition()
layoutNames = layouts.names()
if layoutNames :
def restoreWrapper( name ) :
return lambda menu : restore( menu, name )
for name in layoutNames :
label = name
if label.startswith( "user:" ) :
label = label[5:]
menuDefinition.append( label, { "command" : restoreWrapper( name ) } )
menuDefinition.append( "/SetDivider", { "divider" : True } )
def deleteWrapper( name ) :
return lambda menu : delete( name, menu )
for name in layoutNames :
if name.startswith( "user:" ) :
menuDefinition.append( "/Delete/%s" % name[5:], { "command" : deleteWrapper( name ) } )
menuDefinition.append( "/Save...", { "command" : save } )
menuDefinition.append( "/SaveDivider", { "divider" : True } )
menuDefinition.append( "/Full Screen", { "command" : fullScreen, "checkBox" : fullScreenCheckBox, "shortCut" : "`" } )
return menuDefinition
def __scriptWindowAndLayouts( menu ) :
scriptWindow = menu.ancestor( GafferUI.ScriptWindow )
layouts = GafferUI.Layouts.acquire( scriptWindow.scriptNode().applicationRoot() )
return scriptWindow, layouts
|
AwesomeTurtle/personfinder
|
refs/heads/master
|
app/vendors/unidecode/x084.py
|
252
|
data = (
'Hu ', # 0x00
'Qi ', # 0x01
'He ', # 0x02
'Cui ', # 0x03
'Tao ', # 0x04
'Chun ', # 0x05
'Bei ', # 0x06
'Chang ', # 0x07
'Huan ', # 0x08
'Fei ', # 0x09
'Lai ', # 0x0a
'Qi ', # 0x0b
'Meng ', # 0x0c
'Ping ', # 0x0d
'Wei ', # 0x0e
'Dan ', # 0x0f
'Sha ', # 0x10
'Huan ', # 0x11
'Yan ', # 0x12
'Yi ', # 0x13
'Tiao ', # 0x14
'Qi ', # 0x15
'Wan ', # 0x16
'Ce ', # 0x17
'Nai ', # 0x18
'Kutabireru ', # 0x19
'Tuo ', # 0x1a
'Jiu ', # 0x1b
'Tie ', # 0x1c
'Luo ', # 0x1d
'[?] ', # 0x1e
'[?] ', # 0x1f
'Meng ', # 0x20
'[?] ', # 0x21
'Yaji ', # 0x22
'[?] ', # 0x23
'Ying ', # 0x24
'Ying ', # 0x25
'Ying ', # 0x26
'Xiao ', # 0x27
'Sa ', # 0x28
'Qiu ', # 0x29
'Ke ', # 0x2a
'Xiang ', # 0x2b
'Wan ', # 0x2c
'Yu ', # 0x2d
'Yu ', # 0x2e
'Fu ', # 0x2f
'Lian ', # 0x30
'Xuan ', # 0x31
'Yuan ', # 0x32
'Nan ', # 0x33
'Ze ', # 0x34
'Wo ', # 0x35
'Chun ', # 0x36
'Xiao ', # 0x37
'Yu ', # 0x38
'Pian ', # 0x39
'Mao ', # 0x3a
'An ', # 0x3b
'E ', # 0x3c
'Luo ', # 0x3d
'Ying ', # 0x3e
'Huo ', # 0x3f
'Gua ', # 0x40
'Jiang ', # 0x41
'Mian ', # 0x42
'Zuo ', # 0x43
'Zuo ', # 0x44
'Ju ', # 0x45
'Bao ', # 0x46
'Rou ', # 0x47
'Xi ', # 0x48
'Xie ', # 0x49
'An ', # 0x4a
'Qu ', # 0x4b
'Jian ', # 0x4c
'Fu ', # 0x4d
'Lu ', # 0x4e
'Jing ', # 0x4f
'Pen ', # 0x50
'Feng ', # 0x51
'Hong ', # 0x52
'Hong ', # 0x53
'Hou ', # 0x54
'Yan ', # 0x55
'Tu ', # 0x56
'Zhu ', # 0x57
'Zi ', # 0x58
'Xiang ', # 0x59
'Shen ', # 0x5a
'Ge ', # 0x5b
'Jie ', # 0x5c
'Jing ', # 0x5d
'Mi ', # 0x5e
'Huang ', # 0x5f
'Shen ', # 0x60
'Pu ', # 0x61
'Gai ', # 0x62
'Dong ', # 0x63
'Zhou ', # 0x64
'Qian ', # 0x65
'Wei ', # 0x66
'Bo ', # 0x67
'Wei ', # 0x68
'Pa ', # 0x69
'Ji ', # 0x6a
'Hu ', # 0x6b
'Zang ', # 0x6c
'Jia ', # 0x6d
'Duan ', # 0x6e
'Yao ', # 0x6f
'Jun ', # 0x70
'Cong ', # 0x71
'Quan ', # 0x72
'Wei ', # 0x73
'Xian ', # 0x74
'Kui ', # 0x75
'Ting ', # 0x76
'Hun ', # 0x77
'Xi ', # 0x78
'Shi ', # 0x79
'Qi ', # 0x7a
'Lan ', # 0x7b
'Zong ', # 0x7c
'Yao ', # 0x7d
'Yuan ', # 0x7e
'Mei ', # 0x7f
'Yun ', # 0x80
'Shu ', # 0x81
'Di ', # 0x82
'Zhuan ', # 0x83
'Guan ', # 0x84
'Sukumo ', # 0x85
'Xue ', # 0x86
'Chan ', # 0x87
'Kai ', # 0x88
'Kui ', # 0x89
'[?] ', # 0x8a
'Jiang ', # 0x8b
'Lou ', # 0x8c
'Wei ', # 0x8d
'Pai ', # 0x8e
'[?] ', # 0x8f
'Sou ', # 0x90
'Yin ', # 0x91
'Shi ', # 0x92
'Chun ', # 0x93
'Shi ', # 0x94
'Yun ', # 0x95
'Zhen ', # 0x96
'Lang ', # 0x97
'Nu ', # 0x98
'Meng ', # 0x99
'He ', # 0x9a
'Que ', # 0x9b
'Suan ', # 0x9c
'Yuan ', # 0x9d
'Li ', # 0x9e
'Ju ', # 0x9f
'Xi ', # 0xa0
'Pang ', # 0xa1
'Chu ', # 0xa2
'Xu ', # 0xa3
'Tu ', # 0xa4
'Liu ', # 0xa5
'Wo ', # 0xa6
'Zhen ', # 0xa7
'Qian ', # 0xa8
'Zu ', # 0xa9
'Po ', # 0xaa
'Cuo ', # 0xab
'Yuan ', # 0xac
'Chu ', # 0xad
'Yu ', # 0xae
'Kuai ', # 0xaf
'Pan ', # 0xb0
'Pu ', # 0xb1
'Pu ', # 0xb2
'Na ', # 0xb3
'Shuo ', # 0xb4
'Xi ', # 0xb5
'Fen ', # 0xb6
'Yun ', # 0xb7
'Zheng ', # 0xb8
'Jian ', # 0xb9
'Ji ', # 0xba
'Ruo ', # 0xbb
'Cang ', # 0xbc
'En ', # 0xbd
'Mi ', # 0xbe
'Hao ', # 0xbf
'Sun ', # 0xc0
'Zhen ', # 0xc1
'Ming ', # 0xc2
'Sou ', # 0xc3
'Xu ', # 0xc4
'Liu ', # 0xc5
'Xi ', # 0xc6
'Gu ', # 0xc7
'Lang ', # 0xc8
'Rong ', # 0xc9
'Weng ', # 0xca
'Gai ', # 0xcb
'Cuo ', # 0xcc
'Shi ', # 0xcd
'Tang ', # 0xce
'Luo ', # 0xcf
'Ru ', # 0xd0
'Suo ', # 0xd1
'Xian ', # 0xd2
'Bei ', # 0xd3
'Yao ', # 0xd4
'Gui ', # 0xd5
'Bi ', # 0xd6
'Zong ', # 0xd7
'Gun ', # 0xd8
'Za ', # 0xd9
'Xiu ', # 0xda
'Ce ', # 0xdb
'Hai ', # 0xdc
'Lan ', # 0xdd
'[?] ', # 0xde
'Ji ', # 0xdf
'Li ', # 0xe0
'Can ', # 0xe1
'Lang ', # 0xe2
'Yu ', # 0xe3
'[?] ', # 0xe4
'Ying ', # 0xe5
'Mo ', # 0xe6
'Diao ', # 0xe7
'Tiao ', # 0xe8
'Mao ', # 0xe9
'Tong ', # 0xea
'Zhu ', # 0xeb
'Peng ', # 0xec
'An ', # 0xed
'Lian ', # 0xee
'Cong ', # 0xef
'Xi ', # 0xf0
'Ping ', # 0xf1
'Qiu ', # 0xf2
'Jin ', # 0xf3
'Chun ', # 0xf4
'Jie ', # 0xf5
'Wei ', # 0xf6
'Tui ', # 0xf7
'Cao ', # 0xf8
'Yu ', # 0xf9
'Yi ', # 0xfa
'Ji ', # 0xfb
'Liao ', # 0xfc
'Bi ', # 0xfd
'Lu ', # 0xfe
'Su ', # 0xff
)
|
crosswalk-project/chromium-crosswalk-efl
|
refs/heads/efl/crosswalk-10/39.0.2171.19
|
third_party/cython/src/Cython/Compiler/PyrexTypes.py
|
87
|
#
# Cython/Python language types
#
from Code import UtilityCode, LazyUtilityCode, TempitaUtilityCode
import StringEncoding
import Naming
import copy
from Errors import error
class BaseType(object):
#
# Base class for all Cython types including pseudo-types.
# List of attribute names of any subtypes
subtypes = []
def can_coerce_to_pyobject(self, env):
return False
def cast_code(self, expr_code):
return "((%s)%s)" % (self.declaration_code(""), expr_code)
def specialization_name(self):
# This is not entirely robust.
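# For example (illustrative), 'unsigned long long' becomes 'unsigned_long_long',
# and any character outside `safe` is hex-escaped as '_%x_'.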
safe = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789'
all = []
for c in self.declaration_code("").replace("unsigned ", "unsigned_").replace("long long", "long_long").replace(" ", "__"):
if c in safe:
all.append(c)
else:
all.append('_%x_' % ord(c))
return ''.join(all)
def base_declaration_code(self, base_code, entity_code):
if entity_code:
return "%s %s" % (base_code, entity_code)
else:
return base_code
def __deepcopy__(self, memo):
"""
Types never need to be copied; if we do copy, Unfortunate Things
Will Happen!
"""
return self
def get_fused_types(self, result=None, seen=None, subtypes=None):
subtypes = subtypes or self.subtypes
if subtypes:
if result is None:
result = []
seen = set()
for attr in subtypes:
list_or_subtype = getattr(self, attr)
if list_or_subtype:
if isinstance(list_or_subtype, BaseType):
list_or_subtype.get_fused_types(result, seen)
else:
for subtype in list_or_subtype:
subtype.get_fused_types(result, seen)
return result
return None
def specialize_fused(self, env):
if env.fused_to_specific:
return self.specialize(env.fused_to_specific)
return self
def _get_fused_types(self):
"""
Add this indirection for the is_fused property to allow overriding
get_fused_types in subclasses.
"""
return self.get_fused_types()
is_fused = property(_get_fused_types, doc="Whether this type or any of its "
"subtypes is a fused type")
def deduce_template_params(self, actual):
"""
Deduce any template params in this (argument) type given the actual
argument type.
http://en.cppreference.com/w/cpp/language/function_template#Template_argument_deduction
"""
if self == actual:
return {}
else:
return None
def __lt__(self, other):
"""
For sorting. The sorting order should correspond to the preference of
conversion from Python types.
Override to provide something sensible. This is only implemented so that
Python 3 doesn't trip.
"""
return id(type(self)) < id(type(other))
def py_type_name(self):
"""
Return the name of the Python type that can coerce to this type.
"""
def typeof_name(self):
"""
Return the string with which fused python functions can be indexed.
"""
if self.is_builtin_type or self.py_type_name() == 'object':
index_name = self.py_type_name()
else:
index_name = str(self)
return index_name
def check_for_null_code(self, cname):
"""
Return the code for a NULL-check in case an UnboundLocalError should
be raised if an entry of this type is referenced before assignment.
Returns None if no check should be performed.
"""
return None
def invalid_value(self):
"""
Returns the most invalid value an object of this type can assume as a
C expression string. Returns None if no such value exists.
"""
class PyrexType(BaseType):
#
# Base class for all Cython types
#
# is_pyobject boolean Is a Python object type
# is_extension_type boolean Is a Python extension type
# is_final_type boolean Is a final extension type
# is_numeric boolean Is a C numeric type
# is_int boolean Is a C integer type
# is_float boolean Is a C floating point type
# is_complex boolean Is a C complex type
# is_void boolean Is the C void type
# is_array boolean Is a C array type
# is_ptr boolean Is a C pointer type
# is_null_ptr boolean Is the type of NULL
# is_reference boolean Is a C reference type
# is_const boolean Is a C const type.
# is_cfunction boolean Is a C function type
# is_struct_or_union boolean Is a C struct or union type
# is_struct boolean Is a C struct type
# is_enum boolean Is a C enum type
# is_typedef boolean Is a typedef type
# is_string boolean Is a C char * type
# is_pyunicode_ptr boolean Is a C PyUNICODE * type
# is_cpp_string boolean Is a C++ std::string type
# is_unicode_char boolean Is either Py_UCS4 or Py_UNICODE
# is_returncode boolean Is used only to signal exceptions
# is_error boolean Is the dummy error type
# is_buffer boolean Is buffer access type
# has_attributes boolean Has C dot-selectable attributes
# default_value string Initial value
# entry Entry The Entry for this type
#
# declaration_code(entity_code,
# for_display = 0, dll_linkage = None, pyrex = 0)
# Returns a code fragment for the declaration of an entity
# of this type, given a code fragment for the entity.
# * If for_display, this is for reading by a human in an error
# message; otherwise it must be valid C code.
# * If dll_linkage is not None, it must be 'DL_EXPORT' or
# 'DL_IMPORT', and will be added to the base type part of
# the declaration.
# * If pyrex = 1, this is for use in a 'cdef extern'
# statement of a Cython include file.
#
# assignable_from(src_type)
# Tests whether a variable of this type can be
# assigned a value of type src_type.
#
# same_as(other_type)
# Tests whether this type represents the same type
# as other_type.
#
# as_argument_type():
# Coerces array and C function types into pointer type for use as
# a formal argument type.
#
is_pyobject = 0
is_unspecified = 0
is_extension_type = 0
is_final_type = 0
is_builtin_type = 0
is_numeric = 0
is_int = 0
is_float = 0
is_complex = 0
is_void = 0
is_array = 0
is_ptr = 0
is_null_ptr = 0
is_reference = 0
is_const = 0
is_cfunction = 0
is_struct_or_union = 0
is_cpp_class = 0
is_cpp_string = 0
is_struct = 0
is_enum = 0
is_typedef = 0
is_string = 0
is_pyunicode_ptr = 0
is_unicode_char = 0
is_returncode = 0
is_error = 0
is_buffer = 0
is_memoryviewslice = 0
has_attributes = 0
default_value = ""
def resolve(self):
# If a typedef, returns the base type.
return self
def specialize(self, values):
# TODO(danilo): Override wherever it makes sense.
return self
def literal_code(self, value):
# Returns a C code fragment representing a literal
# value of this type.
return str(value)
def __str__(self):
return self.declaration_code("", for_display = 1).strip()
def same_as(self, other_type, **kwds):
return self.same_as_resolved_type(other_type.resolve(), **kwds)
def same_as_resolved_type(self, other_type):
return self == other_type or other_type is error_type
def subtype_of(self, other_type):
return self.subtype_of_resolved_type(other_type.resolve())
def subtype_of_resolved_type(self, other_type):
return self.same_as(other_type)
def assignable_from(self, src_type):
return self.assignable_from_resolved_type(src_type.resolve())
def assignable_from_resolved_type(self, src_type):
return self.same_as(src_type)
def as_argument_type(self):
return self
def is_complete(self):
# A type is incomplete if it is an unsized array,
# a struct whose attributes are not defined, etc.
return 1
def is_simple_buffer_dtype(self):
return (self.is_int or self.is_float or self.is_complex or self.is_pyobject or
self.is_extension_type or self.is_ptr)
def struct_nesting_depth(self):
# Returns the number of levels of nested structs. This is
# used for constructing a stack for walking the run-time
# type information of the struct.
return 1
def global_init_code(self, entry, code):
# abstract
pass
def needs_nonecheck(self):
return 0
def public_decl(base_code, dll_linkage):
if dll_linkage:
return "%s(%s)" % (dll_linkage, base_code)
else:
return base_code
def create_typedef_type(name, base_type, cname, is_external=0):
is_fused = base_type.is_fused
if base_type.is_complex or is_fused:
if is_external:
if is_fused:
msg = "Fused"
else:
msg = "Complex"
raise ValueError("%s external typedefs not supported" % msg)
return base_type
else:
return CTypedefType(name, base_type, cname, is_external)
class CTypedefType(BaseType):
#
# Pseudo-type defined with a ctypedef statement in a
# 'cdef extern from' block.
# Delegates most attribute lookups to the base type.
# (Anything not defined here or in the BaseType is delegated.)
#
# qualified_name string
# typedef_name string
# typedef_cname string
# typedef_base_type PyrexType
# typedef_is_external bool
is_typedef = 1
typedef_is_external = 0
to_py_utility_code = None
from_py_utility_code = None
subtypes = ['typedef_base_type']
def __init__(self, name, base_type, cname, is_external=0):
assert not base_type.is_complex
self.typedef_name = name
self.typedef_cname = cname
self.typedef_base_type = base_type
self.typedef_is_external = is_external
def invalid_value(self):
return self.typedef_base_type.invalid_value()
def resolve(self):
return self.typedef_base_type.resolve()
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.typedef_name
else:
base_code = public_decl(self.typedef_cname, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def as_argument_type(self):
return self
def cast_code(self, expr_code):
# If self is really an array (rather than pointer), we can't cast.
# For example, the gmp mpz_t.
if self.typedef_base_type.is_array:
base_type = self.typedef_base_type.base_type
return CPtrType(base_type).cast_code(expr_code)
else:
return BaseType.cast_code(self, expr_code)
def __repr__(self):
return "<CTypedefType %s>" % self.typedef_cname
def __str__(self):
return self.typedef_name
def _create_utility_code(self, template_utility_code,
template_function_name):
type_name = self.typedef_cname.replace(" ","_").replace("::","__")
utility_code = template_utility_code.specialize(
type = self.typedef_cname,
TypeName = type_name)
function_name = template_function_name % type_name
return utility_code, function_name
def create_to_py_utility_code(self, env):
if self.typedef_is_external:
if not self.to_py_utility_code:
base_type = self.typedef_base_type
if type(base_type) is CIntType:
self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
env.use_utility_code(TempitaUtilityCode.load(
"CIntToPy", "TypeConversion.c",
context={"TYPE": self.declaration_code(''),
"TO_PY_FUNCTION": self.to_py_function}))
return True
elif base_type.is_float:
pass # XXX implement!
elif base_type.is_complex:
pass # XXX implement!
pass
if self.to_py_utility_code:
env.use_utility_code(self.to_py_utility_code)
return True
# delegation
return self.typedef_base_type.create_to_py_utility_code(env)
def create_from_py_utility_code(self, env):
if self.typedef_is_external:
if not self.from_py_utility_code:
base_type = self.typedef_base_type
if type(base_type) is CIntType:
self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
env.use_utility_code(TempitaUtilityCode.load(
"CIntFromPy", "TypeConversion.c",
context={"TYPE": self.declaration_code(''),
"FROM_PY_FUNCTION": self.from_py_function}))
return True
elif base_type.is_float:
pass # XXX implement!
elif base_type.is_complex:
pass # XXX implement!
if self.from_py_utility_code:
env.use_utility_code(self.from_py_utility_code)
return True
# delegation
return self.typedef_base_type.create_from_py_utility_code(env)
def overflow_check_binop(self, binop, env, const_rhs=False):
env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
type = self.declaration_code("")
name = self.specialization_name()
if binop == "lshift":
env.use_utility_code(TempitaUtilityCode.load(
"LeftShift", "Overflow.c",
context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed}))
else:
if const_rhs:
binop += "_const"
_load_overflow_base(env)
env.use_utility_code(TempitaUtilityCode.load(
"SizeCheck", "Overflow.c",
context={'TYPE': type, 'NAME': name}))
env.use_utility_code(TempitaUtilityCode.load(
"Binop", "Overflow.c",
context={'TYPE': type, 'NAME': name, 'BINOP': binop}))
return "__Pyx_%s_%s_checking_overflow" % (binop, name)
def error_condition(self, result_code):
if self.typedef_is_external:
if self.exception_value:
condition = "(%s == (%s)%s)" % (
result_code, self.typedef_cname, self.exception_value)
if self.exception_check:
condition += " && PyErr_Occurred()"
return condition
# delegation
return self.typedef_base_type.error_condition(result_code)
def __getattr__(self, name):
return getattr(self.typedef_base_type, name)
def py_type_name(self):
return self.typedef_base_type.py_type_name()
def can_coerce_to_pyobject(self, env):
return self.typedef_base_type.can_coerce_to_pyobject(env)
class MemoryViewSliceType(PyrexType):
is_memoryviewslice = 1
has_attributes = 1
scope = None
# These are special cased in Defnode
from_py_function = None
to_py_function = None
exception_value = None
exception_check = True
subtypes = ['dtype']
def __init__(self, base_dtype, axes):
"""
MemoryViewSliceType(base, axes)
Base is the C base type; axes is a list of (access, packing) strings,
where access is one of 'full', 'direct' or 'ptr' and packing is one of
'contig', 'strided' or 'follow'. There is one (access, packing) tuple
for each dimension.
the access specifiers determine whether the array data contains
pointers that need to be dereferenced along that axis when
retrieving/setting:
'direct' -- No pointers stored in this dimension.
'ptr' -- Pointer stored in this dimension.
'full' -- Check along this dimension, don't assume either.
the packing specifiers specify how the array elements are laid out
in memory.
'contig' -- The data are contiguous in memory along this dimension.
At most one dimension may be specified as 'contig'.
'strided' -- The data aren't contiguous along this dimension.
'follow' -- Used for C/Fortran contiguous arrays, a 'follow' dimension
has its stride automatically computed from extents of the other
dimensions to ensure C or Fortran memory layout.
C-contiguous memory has 'direct' as the access spec, 'contig' as the
*last* axis' packing spec and 'follow' for all other packing specs.
Fortran-contiguous memory has 'direct' as the access spec, 'contig' as
the *first* axis' packing spec and 'follow' for all other packing
specs.
"""
import MemoryView
self.dtype = base_dtype
self.axes = axes
self.ndim = len(axes)
self.flags = MemoryView.get_buf_flags(self.axes)
self.is_c_contig, self.is_f_contig = MemoryView.is_cf_contig(self.axes)
assert not (self.is_c_contig and self.is_f_contig)
self.mode = MemoryView.get_mode(axes)
self.writable_needed = False
if not self.dtype.is_fused:
self.dtype_name = MemoryView.mangle_dtype_name(self.dtype)
def same_as_resolved_type(self, other_type):
return ((other_type.is_memoryviewslice and
self.dtype.same_as(other_type.dtype) and
self.axes == other_type.axes) or
other_type is error_type)
def needs_nonecheck(self):
return True
def is_complete(self):
# incomplete since the underlying struct doesn't have a cython.memoryview object.
return 0
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
# XXX: we put these guards in for now...
assert not pyrex
assert not dll_linkage
import MemoryView
return self.base_declaration_code(
MemoryView.memviewslice_cname,
entity_code)
def attributes_known(self):
if self.scope is None:
import Symtab
self.scope = scope = Symtab.CClassScope(
'mvs_class_'+self.specialization_suffix(),
None,
visibility='extern')
scope.parent_type = self
scope.directives = {}
scope.declare_var('_data', c_char_ptr_type, None,
cname='data', is_cdef=1)
return True
def declare_attribute(self, attribute, env, pos):
import MemoryView, Options
scope = self.scope
if attribute == 'shape':
scope.declare_var('shape',
c_array_type(c_py_ssize_t_type,
Options.buffer_max_dims),
pos,
cname='shape',
is_cdef=1)
elif attribute == 'strides':
scope.declare_var('strides',
c_array_type(c_py_ssize_t_type,
Options.buffer_max_dims),
pos,
cname='strides',
is_cdef=1)
elif attribute == 'suboffsets':
scope.declare_var('suboffsets',
c_array_type(c_py_ssize_t_type,
Options.buffer_max_dims),
pos,
cname='suboffsets',
is_cdef=1)
elif attribute in ("copy", "copy_fortran"):
ndim = len(self.axes)
to_axes_c = [('direct', 'contig')]
to_axes_f = [('direct', 'contig')]
if ndim - 1:
to_axes_c = [('direct', 'follow')]*(ndim-1) + to_axes_c
to_axes_f = to_axes_f + [('direct', 'follow')]*(ndim-1)
to_memview_c = MemoryViewSliceType(self.dtype, to_axes_c)
to_memview_f = MemoryViewSliceType(self.dtype, to_axes_f)
for to_memview, cython_name in [(to_memview_c, "copy"),
(to_memview_f, "copy_fortran")]:
entry = scope.declare_cfunction(cython_name,
CFuncType(self, [CFuncTypeArg("memviewslice", self, None)]),
pos=pos,
defining=1,
cname=MemoryView.copy_c_or_fortran_cname(to_memview))
#entry.utility_code_definition = \
env.use_utility_code(MemoryView.get_copy_new_utility(pos, self, to_memview))
MemoryView.use_cython_array_utility_code(env)
elif attribute in ("is_c_contig", "is_f_contig"):
# is_c_contig and is_f_contig functions
for (c_or_f, cython_name) in (('c', 'is_c_contig'), ('f', 'is_f_contig')):
is_contig_name = \
MemoryView.get_is_contig_func_name(c_or_f, self.ndim)
cfunctype = CFuncType(
return_type=c_bint_type,
args=[CFuncTypeArg("memviewslice", self, None)],
exception_value="-1",
)
entry = scope.declare_cfunction(cython_name,
cfunctype,
pos=pos,
defining=1,
cname=is_contig_name)
entry.utility_code_definition = MemoryView.get_is_contig_utility(
attribute == 'is_c_contig', self.ndim)
return True
def specialization_suffix(self):
return "%s_%s" % (self.axes_to_name(), self.dtype_name)
def can_coerce_to_pyobject(self, env):
return True
def check_for_null_code(self, cname):
return cname + '.memview'
def create_from_py_utility_code(self, env):
import MemoryView, Buffer
# We don't have 'code', so use a LazyUtilityCode with a callback.
def lazy_utility_callback(code):
context['dtype_typeinfo'] = Buffer.get_type_information_cname(
code, self.dtype)
return TempitaUtilityCode.load(
"ObjectToMemviewSlice", "MemoryView_C.c", context=context)
env.use_utility_code(Buffer.acquire_utility_code)
env.use_utility_code(MemoryView.memviewslice_init_code)
env.use_utility_code(LazyUtilityCode(lazy_utility_callback))
if self.is_c_contig:
c_or_f_flag = "__Pyx_IS_C_CONTIG"
elif self.is_f_contig:
c_or_f_flag = "__Pyx_IS_F_CONTIG"
else:
c_or_f_flag = "0"
suffix = self.specialization_suffix()
funcname = "__Pyx_PyObject_to_MemoryviewSlice_" + suffix
context = dict(
MemoryView.context,
buf_flag = self.flags,
ndim = self.ndim,
axes_specs = ', '.join(self.axes_to_code()),
dtype_typedecl = self.dtype.declaration_code(""),
struct_nesting_depth = self.dtype.struct_nesting_depth(),
c_or_f_flag = c_or_f_flag,
funcname = funcname,
)
self.from_py_function = funcname
return True
def create_to_py_utility_code(self, env):
return True
def get_to_py_function(self, env, obj):
to_py_func, from_py_func = self.dtype_object_conversion_funcs(env)
to_py_func = "(PyObject *(*)(char *)) " + to_py_func
from_py_func = "(int (*)(char *, PyObject *)) " + from_py_func
tup = (obj.result(), self.ndim, to_py_func, from_py_func,
self.dtype.is_pyobject)
return "__pyx_memoryview_fromslice(%s, %s, %s, %s, %d);" % tup
def dtype_object_conversion_funcs(self, env):
get_function = "__pyx_memview_get_%s" % self.dtype_name
set_function = "__pyx_memview_set_%s" % self.dtype_name
context = dict(
get_function = get_function,
set_function = set_function,
)
if self.dtype.is_pyobject:
utility_name = "MemviewObjectToObject"
else:
to_py = self.dtype.create_to_py_utility_code(env)
from_py = self.dtype.create_from_py_utility_code(env)
if not (to_py or from_py):
return "NULL", "NULL"
if not self.dtype.to_py_function:
get_function = "NULL"
if not self.dtype.from_py_function:
set_function = "NULL"
utility_name = "MemviewDtypeToObject"
error_condition = (self.dtype.error_condition('value') or
'PyErr_Occurred()')
context.update(
to_py_function = self.dtype.to_py_function,
from_py_function = self.dtype.from_py_function,
dtype = self.dtype.declaration_code(""),
error_condition = error_condition,
)
utility = TempitaUtilityCode.load(
utility_name, "MemoryView_C.c", context=context)
env.use_utility_code(utility)
return get_function, set_function
def axes_to_code(self):
"""Return a list of code constants for each axis"""
import MemoryView
d = MemoryView._spec_to_const
return ["(%s | %s)" % (d[a], d[p]) for a, p in self.axes]
def axes_to_name(self):
"""Return an abbreviated name for our axes"""
import MemoryView
d = MemoryView._spec_to_abbrev
return "".join(["%s%s" % (d[a], d[p]) for a, p in self.axes])
def error_condition(self, result_code):
return "!%s.memview" % result_code
def __str__(self):
import MemoryView
axes_code_list = []
for idx, (access, packing) in enumerate(self.axes):
flag = MemoryView.get_memoryview_flag(access, packing)
if flag == "strided":
axes_code_list.append(":")
else:
if flag == 'contiguous':
have_follow = [p for a, p in self.axes[idx - 1:idx + 2]
if p == 'follow']
if have_follow or self.ndim == 1:
flag = '1'
axes_code_list.append("::" + flag)
if self.dtype.is_pyobject:
dtype_name = self.dtype.name
else:
dtype_name = self.dtype
return "%s[%s]" % (dtype_name, ", ".join(axes_code_list))
def specialize(self, values):
"""This does not validate the base type!!"""
dtype = self.dtype.specialize(values)
if dtype is not self.dtype:
return MemoryViewSliceType(dtype, self.axes)
return self
def cast_code(self, expr_code):
return expr_code
class BufferType(BaseType):
#
# Delegates most attribute lookups to the base type.
# (Anything not defined here or in the BaseType is delegated.)
#
# dtype PyrexType
# ndim int
# mode str
# negative_indices bool
# cast bool
# is_buffer bool
# writable bool
is_buffer = 1
writable = True
subtypes = ['dtype']
def __init__(self, base, dtype, ndim, mode, negative_indices, cast):
self.base = base
self.dtype = dtype
self.ndim = ndim
self.buffer_ptr_type = CPtrType(dtype)
self.mode = mode
self.negative_indices = negative_indices
self.cast = cast
def as_argument_type(self):
return self
def specialize(self, values):
dtype = self.dtype.specialize(values)
if dtype is not self.dtype:
return BufferType(self.base, dtype, self.ndim, self.mode,
self.negative_indices, self.cast)
return self
def __getattr__(self, name):
return getattr(self.base, name)
def __repr__(self):
return "<BufferType %r>" % self.base
def __str__(self):
# avoid ', ', as fused functions split the signature string on ', '
cast_str = ''
if self.cast:
cast_str = ',cast=True'
return "%s[%s,ndim=%d%s]" % (self.base, self.dtype, self.ndim,
cast_str)
def assignable_from(self, other_type):
if other_type.is_buffer:
return (self.same_as(other_type, compare_base=False) and
self.base.assignable_from(other_type.base))
return self.base.assignable_from(other_type)
def same_as(self, other_type, compare_base=True):
if not other_type.is_buffer:
return other_type.same_as(self.base)
return (self.dtype.same_as(other_type.dtype) and
self.ndim == other_type.ndim and
self.mode == other_type.mode and
self.cast == other_type.cast and
(not compare_base or self.base.same_as(other_type.base)))
class PyObjectType(PyrexType):
#
# Base class for all Python object types (reference-counted).
#
# buffer_defaults dict or None Default options for buffer
name = "object"
is_pyobject = 1
default_value = "0"
buffer_defaults = None
is_extern = False
is_subclassed = False
is_gc_simple = False
def __str__(self):
return "Python object"
def __repr__(self):
return "<PyObjectType>"
def can_coerce_to_pyobject(self, env):
return True
def default_coerced_ctype(self):
"""The default C type that this Python type coerces to, or None."""
return None
def assignable_from(self, src_type):
# except for pointers, conversion will be attempted
return not src_type.is_ptr or src_type.is_string or src_type.is_pyunicode_ptr
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = "object"
else:
base_code = public_decl("PyObject", dll_linkage)
entity_code = "*%s" % entity_code
return self.base_declaration_code(base_code, entity_code)
def as_pyobject(self, cname):
if (not self.is_complete()) or self.is_extension_type:
return "(PyObject *)" + cname
else:
return cname
def py_type_name(self):
return "object"
def __lt__(self, other):
"""
Make sure we sort highest, as instance checking on py_type_name
('object') is always true
"""
return False
def global_init_code(self, entry, code):
code.put_init_var_to_py_none(entry, nanny=False)
def check_for_null_code(self, cname):
return cname
builtin_types_that_cannot_create_refcycles = set([
'bool', 'int', 'long', 'float', 'complex',
'bytearray', 'bytes', 'unicode', 'str', 'basestring'
])
class BuiltinObjectType(PyObjectType):
# objstruct_cname string Name of PyObject struct
is_builtin_type = 1
has_attributes = 1
base_type = None
module_name = '__builtin__'
# fields that let it look like an extension type
vtabslot_cname = None
vtabstruct_cname = None
vtabptr_cname = None
typedef_flag = True
is_external = True
def __init__(self, name, cname, objstruct_cname=None):
self.name = name
self.cname = cname
self.typeptr_cname = "(&%s)" % cname
self.objstruct_cname = objstruct_cname
self.is_gc_simple = name in builtin_types_that_cannot_create_refcycles
def set_scope(self, scope):
self.scope = scope
if scope:
scope.parent_type = self
def __str__(self):
return "%s object" % self.name
def __repr__(self):
return "<%s>"% self.cname
def default_coerced_ctype(self):
if self.name in ('bytes', 'bytearray'):
return c_char_ptr_type
elif self.name == 'bool':
return c_bint_type
elif self.name == 'float':
return c_double_type
return None
def assignable_from(self, src_type):
if isinstance(src_type, BuiltinObjectType):
if self.name == 'basestring':
return src_type.name in ('str', 'unicode', 'basestring')
else:
return src_type.name == self.name
elif src_type.is_extension_type:
# FIXME: This is an ugly special case that we currently
# keep supporting. It allows users to specify builtin
# types as external extension types, while keeping them
# compatible with the real builtin types. We already
# generate a warning for it. Big TODO: remove!
return (src_type.module_name == '__builtin__' and
src_type.name == self.name)
else:
return True
def typeobj_is_available(self):
return True
def attributes_known(self):
return True
def subtype_of(self, type):
return type.is_pyobject and type.assignable_from(self)
def type_check_function(self, exact=True):
type_name = self.name
if type_name == 'str':
type_check = 'PyString_Check'
elif type_name == 'basestring':
type_check = '__Pyx_PyBaseString_Check'
elif type_name == 'bytearray':
type_check = 'PyByteArray_Check'
elif type_name == 'frozenset':
type_check = 'PyFrozenSet_Check'
else:
type_check = 'Py%s_Check' % type_name.capitalize()
if exact and type_name not in ('bool', 'slice'):
type_check += 'Exact'
return type_check
def isinstance_code(self, arg):
return '%s(%s)' % (self.type_check_function(exact=False), arg)
def type_test_code(self, arg, notnone=False, exact=True):
type_check = self.type_check_function(exact=exact)
check = 'likely(%s(%s))' % (type_check, arg)
if not notnone:
check += '||((%s) == Py_None)' % arg
if self.name == 'basestring':
name = '(PY_MAJOR_VERSION < 3 ? "basestring" : "str")'
space_for_name = 16
else:
name = '"%s"' % self.name
# avoid wasting too much space but limit number of different format strings
space_for_name = (len(self.name) // 16 + 1) * 16
error = '(PyErr_Format(PyExc_TypeError, "Expected %%.%ds, got %%.200s", %s, Py_TYPE(%s)->tp_name), 0)' % (
space_for_name, name, arg)
return check + '||' + error
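# Illustrative expansion (not part of the original source): for the builtin
# 'list', type_test_code("x") yields a C expression along the lines of
#   likely(PyList_CheckExact(x))||((x) == Py_None)||(PyErr_Format(...), 0)
# i.e. an exact type check, a None allowance unless notnone=True, and a
# TypeError-raising fallback.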
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.name
else:
base_code = public_decl("PyObject", dll_linkage)
entity_code = "*%s" % entity_code
return self.base_declaration_code(base_code, entity_code)
def cast_code(self, expr_code, to_object_struct = False):
return "((%s*)%s)" % (
to_object_struct and self.objstruct_cname or "PyObject", # self.objstruct_cname may be None
expr_code)
def py_type_name(self):
return self.name
class PyExtensionType(PyObjectType):
#
# A Python extension type.
#
# name string
# scope CClassScope Attribute namespace
# visibility string
# typedef_flag boolean
# base_type PyExtensionType or None
# module_name string or None Qualified name of defining module
# objstruct_cname string Name of PyObject struct
# objtypedef_cname string Name of PyObject struct typedef
# typeobj_cname string or None C code fragment referring to type object
# typeptr_cname string or None Name of pointer to external type object
# vtabslot_cname string Name of C method table member
# vtabstruct_cname string Name of C method table struct
# vtabptr_cname string Name of pointer to C method table
# vtable_cname string Name of C method table definition
# defered_declarations [thunk] Used to declare class hierarchies in order
is_extension_type = 1
has_attributes = 1
objtypedef_cname = None
def __init__(self, name, typedef_flag, base_type, is_external=0):
self.name = name
self.scope = None
self.typedef_flag = typedef_flag
if base_type is not None:
base_type.is_subclassed = True
self.base_type = base_type
self.module_name = None
self.objstruct_cname = None
self.typeobj_cname = None
self.typeptr_cname = None
self.vtabslot_cname = None
self.vtabstruct_cname = None
self.vtabptr_cname = None
self.vtable_cname = None
self.is_external = is_external
self.defered_declarations = []
def set_scope(self, scope):
self.scope = scope
if scope:
scope.parent_type = self
def needs_nonecheck(self):
return True
def subtype_of_resolved_type(self, other_type):
if other_type.is_extension_type or other_type.is_builtin_type:
return self is other_type or (
self.base_type and self.base_type.subtype_of(other_type))
else:
return other_type is py_object_type
def typeobj_is_available(self):
# Do we have a pointer to the type object?
return self.typeptr_cname
def typeobj_is_imported(self):
# If we don't know the C name of the type object but we do
# know which module it's defined in, it will be imported.
return self.typeobj_cname is None and self.module_name is not None
def assignable_from(self, src_type):
if self == src_type:
return True
if isinstance(src_type, PyExtensionType):
if src_type.base_type is not None:
return self.assignable_from(src_type.base_type)
if isinstance(src_type, BuiltinObjectType):
# FIXME: This is an ugly special case that we currently
# keep supporting. It allows users to specify builtin
# types as external extension types, while keeping them
# compatible with the real builtin types. We already
# generate a warning for it. Big TODO: remove!
return (self.module_name == '__builtin__' and
self.name == src_type.name)
return False
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0, deref = 0):
if pyrex or for_display:
base_code = self.name
else:
if self.typedef_flag:
objstruct = self.objstruct_cname
else:
objstruct = "struct %s" % self.objstruct_cname
base_code = public_decl(objstruct, dll_linkage)
if deref:
assert not entity_code
else:
entity_code = "*%s" % entity_code
return self.base_declaration_code(base_code, entity_code)
def type_test_code(self, py_arg, notnone=False):
none_check = "((%s) == Py_None)" % py_arg
type_check = "likely(__Pyx_TypeTest(%s, %s))" % (
py_arg, self.typeptr_cname)
if notnone:
return type_check
else:
return "likely(%s || %s)" % (none_check, type_check)
def attributes_known(self):
return self.scope is not None
def __str__(self):
return self.name
def __repr__(self):
return "<PyExtensionType %s%s>" % (self.scope.class_name,
("", " typedef")[self.typedef_flag])
def py_type_name(self):
if not self.module_name:
return self.name
return "__import__(%r, None, None, ['']).%s" % (self.module_name,
self.name)
class CType(PyrexType):
#
# Base class for all C types (non-reference-counted).
#
# to_py_function string C function for converting to Python object
# from_py_function string C function for constructing from Python object
#
to_py_function = None
from_py_function = None
exception_value = None
exception_check = 1
def create_to_py_utility_code(self, env):
return self.to_py_function is not None
def create_from_py_utility_code(self, env):
return self.from_py_function is not None
def can_coerce_to_pyobject(self, env):
return self.create_to_py_utility_code(env)
def error_condition(self, result_code):
conds = []
if self.is_string or self.is_pyunicode_ptr:
conds.append("(!%s)" % result_code)
elif self.exception_value is not None:
conds.append("(%s == (%s)%s)" % (result_code, self.sign_and_name(), self.exception_value))
if self.exception_check:
conds.append("PyErr_Occurred()")
if len(conds) > 0:
return " && ".join(conds)
else:
return 0
class CConstType(BaseType):
is_const = 1
def __init__(self, const_base_type):
self.const_base_type = const_base_type
if const_base_type.has_attributes and const_base_type.scope is not None:
import Symtab
self.scope = Symtab.CConstScope(const_base_type.scope)
def __repr__(self):
return "<CConstType %s>" % repr(self.const_base_type)
def __str__(self):
return self.declaration_code("", for_display=1)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
return self.const_base_type.declaration_code("const %s" % entity_code, for_display, dll_linkage, pyrex)
def specialize(self, values):
base_type = self.const_base_type.specialize(values)
if base_type == self.const_base_type:
return self
else:
return CConstType(base_type)
def deduce_template_params(self, actual):
return self.const_base_type.deduce_template_params(actual)
def create_to_py_utility_code(self, env):
if self.const_base_type.create_to_py_utility_code(env):
self.to_py_function = self.const_base_type.to_py_function
return True
def __getattr__(self, name):
return getattr(self.const_base_type, name)
class FusedType(CType):
"""
Represents a Fused Type. All it needs to do is keep track of the types
it aggregates, as it will be replaced with its specific version wherever
needed.
See http://wiki.cython.org/enhancements/fusedtypes
types [PyrexType] is the list of types to be fused
name str the name of the ctypedef
"""
is_fused = 1
exception_check = 0
def __init__(self, types, name=None):
self.types = types
self.name = name
def declaration_code(self, entity_code, for_display = 0,
dll_linkage = None, pyrex = 0):
if pyrex or for_display:
return self.name
raise Exception("This may never happen, please report a bug")
def __repr__(self):
return 'FusedType(name=%r)' % self.name
def specialize(self, values):
return values[self]
def get_fused_types(self, result=None, seen=None):
if result is None:
return [self]
if self not in seen:
result.append(self)
seen.add(self)
class CVoidType(CType):
#
# C "void" type
#
is_void = 1
def __repr__(self):
return "<CVoidType>"
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = "void"
else:
base_code = public_decl("void", dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def is_complete(self):
return 0
class InvisibleVoidType(CVoidType):
#
# For use as the return type of C++ constructors and destructors.
# Acts like void, but does not print out a declaration.
#
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = "[void]"
else:
base_code = public_decl("", dll_linkage)
return self.base_declaration_code(base_code, entity_code)
class CNumericType(CType):
#
# Base class for all C numeric types.
#
# rank integer Relative size
# signed integer 0 = unsigned, 1 = unspecified, 2 = explicitly signed
#
is_numeric = 1
default_value = "0"
has_attributes = True
scope = None
sign_words = ("unsigned ", "", "signed ")
def __init__(self, rank, signed = 1):
self.rank = rank
self.signed = signed
def sign_and_name(self):
s = self.sign_words[self.signed]
n = rank_to_type_name[self.rank]
return s + n
def __repr__(self):
return "<CNumericType %s>" % self.sign_and_name()
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
type_name = self.sign_and_name()
if pyrex or for_display:
base_code = type_name.replace('PY_LONG_LONG', 'long long')
else:
base_code = public_decl(type_name, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def attributes_known(self):
if self.scope is None:
import Symtab
self.scope = scope = Symtab.CClassScope(
'',
None,
visibility="extern")
scope.parent_type = self
scope.directives = {}
scope.declare_cfunction(
"conjugate",
CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True),
pos=None,
defining=1,
cname=" ")
return True
def __lt__(self, other):
"""Sort based on rank, preferring signed over unsigned"""
if other.is_numeric:
return self.rank > other.rank and self.signed >= other.signed
# Prefer numeric types over others
return True
def py_type_name(self):
if self.rank <= 4:
return "(int, long)"
return "float"
class ForbidUseClass:
def __repr__(self):
raise RuntimeError()
def __str__(self):
raise RuntimeError()
ForbidUse = ForbidUseClass()
class CIntType(CNumericType):
is_int = 1
typedef_flag = 0
to_py_function = None
from_py_function = None
exception_value = -1
def create_to_py_utility_code(self, env):
if type(self).to_py_function is None:
self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
env.use_utility_code(TempitaUtilityCode.load(
"CIntToPy", "TypeConversion.c",
context={"TYPE": self.declaration_code(''),
"TO_PY_FUNCTION": self.to_py_function}))
return True
def create_from_py_utility_code(self, env):
if type(self).from_py_function is None:
self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
env.use_utility_code(TempitaUtilityCode.load(
"CIntFromPy", "TypeConversion.c",
context={"TYPE": self.declaration_code(''),
"FROM_PY_FUNCTION": self.from_py_function}))
return True
def get_to_py_type_conversion(self):
if self.rank < list(rank_to_type_name).index('int'):
# This assumes sizeof(short) < sizeof(int)
return "PyInt_FromLong"
else:
# Py{Int|Long}_From[Unsigned]Long[Long]
Prefix = "Int"
SignWord = ""
TypeName = "Long"
if not self.signed:
Prefix = "Long"
SignWord = "Unsigned"
if self.rank >= list(rank_to_type_name).index('PY_LONG_LONG'):
Prefix = "Long"
TypeName = "LongLong"
return "Py%s_From%s%s" % (Prefix, SignWord, TypeName)
def get_from_py_type_conversion(self):
type_name = rank_to_type_name[self.rank]
type_name = type_name.replace("PY_LONG_LONG", "long long")
TypeName = type_name.title().replace(" ", "")
SignWord = self.sign_words[self.signed].strip().title()
if self.rank >= list(rank_to_type_name).index('long'):
utility_code = c_long_from_py_function
else:
utility_code = c_int_from_py_function
utility_code.specialize(self,
SignWord=SignWord,
TypeName=TypeName)
func_name = "__Pyx_PyInt_As%s%s" % (SignWord, TypeName)
return func_name
def assignable_from_resolved_type(self, src_type):
return src_type.is_int or src_type.is_enum or src_type is error_type
def invalid_value(self):
if rank_to_type_name[int(self.rank)] == 'char':
return "'?'"
else:
# We do not really know the size of the type, so return
# a 32-bit literal and rely on casting to final type. It will
# be negative for signed ints, which is good.
return "0xbad0bad0"
def overflow_check_binop(self, binop, env, const_rhs=False):
env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
type = self.declaration_code("")
name = self.specialization_name()
if binop == "lshift":
env.use_utility_code(TempitaUtilityCode.load(
"LeftShift", "Overflow.c",
context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed}))
else:
if const_rhs:
binop += "_const"
if type in ('int', 'long', 'long long'):
env.use_utility_code(TempitaUtilityCode.load(
"BaseCaseSigned", "Overflow.c",
context={'INT': type, 'NAME': name}))
elif type in ('unsigned int', 'unsigned long', 'unsigned long long'):
env.use_utility_code(TempitaUtilityCode.load(
"BaseCaseUnsigned", "Overflow.c",
context={'UINT': type, 'NAME': name}))
elif self.rank <= 1:
# sizeof(short) < sizeof(int)
return "__Pyx_%s_%s_no_overflow" % (binop, name)
else:
_load_overflow_base(env)
env.use_utility_code(TempitaUtilityCode.load(
"SizeCheck", "Overflow.c",
context={'TYPE': type, 'NAME': name}))
env.use_utility_code(TempitaUtilityCode.load(
"Binop", "Overflow.c",
context={'TYPE': type, 'NAME': name, 'BINOP': binop}))
return "__Pyx_%s_%s_checking_overflow" % (binop, name)
def _load_overflow_base(env):
env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
for type in ('int', 'long', 'long long'):
env.use_utility_code(TempitaUtilityCode.load(
"BaseCaseSigned", "Overflow.c",
context={'INT': type, 'NAME': type.replace(' ', '_')}))
for type in ('unsigned int', 'unsigned long', 'unsigned long long'):
env.use_utility_code(TempitaUtilityCode.load(
"BaseCaseUnsigned", "Overflow.c",
context={'UINT': type, 'NAME': type.replace(' ', '_')}))
class CAnonEnumType(CIntType):
is_enum = 1
def sign_and_name(self):
return 'int'
class CReturnCodeType(CIntType):
to_py_function = "__Pyx_Owned_Py_None"
is_returncode = True
exception_check = False
class CBIntType(CIntType):
to_py_function = "__Pyx_PyBool_FromLong"
from_py_function = "__Pyx_PyObject_IsTrue"
exception_check = 1 # for C++ bool
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = 'bool'
else:
base_code = public_decl('int', dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def __repr__(self):
return "<CNumericType bint>"
def __str__(self):
return 'bint'
def py_type_name(self):
return "bool"
class CPyUCS4IntType(CIntType):
# Py_UCS4
is_unicode_char = True
# Py_UCS4 coerces from and to single character unicode strings (or
# at most two characters on 16bit Unicode builds), but we also
# allow Python integers as input. The value range for Py_UCS4
# is 0..1114111, which is checked when converting from an integer
# value.
to_py_function = "PyUnicode_FromOrdinal"
from_py_function = "__Pyx_PyObject_AsPy_UCS4"
def create_from_py_utility_code(self, env):
env.use_utility_code(UtilityCode.load_cached("ObjectAsUCS4", "TypeConversion.c"))
return True
def sign_and_name(self):
return "Py_UCS4"
class CPyUnicodeIntType(CIntType):
# Py_UNICODE
is_unicode_char = True
# Py_UNICODE coerces from and to single character unicode strings,
# but we also allow Python integers as input. The value range for
# Py_UNICODE is 0..1114111, which is checked when converting from
# an integer value.
to_py_function = "PyUnicode_FromOrdinal"
from_py_function = "__Pyx_PyObject_AsPy_UNICODE"
def create_from_py_utility_code(self, env):
env.use_utility_code(UtilityCode.load_cached("ObjectAsPyUnicode", "TypeConversion.c"))
return True
def sign_and_name(self):
return "Py_UNICODE"
class CPyHashTType(CIntType):
to_py_function = "__Pyx_PyInt_FromHash_t"
from_py_function = "__Pyx_PyInt_AsHash_t"
def sign_and_name(self):
return "Py_hash_t"
class CPySSizeTType(CIntType):
to_py_function = "PyInt_FromSsize_t"
from_py_function = "__Pyx_PyIndex_AsSsize_t"
def sign_and_name(self):
return "Py_ssize_t"
class CSSizeTType(CIntType):
to_py_function = "PyInt_FromSsize_t"
from_py_function = "PyInt_AsSsize_t"
def sign_and_name(self):
return "Py_ssize_t"
class CSizeTType(CIntType):
to_py_function = "__Pyx_PyInt_FromSize_t"
def sign_and_name(self):
return "size_t"
class CPtrdiffTType(CIntType):
def sign_and_name(self):
return "ptrdiff_t"
class CFloatType(CNumericType):
is_float = 1
to_py_function = "PyFloat_FromDouble"
from_py_function = "__pyx_PyFloat_AsDouble"
exception_value = -1
def __init__(self, rank, math_h_modifier = ''):
CNumericType.__init__(self, rank, 1)
self.math_h_modifier = math_h_modifier
if rank == RANK_FLOAT:
self.from_py_function = "__pyx_PyFloat_AsFloat"
def assignable_from_resolved_type(self, src_type):
return (src_type.is_numeric and not src_type.is_complex) or src_type is error_type
def invalid_value(self):
return Naming.PYX_NAN
class CComplexType(CNumericType):
is_complex = 1
to_py_function = "__pyx_PyComplex_FromComplex"
has_attributes = 1
scope = None
def __init__(self, real_type):
while real_type.is_typedef and not real_type.typedef_is_external:
real_type = real_type.typedef_base_type
if real_type.is_typedef and real_type.typedef_is_external:
# The code below is not actually used: coercions are currently disabled
# so that complex types of external types cannot be created
self.funcsuffix = "_%s" % real_type.specialization_name()
elif hasattr(real_type, 'math_h_modifier'):
self.funcsuffix = real_type.math_h_modifier
else:
self.funcsuffix = "_%s" % real_type.specialization_name()
self.real_type = real_type
CNumericType.__init__(self, real_type.rank + 0.5, real_type.signed)
self.binops = {}
self.from_parts = "%s_from_parts" % self.specialization_name()
self.default_value = "%s(0, 0)" % self.from_parts
def __eq__(self, other):
if isinstance(self, CComplexType) and isinstance(other, CComplexType):
return self.real_type == other.real_type
else:
return False
def __ne__(self, other):
if isinstance(self, CComplexType) and isinstance(other, CComplexType):
return self.real_type != other.real_type
else:
return True
def __lt__(self, other):
if isinstance(self, CComplexType) and isinstance(other, CComplexType):
return self.real_type < other.real_type
else:
# this is arbitrary, but it makes sure we always have
# *some* kind of order
return False
def __hash__(self):
return ~hash(self.real_type)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
real_code = self.real_type.declaration_code("", for_display, dll_linkage, pyrex)
base_code = "%s complex" % real_code
else:
base_code = public_decl(self.sign_and_name(), dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def sign_and_name(self):
real_type_name = self.real_type.specialization_name()
real_type_name = real_type_name.replace('long__double','long_double')
real_type_name = real_type_name.replace('PY_LONG_LONG','long_long')
return Naming.type_prefix + real_type_name + "_complex"
def assignable_from(self, src_type):
# Temporary hack/feature disabling, see #441
if (not src_type.is_complex and src_type.is_numeric and src_type.is_typedef
and src_type.typedef_is_external):
return False
else:
return super(CComplexType, self).assignable_from(src_type)
def assignable_from_resolved_type(self, src_type):
return (src_type.is_complex and self.real_type.assignable_from_resolved_type(src_type.real_type)
or src_type.is_numeric and self.real_type.assignable_from_resolved_type(src_type)
or src_type is error_type)
def attributes_known(self):
if self.scope is None:
import Symtab
self.scope = scope = Symtab.CClassScope(
'',
None,
visibility="extern")
scope.parent_type = self
scope.directives = {}
scope.declare_var("real", self.real_type, None, cname="real", is_cdef=True)
scope.declare_var("imag", self.real_type, None, cname="imag", is_cdef=True)
scope.declare_cfunction(
"conjugate",
CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True),
pos=None,
defining=1,
cname="__Pyx_c_conj%s" % self.funcsuffix)
return True
def create_declaration_utility_code(self, env):
# This must always be run, because a single CComplexType instance can be shared
# across multiple compilations (the one created in the module scope)
env.use_utility_code(complex_header_utility_code)
env.use_utility_code(complex_real_imag_utility_code)
for utility_code in (complex_type_utility_code,
complex_from_parts_utility_code,
complex_arithmetic_utility_code):
env.use_utility_code(
utility_code.specialize(
self,
real_type = self.real_type.declaration_code(''),
m = self.funcsuffix,
is_float = self.real_type.is_float))
return True
def create_to_py_utility_code(self, env):
env.use_utility_code(complex_real_imag_utility_code)
env.use_utility_code(complex_to_py_utility_code)
return True
def create_from_py_utility_code(self, env):
self.real_type.create_from_py_utility_code(env)
for utility_code in (complex_from_parts_utility_code,
complex_from_py_utility_code):
env.use_utility_code(
utility_code.specialize(
self,
real_type = self.real_type.declaration_code(''),
m = self.funcsuffix,
is_float = self.real_type.is_float))
self.from_py_function = "__Pyx_PyComplex_As_" + self.specialization_name()
return True
def lookup_op(self, nargs, op):
try:
return self.binops[nargs, op]
except KeyError:
pass
try:
op_name = complex_ops[nargs, op]
self.binops[nargs, op] = func_name = "__Pyx_c_%s%s" % (op_name, self.funcsuffix)
return func_name
except KeyError:
return None
def unary_op(self, op):
return self.lookup_op(1, op)
def binary_op(self, op):
return self.lookup_op(2, op)
def py_type_name(self):
return "complex"
def cast_code(self, expr_code):
return expr_code
complex_ops = {
(1, '-'): 'neg',
(1, 'zero'): 'is_zero',
(2, '+'): 'sum',
(2, '-'): 'diff',
(2, '*'): 'prod',
(2, '/'): 'quot',
(2, '=='): 'eq',
}
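# Hedged example of how this table is used via lookup_op(): for a double
# complex (funcsuffix "" from double's empty math_h_modifier), lookup_op(2, '+')
# returns "__Pyx_c_sum", the float specialization would give "__Pyx_c_sumf",
# and unknown (nargs, op) pairs return None.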
complex_header_utility_code = UtilityCode(
proto_block='h_code',
proto="""
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
""")
complex_real_imag_utility_code = UtilityCode(
proto="""
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if (defined(_WIN32) || defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
""")
complex_type_utility_code = UtilityCode(
proto_block='complex_type_declarations',
proto="""
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< %(real_type)s > %(type_name)s;
#else
typedef %(real_type)s _Complex %(type_name)s;
#endif
#else
typedef struct { %(real_type)s real, imag; } %(type_name)s;
#endif
""")
complex_from_parts_utility_code = UtilityCode(
proto_block='utility_code_proto',
proto="""
static CYTHON_INLINE %(type)s %(type_name)s_from_parts(%(real_type)s, %(real_type)s);
""",
impl="""
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE %(type)s %(type_name)s_from_parts(%(real_type)s x, %(real_type)s y) {
return ::std::complex< %(real_type)s >(x, y);
}
#else
static CYTHON_INLINE %(type)s %(type_name)s_from_parts(%(real_type)s x, %(real_type)s y) {
return x + y*(%(type)s)_Complex_I;
}
#endif
#else
static CYTHON_INLINE %(type)s %(type_name)s_from_parts(%(real_type)s x, %(real_type)s y) {
%(type)s z;
z.real = x;
z.imag = y;
return z;
}
#endif
""")
complex_to_py_utility_code = UtilityCode(
proto="""
#define __pyx_PyComplex_FromComplex(z) \\
PyComplex_FromDoubles((double)__Pyx_CREAL(z), \\
(double)__Pyx_CIMAG(z))
""")
complex_from_py_utility_code = UtilityCode(
proto="""
static %(type)s __Pyx_PyComplex_As_%(type_name)s(PyObject*);
""",
impl="""
static %(type)s __Pyx_PyComplex_As_%(type_name)s(PyObject* o) {
Py_complex cval;
#if CYTHON_COMPILING_IN_CPYTHON
if (PyComplex_CheckExact(o))
cval = ((PyComplexObject *)o)->cval;
else
#endif
cval = PyComplex_AsCComplex(o);
return %(type_name)s_from_parts(
(%(real_type)s)cval.real,
(%(real_type)s)cval.imag);
}
""")
complex_arithmetic_utility_code = UtilityCode(
proto="""
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq%(m)s(a, b) ((a)==(b))
#define __Pyx_c_sum%(m)s(a, b) ((a)+(b))
#define __Pyx_c_diff%(m)s(a, b) ((a)-(b))
#define __Pyx_c_prod%(m)s(a, b) ((a)*(b))
#define __Pyx_c_quot%(m)s(a, b) ((a)/(b))
#define __Pyx_c_neg%(m)s(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero%(m)s(z) ((z)==(%(real_type)s)0)
#define __Pyx_c_conj%(m)s(z) (::std::conj(z))
#if %(is_float)s
#define __Pyx_c_abs%(m)s(z) (::std::abs(z))
#define __Pyx_c_pow%(m)s(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero%(m)s(z) ((z)==0)
#define __Pyx_c_conj%(m)s(z) (conj%(m)s(z))
#if %(is_float)s
#define __Pyx_c_abs%(m)s(z) (cabs%(m)s(z))
#define __Pyx_c_pow%(m)s(a, b) (cpow%(m)s(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq%(m)s(%(type)s, %(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_sum%(m)s(%(type)s, %(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_diff%(m)s(%(type)s, %(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_prod%(m)s(%(type)s, %(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_quot%(m)s(%(type)s, %(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_neg%(m)s(%(type)s);
static CYTHON_INLINE int __Pyx_c_is_zero%(m)s(%(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_conj%(m)s(%(type)s);
#if %(is_float)s
static CYTHON_INLINE %(real_type)s __Pyx_c_abs%(m)s(%(type)s);
static CYTHON_INLINE %(type)s __Pyx_c_pow%(m)s(%(type)s, %(type)s);
#endif
#endif
""",
impl="""
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq%(m)s(%(type)s a, %(type)s b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE %(type)s __Pyx_c_sum%(m)s(%(type)s a, %(type)s b) {
%(type)s z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE %(type)s __Pyx_c_diff%(m)s(%(type)s a, %(type)s b) {
%(type)s z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE %(type)s __Pyx_c_prod%(m)s(%(type)s a, %(type)s b) {
%(type)s z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
static CYTHON_INLINE %(type)s __Pyx_c_quot%(m)s(%(type)s a, %(type)s b) {
%(type)s z;
%(real_type)s denom = b.real * b.real + b.imag * b.imag;
z.real = (a.real * b.real + a.imag * b.imag) / denom;
z.imag = (a.imag * b.real - a.real * b.imag) / denom;
return z;
}
static CYTHON_INLINE %(type)s __Pyx_c_neg%(m)s(%(type)s a) {
%(type)s z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero%(m)s(%(type)s a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE %(type)s __Pyx_c_conj%(m)s(%(type)s a) {
%(type)s z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if %(is_float)s
static CYTHON_INLINE %(real_type)s __Pyx_c_abs%(m)s(%(type)s z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt%(m)s(z.real*z.real + z.imag*z.imag);
#else
return hypot%(m)s(z.real, z.imag);
#endif
}
static CYTHON_INLINE %(type)s __Pyx_c_pow%(m)s(%(type)s a, %(type)s b) {
%(type)s z;
%(real_type)s r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
%(real_type)s denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
z = __Pyx_c_prod%(m)s(a, a);
return __Pyx_c_prod%(m)s(a, a);
case 3:
z = __Pyx_c_prod%(m)s(a, a);
return __Pyx_c_prod%(m)s(z, a);
case 4:
z = __Pyx_c_prod%(m)s(a, a);
return __Pyx_c_prod%(m)s(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
}
r = a.real;
theta = 0;
} else {
r = __Pyx_c_abs%(m)s(a);
theta = atan2%(m)s(a.imag, a.real);
}
lnr = log%(m)s(r);
z_r = exp%(m)s(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cos%(m)s(z_theta);
z.imag = z_r * sin%(m)s(z_theta);
return z;
}
#endif
#endif
""")
class CPointerBaseType(CType):
# common base type for pointer/array types
#
# base_type CType Reference type
subtypes = ['base_type']
def __init__(self, base_type):
self.base_type = base_type
for char_type in (c_char_type, c_uchar_type, c_schar_type):
if base_type.same_as(char_type):
self.is_string = 1
break
else:
if base_type.same_as(c_py_unicode_type):
self.is_pyunicode_ptr = 1
if self.is_string and not base_type.is_error:
if base_type.signed:
self.to_py_function = "__Pyx_PyObject_FromString"
if self.is_ptr:
if base_type.signed == 2:
self.from_py_function = "__Pyx_PyObject_AsSString"
else:
self.from_py_function = "__Pyx_PyObject_AsString"
else:
self.to_py_function = "__Pyx_PyObject_FromUString"
if self.is_ptr:
self.from_py_function = "__Pyx_PyObject_AsUString"
self.exception_value = "NULL"
elif self.is_pyunicode_ptr and not base_type.is_error:
self.to_py_function = "__Pyx_PyUnicode_FromUnicode"
if self.is_ptr:
self.from_py_function = "__Pyx_PyUnicode_AsUnicode"
self.exception_value = "NULL"
def py_type_name(self):
if self.is_string:
return "bytes"
elif self.is_pyunicode_ptr:
return "unicode"
else:
return super(CPointerBaseType, self).py_type_name()
def literal_code(self, value):
if self.is_string:
assert isinstance(value, str)
return '"%s"' % StringEncoding.escape_byte_string(value)
class CArrayType(CPointerBaseType):
# base_type CType Element type
# size integer or None Number of elements
is_array = 1
def __init__(self, base_type, size):
super(CArrayType, self).__init__(base_type)
self.size = size
def __eq__(self, other):
if isinstance(other, CType) and other.is_array and self.size == other.size:
return self.base_type.same_as(other.base_type)
return False
def __hash__(self):
return hash(self.base_type) + 28 # arbitrarily chosen offset
def __repr__(self):
return "<CArrayType %s %s>" % (self.size, repr(self.base_type))
def same_as_resolved_type(self, other_type):
return ((other_type.is_array and
self.base_type.same_as(other_type.base_type))
or other_type is error_type)
def assignable_from_resolved_type(self, src_type):
# Can't assign to a variable of an array type
return 0
def element_ptr_type(self):
return c_ptr_type(self.base_type)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if self.size is not None:
dimension_code = self.size
else:
dimension_code = ""
if entity_code.startswith("*"):
entity_code = "(%s)" % entity_code
return self.base_type.declaration_code(
"%s[%s]" % (entity_code, dimension_code),
for_display, dll_linkage, pyrex)
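# Illustrative output of the method above (not from the original file): for an
# int array of size 10 named "x" this produces "int x[10]", and a pointer
# entity such as "*p" is parenthesised first, giving "int (*p)[10]".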
def as_argument_type(self):
return c_ptr_type(self.base_type)
def is_complete(self):
return self.size is not None
def specialize(self, values):
base_type = self.base_type.specialize(values)
if base_type == self.base_type:
return self
else:
return CArrayType(base_type)
def deduce_template_params(self, actual):
if isinstance(actual, CArrayType):
return self.base_type.deduce_template_params(actual.base_type)
else:
return None
class CPtrType(CPointerBaseType):
# base_type CType Reference type
is_ptr = 1
default_value = "0"
def __hash__(self):
return hash(self.base_type) + 27 # arbitrarily chosen offset
def __eq__(self, other):
if isinstance(other, CType) and other.is_ptr:
return self.base_type.same_as(other.base_type)
return False
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "<CPtrType %s>" % repr(self.base_type)
def same_as_resolved_type(self, other_type):
return ((other_type.is_ptr and
self.base_type.same_as(other_type.base_type))
or other_type is error_type)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
#print "CPtrType.declaration_code: pointer to", self.base_type ###
return self.base_type.declaration_code(
"*%s" % entity_code,
for_display, dll_linkage, pyrex)
def assignable_from_resolved_type(self, other_type):
if other_type is error_type:
return 1
if other_type.is_null_ptr:
return 1
if self.base_type.is_const:
self = CPtrType(self.base_type.const_base_type)
if self.base_type.is_cfunction:
if other_type.is_ptr:
other_type = other_type.base_type.resolve()
if other_type.is_cfunction:
return self.base_type.pointer_assignable_from_resolved_type(other_type)
else:
return 0
if (self.base_type.is_cpp_class and other_type.is_ptr
and other_type.base_type.is_cpp_class and other_type.base_type.is_subclass(self.base_type)):
return 1
if other_type.is_array or other_type.is_ptr:
return self.base_type.is_void or self.base_type.same_as(other_type.base_type)
return 0
def specialize(self, values):
base_type = self.base_type.specialize(values)
if base_type == self.base_type:
return self
else:
return CPtrType(base_type)
def deduce_template_params(self, actual):
if isinstance(actual, CPtrType):
return self.base_type.deduce_template_params(actual.base_type)
else:
return None
def invalid_value(self):
return "1"
def find_cpp_operation_type(self, operator, operand_type=None):
if self.base_type.is_cpp_class:
return self.base_type.find_cpp_operation_type(operator, operand_type)
return None
class CNullPtrType(CPtrType):
is_null_ptr = 1
class CReferenceType(BaseType):
is_reference = 1
def __init__(self, base_type):
self.ref_base_type = base_type
def __repr__(self):
return "<CReferenceType %s>" % repr(self.ref_base_type)
def __str__(self):
return "%s &" % self.ref_base_type
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
#print "CReferenceType.declaration_code: pointer to", self.base_type ###
return self.ref_base_type.declaration_code(
"&%s" % entity_code,
for_display, dll_linkage, pyrex)
def specialize(self, values):
base_type = self.ref_base_type.specialize(values)
if base_type == self.ref_base_type:
return self
else:
return CReferenceType(base_type)
def deduce_template_params(self, actual):
return self.ref_base_type.deduce_template_params(actual)
def __getattr__(self, name):
return getattr(self.ref_base_type, name)
class CFuncType(CType):
# return_type CType
# args [CFuncTypeArg]
# has_varargs boolean
# exception_value string
# exception_check boolean True if PyErr_Occurred check needed
# calling_convention string Function calling convention
# nogil boolean Can be called without gil
# with_gil boolean Acquire gil around function body
# templates [string] or None
# cached_specialized_types [CFuncType] cached specialized versions of the CFuncType if defined in a pxd
# from_fused boolean Indicates whether this is a specialized
# C function
# is_strict_signature boolean function refuses to accept coerced arguments
# (used for optimisation overrides)
# is_const_method boolean
is_cfunction = 1
original_sig = None
cached_specialized_types = None
from_fused = False
is_const_method = False
subtypes = ['return_type', 'args']
def __init__(self, return_type, args, has_varargs = 0,
exception_value = None, exception_check = 0, calling_convention = "",
nogil = 0, with_gil = 0, is_overridable = 0, optional_arg_count = 0,
is_const_method = False, templates = None, is_strict_signature = False):
self.return_type = return_type
self.args = args
self.has_varargs = has_varargs
self.optional_arg_count = optional_arg_count
self.exception_value = exception_value
self.exception_check = exception_check
self.calling_convention = calling_convention
self.nogil = nogil
self.with_gil = with_gil
self.is_overridable = is_overridable
self.is_const_method = is_const_method
self.templates = templates
self.is_strict_signature = is_strict_signature
def __repr__(self):
arg_reprs = list(map(repr, self.args))  # list() so that "..." can be appended below
if self.has_varargs:
arg_reprs.append("...")
if self.exception_value:
except_clause = " %r" % self.exception_value
else:
except_clause = ""
if self.exception_check:
except_clause += "?"
return "<CFuncType %s %s[%s]%s>" % (
repr(self.return_type),
self.calling_convention_prefix(),
",".join(arg_reprs),
except_clause)
def calling_convention_prefix(self):
cc = self.calling_convention
if cc:
return cc + " "
else:
return ""
def as_argument_type(self):
return c_ptr_type(self)
def same_c_signature_as(self, other_type, as_cmethod = 0):
return self.same_c_signature_as_resolved_type(
other_type.resolve(), as_cmethod)
def same_c_signature_as_resolved_type(self, other_type, as_cmethod = 0):
#print "CFuncType.same_c_signature_as_resolved_type:", \
# self, other_type, "as_cmethod =", as_cmethod ###
if other_type is error_type:
return 1
if not other_type.is_cfunction:
return 0
if self.is_overridable != other_type.is_overridable:
return 0
nargs = len(self.args)
if nargs != len(other_type.args):
return 0
# When comparing C method signatures, the first argument
# is exempt from compatibility checking (the proper check
# is performed elsewhere).
for i in range(as_cmethod, nargs):
if not self.args[i].type.same_as(
other_type.args[i].type):
return 0
if self.has_varargs != other_type.has_varargs:
return 0
if self.optional_arg_count != other_type.optional_arg_count:
return 0
if not self.return_type.same_as(other_type.return_type):
return 0
if not self.same_calling_convention_as(other_type):
return 0
return 1
def compatible_signature_with(self, other_type, as_cmethod = 0):
return self.compatible_signature_with_resolved_type(other_type.resolve(), as_cmethod)
def compatible_signature_with_resolved_type(self, other_type, as_cmethod):
#print "CFuncType.same_c_signature_as_resolved_type:", \
# self, other_type, "as_cmethod =", as_cmethod ###
if other_type is error_type:
return 1
if not other_type.is_cfunction:
return 0
if not self.is_overridable and other_type.is_overridable:
return 0
nargs = len(self.args)
if nargs - self.optional_arg_count != len(other_type.args) - other_type.optional_arg_count:
return 0
if self.optional_arg_count < other_type.optional_arg_count:
return 0
# When comparing C method signatures, the first argument
# is exempt from compatibility checking (the proper check
# is performed elsewhere).
for i in range(as_cmethod, len(other_type.args)):
if not self.args[i].type.same_as(
other_type.args[i].type):
return 0
if self.has_varargs != other_type.has_varargs:
return 0
if not self.return_type.subtype_of_resolved_type(other_type.return_type):
return 0
if not self.same_calling_convention_as(other_type):
return 0
if self.nogil != other_type.nogil:
return 0
self.original_sig = other_type.original_sig or other_type
return 1
def narrower_c_signature_than(self, other_type, as_cmethod = 0):
return self.narrower_c_signature_than_resolved_type(other_type.resolve(), as_cmethod)
def narrower_c_signature_than_resolved_type(self, other_type, as_cmethod):
if other_type is error_type:
return 1
if not other_type.is_cfunction:
return 0
nargs = len(self.args)
if nargs != len(other_type.args):
return 0
for i in range(as_cmethod, nargs):
if not self.args[i].type.subtype_of_resolved_type(other_type.args[i].type):
return 0
else:
self.args[i].needs_type_test = other_type.args[i].needs_type_test \
or not self.args[i].type.same_as(other_type.args[i].type)
if self.has_varargs != other_type.has_varargs:
return 0
if self.optional_arg_count != other_type.optional_arg_count:
return 0
if not self.return_type.subtype_of_resolved_type(other_type.return_type):
return 0
return 1
def same_calling_convention_as(self, other):
## XXX Under discussion ...
## callspec_words = ("__stdcall", "__cdecl", "__fastcall")
## cs1 = self.calling_convention
## cs2 = other.calling_convention
## if (cs1 in callspec_words or
## cs2 in callspec_words):
## return cs1 == cs2
## else:
## return True
sc1 = self.calling_convention == '__stdcall'
sc2 = other.calling_convention == '__stdcall'
return sc1 == sc2
def same_exception_signature_as(self, other_type):
return self.same_exception_signature_as_resolved_type(
other_type.resolve())
def same_exception_signature_as_resolved_type(self, other_type):
return self.exception_value == other_type.exception_value \
and self.exception_check == other_type.exception_check
def same_as_resolved_type(self, other_type, as_cmethod = 0):
return self.same_c_signature_as_resolved_type(other_type, as_cmethod) \
and self.same_exception_signature_as_resolved_type(other_type) \
and self.nogil == other_type.nogil
def pointer_assignable_from_resolved_type(self, other_type):
return self.same_c_signature_as_resolved_type(other_type) \
and self.same_exception_signature_as_resolved_type(other_type) \
and not (self.nogil and not other_type.nogil)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0,
with_calling_convention = 1):
arg_decl_list = []
for arg in self.args[:len(self.args)-self.optional_arg_count]:
arg_decl_list.append(
arg.type.declaration_code("", for_display, pyrex = pyrex))
if self.is_overridable:
arg_decl_list.append("int %s" % Naming.skip_dispatch_cname)
if self.optional_arg_count:
arg_decl_list.append(self.op_arg_struct.declaration_code(Naming.optional_args_cname))
if self.has_varargs:
arg_decl_list.append("...")
arg_decl_code = ", ".join(arg_decl_list)
if not arg_decl_code and not pyrex:
arg_decl_code = "void"
trailer = ""
if (pyrex or for_display) and not self.return_type.is_pyobject:
if self.exception_value and self.exception_check:
trailer = " except? %s" % self.exception_value
elif self.exception_value:
trailer = " except %s" % self.exception_value
elif self.exception_check == '+':
trailer = " except +"
else:
" except *" # ignored
if self.nogil:
trailer += " nogil"
if not with_calling_convention:
cc = ''
else:
cc = self.calling_convention_prefix()
if (not entity_code and cc) or entity_code.startswith("*"):
entity_code = "(%s%s)" % (cc, entity_code)
cc = ""
if self.is_const_method:
trailer += " const"
return self.return_type.declaration_code(
"%s%s(%s)%s" % (cc, entity_code, arg_decl_code, trailer),
for_display, dll_linkage, pyrex)
def function_header_code(self, func_name, arg_code):
if self.is_const_method:
trailer = " const"
else:
trailer = ""
return "%s%s(%s)%s" % (self.calling_convention_prefix(),
func_name, arg_code, trailer)
def signature_string(self):
s = self.declaration_code("")
return s
def signature_cast_string(self):
s = self.declaration_code("(*)", with_calling_convention=False)
return '(%s)' % s
def specialize(self, values):
result = CFuncType(self.return_type.specialize(values),
[arg.specialize(values) for arg in self.args],
has_varargs = self.has_varargs,
exception_value = self.exception_value,
exception_check = self.exception_check,
calling_convention = self.calling_convention,
nogil = self.nogil,
with_gil = self.with_gil,
is_overridable = self.is_overridable,
optional_arg_count = self.optional_arg_count,
is_const_method = self.is_const_method,
templates = self.templates)
result.from_fused = self.is_fused
return result
def opt_arg_cname(self, arg_name):
return self.op_arg_struct.base_type.scope.lookup(arg_name).cname
# Methods that deal with Fused Types
# All but map_with_specific_entries should be called only on functions
# with fused types (and not on their corresponding specific versions).
def get_all_specialized_permutations(self, fused_types=None):
"""
Permute all the types. For every specific instance of a fused type, we
want all other specific instances of all other fused types.
It returns an iterable of two-tuples of the cname that should prefix
the cname of the function, and a dict mapping any fused types to their
respective specific types.
"""
assert self.is_fused
if fused_types is None:
fused_types = self.get_fused_types()
return get_all_specialized_permutations(fused_types)
def get_all_specialized_function_types(self):
"""
Get all the specific function types of this one.
"""
assert self.is_fused
if self.entry.fused_cfunction:
return [n.type for n in self.entry.fused_cfunction.nodes]
elif self.cached_specialized_types is not None:
return self.cached_specialized_types
cfunc_entries = self.entry.scope.cfunc_entries
cfunc_entries.remove(self.entry)
result = []
permutations = self.get_all_specialized_permutations()
for cname, fused_to_specific in permutations:
new_func_type = self.entry.type.specialize(fused_to_specific)
if self.optional_arg_count:
# Remember, this method is set by CFuncDeclaratorNode
self.declare_opt_arg_struct(new_func_type, cname)
new_entry = copy.deepcopy(self.entry)
new_func_type.specialize_entry(new_entry, cname)
new_entry.type = new_func_type
new_func_type.entry = new_entry
result.append(new_func_type)
cfunc_entries.append(new_entry)
self.cached_specialized_types = result
return result
def get_fused_types(self, result=None, seen=None, subtypes=None):
"""Return fused types in the order they appear as parameter types"""
return super(CFuncType, self).get_fused_types(result, seen,
subtypes=['args'])
def specialize_entry(self, entry, cname):
assert not self.is_fused
specialize_entry(entry, cname)
def specialize_entry(entry, cname):
"""
Specialize an entry of a copied fused function or method
"""
entry.is_fused_specialized = True
entry.name = get_fused_cname(cname, entry.name)
if entry.is_cmethod:
entry.cname = entry.name
if entry.is_inherited:
entry.cname = StringEncoding.EncodedString(
"%s.%s" % (Naming.obj_base_cname, entry.cname))
else:
entry.cname = get_fused_cname(cname, entry.cname)
if entry.func_cname:
entry.func_cname = get_fused_cname(cname, entry.func_cname)
def get_fused_cname(fused_cname, orig_cname):
"""
Given the fused cname id and an original cname, return a specialized cname
"""
assert fused_cname and orig_cname
return StringEncoding.EncodedString('%s%s%s' % (Naming.fused_func_prefix,
fused_cname, orig_cname))
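# Rough example (assuming Naming.fused_func_prefix is the usual "__pyx_fuse_"):
# get_fused_cname("0", "__pyx_f_mod_func") gives "__pyx_fuse_0__pyx_f_mod_func",
# keeping each specialization's cname unique while preserving the original name.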
def unique(somelist):
seen = set()
result = []
for obj in somelist:
if obj not in seen:
result.append(obj)
seen.add(obj)
return result
def get_all_specialized_permutations(fused_types):
return _get_all_specialized_permutations(unique(fused_types))
def _get_all_specialized_permutations(fused_types, id="", f2s=()):
fused_type, = fused_types[0].get_fused_types()
result = []
for newid, specific_type in enumerate(fused_type.types):
# f2s = dict(f2s, **{ fused_type: specific_type })
f2s = dict(f2s)
f2s.update({ fused_type: specific_type })
if id:
cname = '%s_%s' % (id, newid)
else:
cname = str(newid)
if len(fused_types) > 1:
result.extend(_get_all_specialized_permutations(
fused_types[1:], cname, f2s))
else:
result.append((cname, f2s))
return result
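# Hedged sketch of the output shape: for a single fused type ft covering int and
# double, get_all_specialized_permutations([ft]) returns something like
#   [('0', {ft: c_int_type}), ('1', {ft: c_double_type})]
# and with two fused types the ids compose into cnames such as '0_1'.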
def specialization_signature_string(fused_compound_type, fused_to_specific):
"""
Return the signature for a specialization of a fused type. e.g.
floating[:] ->
'float' or 'double'
cdef fused ft:
float[:]
double[:]
ft ->
'float[:]' or 'double[:]'
integral func(floating) ->
'int (*func)(float)' or ...
"""
fused_types = fused_compound_type.get_fused_types()
if len(fused_types) == 1:
fused_type = fused_types[0]
else:
fused_type = fused_compound_type
return fused_type.specialize(fused_to_specific).typeof_name()
def get_specialized_types(type):
"""
Return a list of specialized types sorted in reverse order in accordance
with their preference in runtime fused-type dispatch
"""
assert type.is_fused
if isinstance(type, FusedType):
result = type.types
for specialized_type in result:
specialized_type.specialization_string = specialized_type.typeof_name()
else:
result = []
for cname, f2s in get_all_specialized_permutations(type.get_fused_types()):
specialized_type = type.specialize(f2s)
specialized_type.specialization_string = (
specialization_signature_string(type, f2s))
result.append(specialized_type)
return sorted(result)
class CFuncTypeArg(BaseType):
# name string
# cname string
# type PyrexType
# pos source file position
# FIXME: is this the right setup? should None be allowed here?
not_none = False
or_none = False
accept_none = True
accept_builtin_subtypes = False
subtypes = ['type']
def __init__(self, name, type, pos, cname=None):
self.name = name
if cname is not None:
self.cname = cname
else:
self.cname = Naming.var_prefix + name
self.type = type
self.pos = pos
self.needs_type_test = False # TODO: should these defaults be set in analyse_types()?
def __repr__(self):
return "%s:%s" % (self.name, repr(self.type))
def declaration_code(self, for_display = 0):
return self.type.declaration_code(self.cname, for_display)
def specialize(self, values):
return CFuncTypeArg(self.name, self.type.specialize(values), self.pos, self.cname)
class ToPyStructUtilityCode(object):
requires = None
def __init__(self, type, forward_decl):
self.type = type
self.header = "static PyObject* %s(%s)" % (type.to_py_function,
type.declaration_code('s'))
self.forward_decl = forward_decl
def __eq__(self, other):
return isinstance(other, ToPyStructUtilityCode) and self.header == other.header
def __hash__(self):
return hash(self.header)
def get_tree(self):
pass
def put_code(self, output):
code = output['utility_code_def']
proto = output['utility_code_proto']
code.putln("%s {" % self.header)
code.putln("PyObject* res;")
code.putln("PyObject* member;")
code.putln("res = PyDict_New(); if (res == NULL) return NULL;")
for member in self.type.scope.var_entries:
nameconst_cname = code.get_py_string_const(member.name, identifier=True)
code.putln("member = %s(s.%s); if (member == NULL) goto bad;" % (
member.type.to_py_function, member.cname))
code.putln("if (PyDict_SetItem(res, %s, member) < 0) goto bad;" % nameconst_cname)
code.putln("Py_DECREF(member);")
code.putln("return res;")
code.putln("bad:")
code.putln("Py_XDECREF(member);")
code.putln("Py_DECREF(res);")
code.putln("return NULL;")
code.putln("}")
# This is a bit of a hack, we need a forward declaration
# due to the way things are ordered in the module...
if self.forward_decl:
proto.putln(self.type.declaration_code('') + ';')
proto.putln(self.header + ";")
def inject_tree_and_scope_into(self, module_node):
pass
class CStructOrUnionType(CType):
# name string
# cname string
# kind string "struct" or "union"
# scope StructOrUnionScope, or None if incomplete
# typedef_flag boolean
# packed boolean
# entry Entry
is_struct_or_union = 1
has_attributes = 1
exception_check = True
def __init__(self, name, kind, scope, typedef_flag, cname, packed=False):
self.name = name
self.cname = cname
self.kind = kind
self.scope = scope
self.typedef_flag = typedef_flag
self.is_struct = kind == 'struct'
if self.is_struct:
self.to_py_function = "%s_to_py_%s" % (Naming.convert_func_prefix, self.cname)
self.from_py_function = "%s_from_py_%s" % (Naming.convert_func_prefix, self.cname)
self.exception_check = True
self._convert_to_py_code = None
self._convert_from_py_code = None
self.packed = packed
def create_to_py_utility_code(self, env):
if env.outer_scope is None:
return False
if self._convert_to_py_code is False:
return None # tri-state-ish
if self._convert_to_py_code is None:
for member in self.scope.var_entries:
if not member.type.create_to_py_utility_code(env):
self.to_py_function = None
self._convert_to_py_code = False
return False
forward_decl = (self.entry.visibility != 'extern')
self._convert_to_py_code = ToPyStructUtilityCode(self, forward_decl)
env.use_utility_code(self._convert_to_py_code)
return True
def create_from_py_utility_code(self, env):
if env.outer_scope is None:
return False
if self._convert_from_py_code is False:
return None # tri-state-ish
if self._convert_from_py_code is None:
for member in self.scope.var_entries:
if not member.type.create_from_py_utility_code(env):
self.from_py_function = None
self._convert_from_py_code = False
return False
context = dict(
struct_type_decl=self.declaration_code(""),
var_entries=self.scope.var_entries,
funcname=self.from_py_function,
)
self._convert_from_py_code = TempitaUtilityCode.load(
"FromPyStructUtility", "TypeConversion.c", context=context)
env.use_utility_code(self._convert_from_py_code)
return True
def __repr__(self):
return "<CStructOrUnionType %s %s%s>" % (
self.name, self.cname,
("", " typedef")[self.typedef_flag])
def declaration_code(self, entity_code,
for_display=0, dll_linkage=None, pyrex=0):
if pyrex or for_display:
base_code = self.name
else:
if self.typedef_flag:
base_code = self.cname
else:
base_code = "%s %s" % (self.kind, self.cname)
base_code = public_decl(base_code, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def __eq__(self, other):
try:
return (isinstance(other, CStructOrUnionType) and
self.name == other.name)
except AttributeError:
return False
def __lt__(self, other):
try:
return self.name < other.name
except AttributeError:
# this is arbitrary, but it makes sure we always have
# *some* kind of order
return False
def __hash__(self):
return hash(self.cname) ^ hash(self.kind)
def is_complete(self):
return self.scope is not None
def attributes_known(self):
return self.is_complete()
def can_be_complex(self):
# Does the struct consist of exactly two identical floats?
fields = self.scope.var_entries
if len(fields) != 2: return False
a, b = fields
return (a.type.is_float and b.type.is_float and
a.type.declaration_code("") ==
b.type.declaration_code(""))
def struct_nesting_depth(self):
child_depths = [x.type.struct_nesting_depth()
for x in self.scope.var_entries]
return max(child_depths) + 1
def cast_code(self, expr_code):
if self.is_struct:
return expr_code
return super(CStructOrUnionType, self).cast_code(expr_code)
builtin_cpp_conversions = ("std::string",
"std::pair",
"std::vector", "std::list",
"std::set", "std::unordered_set",
"std::map", "std::unordered_map")
class CppClassType(CType):
# name string
# cname string
# scope CppClassScope
# templates [string] or None
is_cpp_class = 1
has_attributes = 1
exception_check = True
namespace = None
# For struct-like declaration.
kind = "struct"
packed = False
typedef_flag = False
subtypes = ['templates']
def __init__(self, name, scope, cname, base_classes, templates = None, template_type = None):
self.name = name
self.cname = cname
self.scope = scope
self.base_classes = base_classes
self.operators = []
self.templates = templates
self.template_type = template_type
self.specializations = {}
self.is_cpp_string = cname == 'std::string'
def use_conversion_utility(self, from_or_to):
pass
def maybe_unordered(self):
if 'unordered' in self.cname:
return 'unordered_'
else:
return ''
def create_from_py_utility_code(self, env):
if self.from_py_function is not None:
return True
if self.cname in builtin_cpp_conversions:
X = "XYZABC"
tags = []
declarations = ["cdef extern from *:"]
for ix, T in enumerate(self.templates or []):
if T.is_pyobject or not T.create_from_py_utility_code(env):
return False
tags.append(T.specialization_name())
if T.exception_value is not None:
except_clause = T.exception_value
if T.exception_check:
except_clause = "? %s" % except_clause
declarations.append(
" ctypedef %s %s '%s'" % (
T.declaration_code("", for_display=True), X[ix], T.declaration_code("")))
else:
except_clause = "*"
declarations.append(
" ctypedef struct %s '%s':\n pass" % (
X[ix], T.declaration_code("")))
declarations.append(
" cdef %s %s_from_py '%s' (object) except %s" % (
X[ix], X[ix], T.from_py_function, except_clause))
cls = self.cname[5:]
cname = '__pyx_convert_%s_from_py_%s' % (cls, '____'.join(tags))
context = {
'template_type_declarations': '\n'.join(declarations),
'cname': cname,
'maybe_unordered': self.maybe_unordered(),
}
from UtilityCode import CythonUtilityCode
env.use_utility_code(CythonUtilityCode.load(cls.replace('unordered_', '') + ".from_py", "CppConvert.pyx", context=context))
self.from_py_function = cname
return True
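# Illustrative naming derived from the code above (assuming a single "int"
# template tag): for std::vector<int>, cls becomes "vector" (cname[5:] strips
# the "std::" prefix) and the generated converter is called
# "__pyx_convert_vector_from_py_int", loaded from CppConvert.pyx.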
def create_to_py_utility_code(self, env):
if self.to_py_function is not None:
return True
if self.cname in builtin_cpp_conversions:
X = "XYZABC"
tags = []
declarations = ["cdef extern from *:"]
for ix, T in enumerate(self.templates or []):
if not T.create_to_py_utility_code(env):
return False
tags.append(T.specialization_name())
declarations.append(
" ctypedef struct %s '%s':\n pass" % (
X[ix], T.declaration_code("")))
declarations.append(
" cdef object %s_to_py '%s' (%s)" % (
X[ix], T.to_py_function, X[ix]))
cls = self.cname[5:]
cname = "__pyx_convert_%s_to_py_%s" % (cls, "____".join(tags))
context = {
'template_type_declarations': '\n'.join(declarations),
'cname': cname,
'maybe_unordered': self.maybe_unordered(),
}
from UtilityCode import CythonUtilityCode
env.use_utility_code(CythonUtilityCode.load(cls.replace('unordered_', '') + ".to_py", "CppConvert.pyx", context=context))
self.to_py_function = cname
return True
def specialize_here(self, pos, template_values = None):
if self.templates is None:
error(pos, "'%s' type is not a template" % self)
return error_type
if len(self.templates) != len(template_values):
error(pos, "%s templated type receives %d arguments, got %d" %
(self.name, len(self.templates), len(template_values)))
return error_type
has_object_template_param = False
for value in template_values:
if value.is_pyobject:
has_object_template_param = True
error(pos,
"Python object type '%s' cannot be used as a template argument" % value)
if has_object_template_param:
return error_type
return self.specialize(dict(zip(self.templates, template_values)))
def specialize(self, values):
if not self.templates and not self.namespace:
return self
if self.templates is None:
self.templates = []
key = tuple(values.items())
if key in self.specializations:
return self.specializations[key]
template_values = [t.specialize(values) for t in self.templates]
specialized = self.specializations[key] = \
CppClassType(self.name, None, self.cname, [], template_values, template_type=self)
# Need to do these *after* self.specializations[key] is set
# to avoid infinite recursion on circular references.
specialized.base_classes = [b.specialize(values) for b in self.base_classes]
specialized.scope = self.scope.specialize(values)
if self.namespace is not None:
specialized.namespace = self.namespace.specialize(values)
return specialized
def deduce_template_params(self, actual):
if self == actual:
return {}
# TODO(robertwb): Actual type equality.
elif self.declaration_code("") == actual.template_type.declaration_code(""):
return reduce(
merge_template_deductions,
[formal_param.deduce_template_params(actual_param) for (formal_param, actual_param) in zip(self.templates, actual.templates)],
{})
else:
return None
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if self.templates:
template_strings = [param.declaration_code('', for_display, None, pyrex)
for param in self.templates]
if for_display:
brackets = "[%s]"
else:
brackets = "<%s>"
templates = brackets % ",".join(template_strings)
if templates[-2:] == ">>":
templates = templates[:-2] + "> >"
else:
templates = ""
if pyrex or for_display:
base_code = "%s%s" % (self.name, templates)
else:
base_code = "%s%s" % (self.cname, templates)
if self.namespace is not None:
base_code = "%s::%s" % (self.namespace.declaration_code(''), base_code)
base_code = public_decl(base_code, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def is_subclass(self, other_type):
if self.same_as_resolved_type(other_type):
return 1
for base_class in self.base_classes:
if base_class.is_subclass(other_type):
return 1
return 0
def same_as_resolved_type(self, other_type):
if other_type.is_cpp_class:
if self == other_type:
return 1
elif (self.cname == other_type.cname and
self.template_type and other_type.template_type):
if self.templates == other_type.templates:
return 1
for t1, t2 in zip(self.templates, other_type.templates):
if not t1.same_as_resolved_type(t2):
return 0
return 1
return 0
def assignable_from_resolved_type(self, other_type):
# TODO: handle operator=(...) here?
if other_type is error_type:
return True
return other_type.is_cpp_class and other_type.is_subclass(self)
def attributes_known(self):
return self.scope is not None
def find_cpp_operation_type(self, operator, operand_type=None):
operands = [self]
if operand_type is not None:
operands.append(operand_type)
# pos == None => no errors
operator_entry = self.scope.lookup_operator_for_types(None, operator, operands)
if not operator_entry:
return None
func_type = operator_entry.type
if func_type.is_ptr:
func_type = func_type.base_type
return func_type.return_type
def check_nullary_constructor(self, pos, msg="stack allocated"):
constructor = self.scope.lookup(u'<init>')
if constructor is not None and best_match([], constructor.all_alternatives()) is None:
error(pos, "C++ class must have a nullary constructor to be %s" % msg)
class TemplatePlaceholderType(CType):
def __init__(self, name):
self.name = name
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if entity_code:
return self.name + " " + entity_code
else:
return self.name
def specialize(self, values):
if self in values:
return values[self]
else:
return self
def deduce_template_params(self, actual):
return {self: actual}
def same_as_resolved_type(self, other_type):
if isinstance(other_type, TemplatePlaceholderType):
return self.name == other_type.name
else:
return 0
def __hash__(self):
return hash(self.name)
def __cmp__(self, other):
if isinstance(other, TemplatePlaceholderType):
return cmp(self.name, other.name)
else:
return cmp(type(self), type(other))
def __eq__(self, other):
if isinstance(other, TemplatePlaceholderType):
return self.name == other.name
else:
return False
class CEnumType(CType):
# name string
# cname string or None
# typedef_flag boolean
is_enum = 1
signed = 1
rank = -1 # Ranks below any integer type
to_py_function = "PyInt_FromLong"
from_py_function = "PyInt_AsLong"
def __init__(self, name, cname, typedef_flag):
self.name = name
self.cname = cname
self.values = []
self.typedef_flag = typedef_flag
def __str__(self):
return self.name
def __repr__(self):
return "<CEnumType %s %s%s>" % (self.name, self.cname,
("", " typedef")[self.typedef_flag])
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.name
else:
if self.typedef_flag:
base_code = self.cname
else:
base_code = "enum %s" % self.cname
base_code = public_decl(base_code, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
class UnspecifiedType(PyrexType):
# Used as a placeholder until the type can be determined.
is_unspecified = 1
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
return "<unspecified>"
def same_as_resolved_type(self, other_type):
return False
class ErrorType(PyrexType):
# Used to prevent propagation of error messages.
is_error = 1
exception_value = "0"
exception_check = 0
to_py_function = "dummy"
from_py_function = "dummy"
def create_to_py_utility_code(self, env):
return True
def create_from_py_utility_code(self, env):
return True
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
return "<error>"
def same_as_resolved_type(self, other_type):
return 1
def error_condition(self, result_code):
return "dummy"
rank_to_type_name = (
"char", # 0
"short", # 1
"int", # 2
"long", # 3
"PY_LONG_LONG", # 4
"float", # 5
"double", # 6
"long double", # 7
)
_rank_to_type_name = list(rank_to_type_name)
RANK_INT = _rank_to_type_name.index('int')
RANK_LONG = _rank_to_type_name.index('long')
RANK_FLOAT = _rank_to_type_name.index('float')
UNSIGNED = 0
SIGNED = 2
error_type = ErrorType()
unspecified_type = UnspecifiedType()
py_object_type = PyObjectType()
c_void_type = CVoidType()
c_uchar_type = CIntType(0, UNSIGNED)
c_ushort_type = CIntType(1, UNSIGNED)
c_uint_type = CIntType(2, UNSIGNED)
c_ulong_type = CIntType(3, UNSIGNED)
c_ulonglong_type = CIntType(4, UNSIGNED)
c_char_type = CIntType(0)
c_short_type = CIntType(1)
c_int_type = CIntType(2)
c_long_type = CIntType(3)
c_longlong_type = CIntType(4)
c_schar_type = CIntType(0, SIGNED)
c_sshort_type = CIntType(1, SIGNED)
c_sint_type = CIntType(2, SIGNED)
c_slong_type = CIntType(3, SIGNED)
c_slonglong_type = CIntType(4, SIGNED)
c_float_type = CFloatType(5, math_h_modifier='f')
c_double_type = CFloatType(6)
c_longdouble_type = CFloatType(7, math_h_modifier='l')
c_float_complex_type = CComplexType(c_float_type)
c_double_complex_type = CComplexType(c_double_type)
c_longdouble_complex_type = CComplexType(c_longdouble_type)
c_anon_enum_type = CAnonEnumType(-1)
c_returncode_type = CReturnCodeType(RANK_INT)
c_bint_type = CBIntType(RANK_INT)
c_py_unicode_type = CPyUnicodeIntType(RANK_INT-0.5, UNSIGNED)
c_py_ucs4_type = CPyUCS4IntType(RANK_LONG-0.5, UNSIGNED)
c_py_hash_t_type = CPyHashTType(RANK_LONG+0.5, SIGNED)
c_py_ssize_t_type = CPySSizeTType(RANK_LONG+0.5, SIGNED)
c_ssize_t_type = CSSizeTType(RANK_LONG+0.5, SIGNED)
c_size_t_type = CSizeTType(RANK_LONG+0.5, UNSIGNED)
c_ptrdiff_t_type = CPtrdiffTType(RANK_LONG+0.75, SIGNED)
c_null_ptr_type = CNullPtrType(c_void_type)
c_void_ptr_type = CPtrType(c_void_type)
c_void_ptr_ptr_type = CPtrType(c_void_ptr_type)
c_char_ptr_type = CPtrType(c_char_type)
c_uchar_ptr_type = CPtrType(c_uchar_type)
c_char_ptr_ptr_type = CPtrType(c_char_ptr_type)
c_int_ptr_type = CPtrType(c_int_type)
c_py_unicode_ptr_type = CPtrType(c_py_unicode_type)
c_py_ssize_t_ptr_type = CPtrType(c_py_ssize_t_type)
c_ssize_t_ptr_type = CPtrType(c_ssize_t_type)
c_size_t_ptr_type = CPtrType(c_size_t_type)
# GIL state
c_gilstate_type = CEnumType("PyGILState_STATE", "PyGILState_STATE", True)
c_threadstate_type = CStructOrUnionType("PyThreadState", "struct", None, 1, "PyThreadState")
c_threadstate_ptr_type = CPtrType(c_threadstate_type)
# the Py_buffer type is defined in Builtin.py
c_py_buffer_type = CStructOrUnionType("Py_buffer", "struct", None, 1, "Py_buffer")
c_py_buffer_ptr_type = CPtrType(c_py_buffer_type)
# Not sure whether the unsigned versions and 'long long' should be in there
# long long requires C99 and might be slow, and would always get preferred
# when specialization happens through calling and not indexing
cy_integral_type = FusedType([c_short_type, c_int_type, c_long_type],
name="integral")
# Omitting long double as it might be slow
cy_floating_type = FusedType([c_float_type, c_double_type], name="floating")
cy_numeric_type = FusedType([c_short_type,
c_int_type,
c_long_type,
c_float_type,
c_double_type,
c_float_complex_type,
c_double_complex_type], name="numeric")
# buffer-related structs
c_buf_diminfo_type = CStructOrUnionType("__Pyx_Buf_DimInfo", "struct",
None, 1, "__Pyx_Buf_DimInfo")
c_pyx_buffer_type = CStructOrUnionType("__Pyx_Buffer", "struct", None, 1, "__Pyx_Buffer")
c_pyx_buffer_ptr_type = CPtrType(c_pyx_buffer_type)
c_pyx_buffer_nd_type = CStructOrUnionType("__Pyx_LocalBuf_ND", "struct",
None, 1, "__Pyx_LocalBuf_ND")
cython_memoryview_type = CStructOrUnionType("__pyx_memoryview_obj", "struct",
None, 0, "__pyx_memoryview_obj")
memoryviewslice_type = CStructOrUnionType("memoryviewslice", "struct",
None, 1, "__Pyx_memviewslice")
modifiers_and_name_to_type = {
#(signed, longness, name) : type
(0, 0, "char"): c_uchar_type,
(1, 0, "char"): c_char_type,
(2, 0, "char"): c_schar_type,
(0, -1, "int"): c_ushort_type,
(0, 0, "int"): c_uint_type,
(0, 1, "int"): c_ulong_type,
(0, 2, "int"): c_ulonglong_type,
(1, -1, "int"): c_short_type,
(1, 0, "int"): c_int_type,
(1, 1, "int"): c_long_type,
(1, 2, "int"): c_longlong_type,
(2, -1, "int"): c_sshort_type,
(2, 0, "int"): c_sint_type,
(2, 1, "int"): c_slong_type,
(2, 2, "int"): c_slonglong_type,
(1, 0, "float"): c_float_type,
(1, 0, "double"): c_double_type,
(1, 1, "double"): c_longdouble_type,
(1, 0, "complex"): c_double_complex_type, # C: float, Python: double => Python wins
(1, 0, "floatcomplex"): c_float_complex_type,
(1, 0, "doublecomplex"): c_double_complex_type,
(1, 1, "doublecomplex"): c_longdouble_complex_type,
#
(1, 0, "void"): c_void_type,
(1, 0, "bint"): c_bint_type,
(0, 0, "Py_UNICODE"): c_py_unicode_type,
(0, 0, "Py_UCS4"): c_py_ucs4_type,
(2, 0, "Py_hash_t"): c_py_hash_t_type,
(2, 0, "Py_ssize_t"): c_py_ssize_t_type,
(2, 0, "ssize_t") : c_ssize_t_type,
(0, 0, "size_t") : c_size_t_type,
(2, 0, "ptrdiff_t") : c_ptrdiff_t_type,
(1, 0, "object"): py_object_type,
}
def is_promotion(src_type, dst_type):
    # It's hard to find a precise definition of promotion, but empirical
# evidence suggests that the below is all that's allowed.
if src_type.is_numeric:
if dst_type.same_as(c_int_type):
unsigned = (not src_type.signed)
return (src_type.is_enum or
(src_type.is_int and
unsigned + src_type.rank < dst_type.rank))
elif dst_type.same_as(c_double_type):
return src_type.is_float and src_type.rank <= dst_type.rank
return False
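# For illustration: under the rule above, char or short -> int counts as a
# promotion (the source rank, plus one if unsigned, stays below int's rank),
# as does float -> double, while int -> long does not and is instead scored
# as an ordinary integer conversion by best_match() below.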
def best_match(args, functions, pos=None, env=None):
"""
Given a list args of arguments and a list of functions, choose one
to call which seems to be the "best" fit for this list of arguments.
This function is used, e.g., when deciding which overloaded method
to dispatch for C++ classes.
We first eliminate functions based on arity, and if only one
function has the correct arity, we return it. Otherwise, we weight
functions based on how much work must be done to convert the
arguments, with the following priorities:
* identical types or pointers to identical types
* promotions
* non-Python types
    That is, we prefer functions where no arguments need to be converted,
and failing that, functions where only promotions are required, and
so on.
If no function is deemed a good fit, or if two or more functions have
the same weight, we return None (as there is no best match). If pos
is not None, we also generate an error.
"""
# TODO: args should be a list of types, not a list of Nodes.
actual_nargs = len(args)
candidates = []
errors = []
for func in functions:
error_mesg = ""
func_type = func.type
if func_type.is_ptr:
func_type = func_type.base_type
# Check function type
if not func_type.is_cfunction:
if not func_type.is_error and pos is not None:
error_mesg = "Calling non-function type '%s'" % func_type
errors.append((func, error_mesg))
continue
# Check no. of args
max_nargs = len(func_type.args)
min_nargs = max_nargs - func_type.optional_arg_count
if actual_nargs < min_nargs or \
(not func_type.has_varargs and actual_nargs > max_nargs):
if max_nargs == min_nargs and not func_type.has_varargs:
expectation = max_nargs
elif actual_nargs < min_nargs:
expectation = "at least %s" % min_nargs
else:
expectation = "at most %s" % max_nargs
error_mesg = "Call with wrong number of arguments (expected %s, got %s)" \
% (expectation, actual_nargs)
errors.append((func, error_mesg))
continue
if func_type.templates:
arg_types = [arg.type for arg in args]
deductions = reduce(
merge_template_deductions,
[pattern.type.deduce_template_params(actual) for (pattern, actual) in zip(func_type.args, arg_types)],
{})
if deductions is None:
errors.append((func, "Unable to deduce type parameters"))
elif len(deductions) < len(func_type.templates):
errors.append((func, "Unable to deduce type parameter %s" % (
", ".join([param.name for param in set(func_type.templates) - set(deductions.keys())]))))
else:
type_list = [deductions[param] for param in func_type.templates]
from Symtab import Entry
specialization = Entry(
name = func.name + "[%s]" % ",".join([str(t) for t in type_list]),
cname = func.cname + "<%s>" % ",".join([t.declaration_code("") for t in type_list]),
type = func_type.specialize(deductions),
pos = func.pos)
candidates.append((specialization, specialization.type))
else:
candidates.append((func, func_type))
# Optimize the most common case of no overloading...
if len(candidates) == 1:
return candidates[0][0]
elif len(candidates) == 0:
if pos is not None:
func, errmsg = errors[0]
if len(errors) == 1 or [1 for func, e in errors if e == errmsg]:
error(pos, errmsg)
else:
error(pos, "no suitable method found")
return None
possibilities = []
bad_types = []
needed_coercions = {}
for index, (func, func_type) in enumerate(candidates):
score = [0,0,0,0]
for i in range(min(len(args), len(func_type.args))):
src_type = args[i].type
dst_type = func_type.args[i].type
assignable = dst_type.assignable_from(src_type)
# Now take care of normal string literals. So when you call a cdef
# function that takes a char *, the coercion will mean that the
# type will simply become bytes. We need to do this coercion
# manually for overloaded and fused functions
if not assignable and src_type.is_pyobject:
if (src_type.is_builtin_type and src_type.name == 'str' and
dst_type.resolve() is c_char_ptr_type):
c_src_type = c_char_ptr_type
else:
c_src_type = src_type.default_coerced_ctype()
if c_src_type:
assignable = dst_type.assignable_from(c_src_type)
if assignable:
src_type = c_src_type
needed_coercions[func] = i, dst_type
if assignable:
if src_type == dst_type or dst_type.same_as(src_type):
pass # score 0
elif func_type.is_strict_signature:
break # exact match requested but not found
elif is_promotion(src_type, dst_type):
score[2] += 1
elif ((src_type.is_int and dst_type.is_int) or
(src_type.is_float and dst_type.is_float)):
score[2] += abs(dst_type.rank + (not dst_type.signed) -
(src_type.rank + (not src_type.signed))) + 1
elif not src_type.is_pyobject:
score[1] += 1
else:
score[0] += 1
else:
error_mesg = "Invalid conversion from '%s' to '%s'"%(src_type,
dst_type)
bad_types.append((func, error_mesg))
break
else:
possibilities.append((score, index, func)) # so we can sort it
if possibilities:
possibilities.sort()
if len(possibilities) > 1:
score1 = possibilities[0][0]
score2 = possibilities[1][0]
if score1 == score2:
if pos is not None:
error(pos, "ambiguous overloaded method")
return None
function = possibilities[0][-1]
if function in needed_coercions and env:
arg_i, coerce_to_type = needed_coercions[function]
args[arg_i] = args[arg_i].coerce_to(coerce_to_type, env)
return function
if pos is not None:
if len(bad_types) == 1:
error(pos, bad_types[0][1])
else:
error(pos, "no suitable method found")
return None
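# A minimal, self-contained sketch of the ranking idea documented in
# best_match() above: keep candidates with a compatible arity, then prefer the
# signature whose arguments need the least conversion work. The integer
# "ranks" and weights here are illustrative assumptions, not the real
# PyrexTypes machinery.
def _overload_ranking_sketch(arg_ranks, signatures):
    viable = [sig for sig in signatures if len(sig) == len(arg_ranks)]
    def conversion_cost(sig):
        total = 0
        for actual, formal in zip(arg_ranks, sig):
            if actual == formal:
                total += 0   # identical type: free
            elif actual < formal:
                total += 1   # widening promotion: cheap
            else:
                total += 2   # anything else: expensive
        return total
    viable.sort(key=conversion_cost)
    if len(viable) > 1 and conversion_cost(viable[0]) == conversion_cost(viable[1]):
        return None          # ambiguous, mirroring the "ambiguous overloaded method" error
    return viable[0] if viable else None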
def merge_template_deductions(a, b):
if a is None or b is None:
return None
all = a
for param, value in b.iteritems():
if param in all:
if a[param] != b[param]:
return None
else:
all[param] = value
return all
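# For illustration: merging per-argument deductions {T: int} and {T: int, U: float}
# yields their union, while a conflict such as {T: int} vs {T: long} yields None,
# which best_match() reports as a failed template deduction. (Keys are really
# TemplatePlaceholderType instances; names are used here only for readability.)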
def widest_numeric_type(type1, type2):
# Given two numeric types, return the narrowest type
# encompassing both of them.
if type1 == type2:
widest_type = type1
elif type1.is_complex or type2.is_complex:
def real_type(ntype):
if ntype.is_complex:
return ntype.real_type
return ntype
widest_type = CComplexType(
widest_numeric_type(
real_type(type1),
real_type(type2)))
elif type1.is_enum and type2.is_enum:
widest_type = c_int_type
elif type1.rank < type2.rank:
widest_type = type2
elif type1.rank > type2.rank:
widest_type = type1
elif type1.signed < type2.signed:
widest_type = type1
else:
widest_type = type2
return widest_type
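# For illustration: combining int with double yields double (higher rank wins);
# for equal ranks the unsigned variant wins, so int combined with unsigned int
# yields unsigned int; two enums widen to int; and if either side is complex
# the result is a CComplexType built from the widest underlying real type.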
def independent_spanning_type(type1, type2):
# Return a type assignable independently from both type1 and
# type2, but do not require any interoperability between the two.
# For example, in "True * 2", it is safe to assume an integer
# result type (so spanning_type() will do the right thing),
# whereas "x = True or 2" must evaluate to a type that can hold
# both a boolean value and an integer, so this function works
# better.
if type1 == type2:
return type1
elif (type1 is c_bint_type or type2 is c_bint_type) and (type1.is_numeric and type2.is_numeric):
# special case: if one of the results is a bint and the other
# is another C integer, we must prevent returning a numeric
# type so that we do not lose the ability to coerce to a
# Python bool if we have to.
return py_object_type
span_type = _spanning_type(type1, type2)
if span_type is None:
return error_type
return span_type
def spanning_type(type1, type2):
# Return a type assignable from both type1 and type2, or
# py_object_type if no better type is found. Assumes that the
# code that calls this will try a coercion afterwards, which will
# fail if the types cannot actually coerce to a py_object_type.
if type1 == type2:
return type1
elif type1 is py_object_type or type2 is py_object_type:
return py_object_type
elif type1 is c_py_unicode_type or type2 is c_py_unicode_type:
# Py_UNICODE behaves more like a string than an int
return py_object_type
span_type = _spanning_type(type1, type2)
if span_type is None:
return py_object_type
return span_type
def _spanning_type(type1, type2):
if type1.is_numeric and type2.is_numeric:
return widest_numeric_type(type1, type2)
elif type1.is_builtin_type and type1.name == 'float' and type2.is_numeric:
return widest_numeric_type(c_double_type, type2)
elif type2.is_builtin_type and type2.name == 'float' and type1.is_numeric:
return widest_numeric_type(type1, c_double_type)
elif type1.is_extension_type and type2.is_extension_type:
return widest_extension_type(type1, type2)
elif type1.is_pyobject or type2.is_pyobject:
return py_object_type
elif type1.assignable_from(type2):
if type1.is_extension_type and type1.typeobj_is_imported():
# external types are unsafe, so we use PyObject instead
return py_object_type
return type1
elif type2.assignable_from(type1):
if type2.is_extension_type and type2.typeobj_is_imported():
# external types are unsafe, so we use PyObject instead
return py_object_type
return type2
else:
return None
def widest_extension_type(type1, type2):
if type1.typeobj_is_imported() or type2.typeobj_is_imported():
return py_object_type
while True:
if type1.subtype_of(type2):
return type2
elif type2.subtype_of(type1):
return type1
type1, type2 = type1.base_type, type2.base_type
if type1 is None or type2 is None:
return py_object_type
def simple_c_type(signed, longness, name):
# Find type descriptor for simple type given name and modifiers.
# Returns None if arguments don't make sense.
return modifiers_and_name_to_type.get((signed, longness, name))
def parse_basic_type(name):
base = None
if name.startswith('p_'):
base = parse_basic_type(name[2:])
elif name.startswith('p'):
base = parse_basic_type(name[1:])
elif name.endswith('*'):
base = parse_basic_type(name[:-1])
if base:
return CPtrType(base)
#
basic_type = simple_c_type(1, 0, name)
if basic_type:
return basic_type
#
signed = 1
longness = 0
if name == 'Py_UNICODE':
signed = 0
elif name == 'Py_UCS4':
signed = 0
elif name == 'Py_hash_t':
signed = 2
elif name == 'Py_ssize_t':
signed = 2
elif name == 'ssize_t':
signed = 2
elif name == 'size_t':
signed = 0
else:
if name.startswith('u'):
name = name[1:]
signed = 0
elif (name.startswith('s') and
not name.startswith('short')):
name = name[1:]
signed = 2
longness = 0
while name.startswith('short'):
name = name.replace('short', '', 1).strip()
longness -= 1
while name.startswith('long'):
name = name.replace('long', '', 1).strip()
longness += 1
if longness != 0 and not name:
name = 'int'
return simple_c_type(signed, longness, name)
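# For illustration: parse_basic_type("int") resolves directly through
# simple_c_type(); "p_ulong" strips the "p_"/"p" pointer prefix and wraps the
# result in a CPtrType; and "longlong" peels off the "long" prefixes one by
# one (longness == 2), with the empty remainder defaulting to "int", giving
# the PY_LONG_LONG type.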
def c_array_type(base_type, size):
# Construct a C array type.
if base_type is error_type:
return error_type
else:
return CArrayType(base_type, size)
def c_ptr_type(base_type):
# Construct a C pointer type.
if base_type is error_type:
return error_type
else:
return CPtrType(base_type)
def c_ref_type(base_type):
# Construct a C reference type
if base_type is error_type:
return error_type
else:
return CReferenceType(base_type)
def c_const_type(base_type):
# Construct a C const type.
if base_type is error_type:
return error_type
else:
return CConstType(base_type)
def same_type(type1, type2):
return type1.same_as(type2)
def assignable_from(type1, type2):
return type1.assignable_from(type2)
def typecast(to_type, from_type, expr_code):
# Return expr_code cast to a C type which can be
# assigned to to_type, assuming its existing C type
# is from_type.
if (to_type is from_type or
(not to_type.is_pyobject and assignable_from(to_type, from_type))):
return expr_code
elif (to_type is py_object_type and from_type and
from_type.is_builtin_type and from_type.name != 'type'):
# no cast needed, builtins are PyObject* already
return expr_code
else:
#print "typecast: to", to_type, "from", from_type ###
return to_type.cast_code(expr_code)
|
cchurch/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/os/macports.py
|
59
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jimmy Tang <jcftang@gmail.com>
# Based on okpg (Patrick Pelletier <pp.pelletier@gmail.com>), pacman
# (Afterburn) and pkgin (Shaun Zinck) modules
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: macports
author: "Jimmy Tang (@jcftang)"
short_description: Package manager for MacPorts
description:
- Manages MacPorts packages (ports)
version_added: "1.1"
options:
name:
description:
- A list of port names.
aliases: ['port']
selfupdate:
description:
- Update Macports and the ports tree, either prior to installing ports or as a separate step.
- Equivalent to running C(port selfupdate).
aliases: ['update_cache', 'update_ports']
default: "no"
type: bool
state:
description:
- Indicates the desired state of the port.
choices: [ 'present', 'absent', 'active', 'inactive' ]
default: present
upgrade:
description:
- Upgrade all outdated ports, either prior to installing ports or as a separate step.
- Equivalent to running C(port upgrade outdated).
default: "no"
type: bool
version_added: "2.8"
variant:
description:
- A port variant specification.
- 'C(variant) is only supported with state: I(installed)/I(present).'
aliases: ['variants']
version_added: "2.7"
'''
EXAMPLES = '''
- name: Install the foo port
macports:
name: foo
- name: Install the universal, x11 variant of the foo port
macports:
name: foo
variant: +universal+x11
- name: Install a list of ports
macports:
name: "{{ ports }}"
vars:
ports:
- foo
- foo-tools
- name: Update Macports and the ports tree, then upgrade all outdated ports
macports:
selfupdate: yes
upgrade: yes
- name: Update Macports and the ports tree, then install the foo port
macports:
name: foo
selfupdate: yes
- name: Remove the foo port
macports:
name: foo
state: absent
- name: Activate the foo port
macports:
name: foo
state: active
- name: Deactivate the foo port
macports:
name: foo
state: inactive
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import shlex_quote
def selfupdate(module, port_path):
""" Update Macports and the ports tree. """
rc, out, err = module.run_command("%s -v selfupdate" % port_path)
if rc == 0:
updated = any(
re.search(r'Total number of ports parsed:\s+[^0]', s.strip()) or
re.search(r'Installing new Macports release', s.strip())
for s in out.split('\n')
if s
)
if updated:
changed = True
msg = "Macports updated successfully"
else:
changed = False
msg = "Macports already up-to-date"
return (changed, msg)
else:
module.fail_json(msg="Failed to update Macports", stdout=out, stderr=err)
def upgrade(module, port_path):
""" Upgrade outdated ports. """
rc, out, err = module.run_command("%s upgrade outdated" % port_path)
# rc is 1 when nothing to upgrade so check stdout first.
if out.strip() == "Nothing to upgrade.":
changed = False
msg = "Ports already upgraded"
return (changed, msg)
elif rc == 0:
changed = True
msg = "Outdated ports upgraded successfully"
return (changed, msg)
else:
module.fail_json(msg="Failed to upgrade outdated ports", stdout=out, stderr=err)
def query_port(module, port_path, name, state="present"):
""" Returns whether a port is installed or not. """
if state == "present":
rc, out, err = module.run_command("%s installed | grep -q ^.*%s" % (shlex_quote(port_path), shlex_quote(name)), use_unsafe_shell=True)
if rc == 0:
return True
return False
elif state == "active":
rc, out, err = module.run_command("%s installed %s | grep -q active" % (shlex_quote(port_path), shlex_quote(name)), use_unsafe_shell=True)
if rc == 0:
return True
return False
def remove_ports(module, port_path, ports):
""" Uninstalls one or more ports if installed. """
remove_c = 0
    # Use a for loop so that, if an error occurs, we can report which port failed
for port in ports:
# Query the port first, to see if we even need to remove
if not query_port(module, port_path, port):
continue
rc, out, err = module.run_command("%s uninstall %s" % (port_path, port))
if query_port(module, port_path, port):
module.fail_json(msg="Failed to remove %s: %s" % (port, err))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="Removed %s port(s)" % remove_c)
module.exit_json(changed=False, msg="Port(s) already absent")
def install_ports(module, port_path, ports, variant):
""" Installs one or more ports if not already installed. """
install_c = 0
for port in ports:
if query_port(module, port_path, port):
continue
rc, out, err = module.run_command("%s install %s %s" % (port_path, port, variant))
if not query_port(module, port_path, port):
module.fail_json(msg="Failed to install %s: %s" % (port, err))
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg="Installed %s port(s)" % (install_c))
module.exit_json(changed=False, msg="Port(s) already present")
def activate_ports(module, port_path, ports):
""" Activate a port if it's inactive. """
activate_c = 0
for port in ports:
if not query_port(module, port_path, port):
module.fail_json(msg="Failed to activate %s, port(s) not present" % (port))
if query_port(module, port_path, port, state="active"):
continue
rc, out, err = module.run_command("%s activate %s" % (port_path, port))
if not query_port(module, port_path, port, state="active"):
module.fail_json(msg="Failed to activate %s: %s" % (port, err))
activate_c += 1
if activate_c > 0:
module.exit_json(changed=True, msg="Activated %s port(s)" % (activate_c))
module.exit_json(changed=False, msg="Port(s) already active")
def deactivate_ports(module, port_path, ports):
""" Deactivate a port if it's active. """
deactivated_c = 0
for port in ports:
if not query_port(module, port_path, port):
module.fail_json(msg="Failed to deactivate %s, port(s) not present" % (port))
if not query_port(module, port_path, port, state="active"):
continue
rc, out, err = module.run_command("%s deactivate %s" % (port_path, port))
if query_port(module, port_path, port, state="active"):
module.fail_json(msg="Failed to deactivate %s: %s" % (port, err))
deactivated_c += 1
if deactivated_c > 0:
module.exit_json(changed=True, msg="Deactivated %s port(s)" % (deactivated_c))
module.exit_json(changed=False, msg="Port(s) already inactive")
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=["port"], type='list'),
selfupdate=dict(aliases=["update_cache", "update_ports"], default=False, type='bool'),
state=dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]),
upgrade=dict(default=False, type='bool'),
variant=dict(aliases=["variants"], default=None, type='str')
)
)
port_path = module.get_bin_path('port', True, ['/opt/local/bin'])
p = module.params
if p["selfupdate"]:
(changed, msg) = selfupdate(module, port_path)
if not (p["name"] or p["upgrade"]):
module.exit_json(changed=changed, msg=msg)
if p["upgrade"]:
(changed, msg) = upgrade(module, port_path)
if not p["name"]:
module.exit_json(changed=changed, msg=msg)
pkgs = p["name"]
variant = p["variant"]
if p["state"] in ["present", "installed"]:
install_ports(module, port_path, pkgs, variant)
elif p["state"] in ["absent", "removed"]:
remove_ports(module, port_path, pkgs)
elif p["state"] == "active":
activate_ports(module, port_path, pkgs)
elif p["state"] == "inactive":
deactivate_ports(module, port_path, pkgs)
if __name__ == '__main__':
main()
|
esakellari/my_root_for_test
|
refs/heads/master
|
interpreter/llvm/src/tools/clang/bindings/python/tests/cindex/test_token_kind.py
|
97
|
from clang.cindex import TokenKind
from nose.tools import eq_
from nose.tools import ok_
from nose.tools import raises
def test_constructor():
"""Ensure TokenKind constructor works as expected."""
t = TokenKind(5, 'foo')
eq_(t.value, 5)
eq_(t.name, 'foo')
@raises(ValueError)
def test_bad_register():
"""Ensure a duplicate value is rejected for registration."""
TokenKind.register(2, 'foo')
@raises(ValueError)
def test_unknown_value():
"""Ensure trying to fetch an unknown value raises."""
TokenKind.from_value(-1)
def test_registration():
"""Ensure that items registered appear as class attributes."""
ok_(hasattr(TokenKind, 'LITERAL'))
literal = TokenKind.LITERAL
ok_(isinstance(literal, TokenKind))
def test_from_value():
"""Ensure registered values can be obtained from from_value()."""
t = TokenKind.from_value(3)
ok_(isinstance(t, TokenKind))
eq_(t, TokenKind.LITERAL)
def test_repr():
"""Ensure repr() works."""
r = repr(TokenKind.LITERAL)
eq_(r, 'TokenKind.LITERAL')
|
rahushen/ansible
|
refs/heads/devel
|
lib/ansible/modules/utilities/logic/assert.py
|
67
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: assert
short_description: Asserts given expressions are true
description:
- This module asserts that given expressions are true with an optional custom message.
- This module is also supported for Windows targets.
version_added: "1.5"
options:
that:
description:
- "A string expression of the same form that can be passed to the 'when' statement"
- "Alternatively, a list of string expressions"
required: true
msg:
description:
- "The customized message used for a failing assertion"
notes:
- This module is also supported for Windows targets.
author:
- "Ansible Core Team"
- "Michael DeHaan"
'''
EXAMPLES = '''
- assert: { that: "ansible_os_family != 'RedHat'" }
- assert:
that:
- "'foo' in some_command_result.stdout"
- "number_of_the_counting == 3"
- assert:
that:
- "my_param <= 100"
- "my_param >= 0"
msg: "'my_param' must be between 0 and 100"
'''
|
alu0100207385/dsi_3Django
|
refs/heads/master
|
django/contrib/staticfiles/management/commands/runserver.py
|
243
|
from optparse import make_option
from django.conf import settings
from django.core.management.commands.runserver import Command as RunserverCommand
from django.contrib.staticfiles.handlers import StaticFilesHandler
class Command(RunserverCommand):
option_list = RunserverCommand.option_list + (
make_option('--nostatic', action="store_false", dest='use_static_handler', default=True,
help='Tells Django to NOT automatically serve static files at STATIC_URL.'),
make_option('--insecure', action="store_true", dest='insecure_serving', default=False,
help='Allows serving static files even if DEBUG is False.'),
)
help = "Starts a lightweight Web server for development and also serves static files."
def get_handler(self, *args, **options):
"""
Returns the static files serving handler wrapping the default handler,
if static files should be served. Otherwise just returns the default
handler.
"""
handler = super(Command, self).get_handler(*args, **options)
use_static_handler = options.get('use_static_handler', True)
insecure_serving = options.get('insecure_serving', False)
if use_static_handler and (settings.DEBUG or insecure_serving):
return StaticFilesHandler(handler)
return handler
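# Usage note (illustrative, not part of the command itself): when
# 'django.contrib.staticfiles' is in INSTALLED_APPS this command overrides the
# stock runserver, so
#
#     ./manage.py runserver              # serves files found at STATIC_URL automatically
#     ./manage.py runserver --nostatic   # falls back to the plain handler
#     ./manage.py runserver --insecure   # keeps serving static files even if DEBUG is False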
|
kenwang815/KodiPlugins
|
refs/heads/master
|
script.module.oceanktv/lib/youtube_dl/extractor/ellentv.py
|
11
|
# coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class EllenTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?:ellentv|ellentube)\.com/videos/(?P<id>[a-z0-9_-]+)'
_TEST = {
'url': 'http://www.ellentv.com/videos/0-ipq1gsai/',
'md5': '4294cf98bc165f218aaa0b89e0fd8042',
'info_dict': {
'id': '0_ipq1gsai',
'ext': 'mov',
'title': 'Fast Fingers of Fate',
'description': 'md5:3539013ddcbfa64b2a6d1b38d910868a',
'timestamp': 1428035648,
'upload_date': '20150403',
'uploader_id': 'batchUser',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'http://widgets.ellentube.com/videos/%s' % video_id,
video_id)
partner_id = self._search_regex(
r"var\s+partnerId\s*=\s*'([^']+)", webpage, 'partner id')
kaltura_id = self._search_regex(
[r'id="kaltura_player_([^"]+)"',
r"_wb_entry_id\s*:\s*'([^']+)",
r'data-kaltura-entry-id="([^"]+)'],
webpage, 'kaltura id')
return self.url_result('kaltura:%s:%s' % (partner_id, kaltura_id), 'Kaltura')
class EllenTVClipsIE(InfoExtractor):
IE_NAME = 'EllenTV:clips'
_VALID_URL = r'https?://(?:www\.)?ellentv\.com/episodes/(?P<id>[a-z0-9_-]+)'
_TEST = {
'url': 'http://www.ellentv.com/episodes/meryl-streep-vanessa-hudgens/',
'info_dict': {
'id': 'meryl-streep-vanessa-hudgens',
'title': 'Meryl Streep, Vanessa Hudgens',
},
'playlist_mincount': 7,
}
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
playlist = self._extract_playlist(webpage)
return {
'_type': 'playlist',
'id': playlist_id,
'title': self._og_search_title(webpage),
'entries': self._extract_entries(playlist)
}
def _extract_playlist(self, webpage):
json_string = self._search_regex(r'playerView.addClips\(\[\{(.*?)\}\]\);', webpage, 'json')
try:
return json.loads('[{' + json_string + '}]')
except ValueError as ve:
raise ExtractorError('Failed to download JSON', cause=ve)
def _extract_entries(self, playlist):
return [
self.url_result(
'kaltura:%s:%s' % (item['kaltura_partner_id'], item['kaltura_entry_id']),
'Kaltura')
for item in playlist]
|
andhit-r/account-financial-tools
|
refs/heads/8.0
|
account_default_draft_move/__init__.py
|
17
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author Vincent Renaville. Copyright 2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import account
from . import account_bank_statement
from . import res_config
|
bonus85/csv-analytics
|
refs/heads/master
|
plot_months.py
|
1
|
# -*- coding: utf-8 -*-
import process_data
from matplotlib import pyplot as plt
file_name = 'test.csv'
excel_output = 'test.xlsx'
analyzer = process_data.PowerConsumptionAnalyzer(file_name, excel_output)
for month in analyzer.months():
analyzer.write_month(month)
#print month.size
analyzer.close_wb()
|
OpenTrons/opentrons_sdk
|
refs/heads/master
|
robot-server/robot_server/service/json_api/errors.py
|
3
|
from typing import Optional, List, Dict
from pydantic import BaseModel, Field
from .resource_links import ResourceLinks
class ErrorSource(BaseModel):
pointer: Optional[str] = \
Field(None,
description="a JSON Pointer [RFC6901] to the associated"
" entity in the request document.")
parameter: Optional[str] = \
Field(None,
description="a string indicating which URI query parameter"
" caused the error.")
class Error(BaseModel):
"""https://jsonapi.org/format/#error-objects"""
id: Optional[str] = \
Field(None,
description="a unique identifier for this particular"
" occurrence of the problem.")
links: Optional[ResourceLinks] = \
Field(None,
description="a link that leads to further details about"
" this particular occurrence of the problem.")
status: Optional[str] = \
Field(None,
description="the HTTP status code applicable to this problem,"
" expressed as a string value.")
title: Optional[str] = \
Field(None,
description="a short, human-readable summary of the problem"
" that SHOULD NOT change from occurrence"
" to occurrence of the problem, except for"
" purposes of localization.")
detail: Optional[str] = \
Field(None,
description="a human-readable explanation specific to this"
" occurrence of the problem. Like title, this"
" field’s value can be localized.")
source: Optional[ErrorSource] = \
Field(None,
description="an object containing references to the source of"
" the error, optionally including pointer"
" or parameter fields.")
meta: Optional[Dict] = \
Field(None,
description="a meta object containing non-standard"
" meta-information about the error.")
class ErrorResponse(BaseModel):
errors: List[Error] = \
Field(...,
description="a list containing one of more error objects.")
|
maclandrol/ete
|
refs/heads/master
|
examples/treeview/random_draw.py
|
2
|
import os
from ete2 import Tree, faces
from ete2.treeview.main import TreeStyle, NodeStyle, random_color
import colorsys
import random
# ::::::::::::::
# Layout actions
# ::::::::::::::
def sphere_map(node):
# Creates a random color sphere face that will be floating over nodes
bubble = faces.CircleFace(random.randint(5,40), random_color(), "sphere")
bubble.opacity = 0.7
faces.add_face_to_node(bubble, node, 0, position="float")
def random_background(node):
# Set a random background color for each node partition
node.img_style["bgcolor"] = random_color()
def leaf_name(node):
if node.is_leaf():
nameF = faces.AttrFace("name")
nameF.border.width = 1
faces.add_face_to_node(nameF, node, 0, position="branch-right")
def aligned_faces(node):
if node.is_leaf():
for i in xrange(3):
F = faces.TextFace("ABCDEFGHIJK"[0:random.randint(1,11)])
F.border.width = 1
F.border.line_style = 1
F.inner_background.color = "lightgreen"
F.border.width = 1
F.inner_border.width = 1
F.background.color = "darkgreen"
F.border.width = 2
F.vt_align = random.randint(0,4)
F.hz_align = random.randint(0,4)
F.margin_bottom = random.randint(1, 20)
F.margin_right = random.randint(1, 20)
F.margin_left = random.randint(1, 20)
F.margin_top = random.randint(1, 20)
faces.add_face_to_node(F, node, i, position="aligned")
if random.randint(0, 1):
faces.add_face_to_node(F, node, i, position="aligned")
def master_ly(node):
random_background(node)
sphere_map(node)
leaf_name(node)
aligned_faces(node)
def tiny_ly(node):
node.img_style["size"] = 2
node.img_style["shape"] = "square"
size = 15
t = Tree()
t.populate(size, reuse_names=False)
I = TreeStyle()
I.mode = "r"
I.orientation = 0
I.layout_fn = master_ly
I.margin_left = 100
I.margin_right = 50
I.margin_top = 100
I.arc_start = 45
I.arc_span = 360
I.margin_bottom = 50
I.show_border = True
I.legend_position = 4
I.title.add_face(faces.TextFace("HOLA MUNDO", fsize=30), 0)
I.draw_aligned_faces_as_table = True
def test(node):
if node.is_leaf():
faces.add_face_to_node(faces.AttrFace("name"), node, 0, position="aligned")
I.aligned_header.add_face( faces.TextFace("H1"), 0 )
I.aligned_header.add_face( faces.TextFace("H1"), 1 )
I.aligned_header.add_face( faces.TextFace("H1"), 2 )
I.aligned_header.add_face( faces.TextFace("H1111111111111"), 3 )
I.aligned_header.add_face( faces.TextFace("H1"), 4 )
I.aligned_foot.add_face( faces.TextFace("FO1"), 0 )
I.aligned_foot.add_face( faces.TextFace("FO1"), 1 )
I.aligned_foot.add_face( faces.TextFace("FO1"), 2 )
I.aligned_foot.add_face( faces.TextFace("F1"), 3 )
I.aligned_foot.add_face( faces.TextFace("FO1"), 4 )
I.legend.add_face(faces.CircleFace(30, random_color(), "sphere"), 0)
I.legend.add_face(faces.CircleFace(30, random_color(), "sphere"), 0)
I.legend.add_face(faces.TextFace("HOLA"), 1)
I.legend.add_face(faces.TextFace("HOLA"), 1)
# Creates a random tree with 10 leaves
t2 = Tree()
t2.populate(10)
# Creates a fixed NodeStyle object containing a TreeFace (A tree image
# as a face within another tree image)
# t.add_face(faces.TreeFace(t2, I), "branch-right", 0)
# Attach the fixed style to the first child of the root node
# t.children[0].img_style = style
I.rotation = 90
I.mode = "c"
t.show(tree_style=I)
#t.render("/home/jhuerta/test.svg", img_properties=I)
#t.render("/home/jhuerta/test.pdf", img_properties=I)
#t.render("/home/jhuerta/test.png", img_properties=I)
#t.render("/home/jhuerta/test.ps", img_properties=I)
#os.system("inkscape /home/jhuerta/test.svg")
#I.mode = "c"
#t.show(img_properties=I)
|
IsCoolEntertainment/debpkg_python-boto
|
refs/heads/master
|
boto/ec2/elb/attributes.py
|
8
|
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Created by Chris Huegle for TellApart, Inc.
class CrossZoneLoadBalancingAttribute(object):
"""
    Represents the CrossZoneLoadBalancing segment of ELB Attributes.
"""
def __init__(self, connection=None):
self.enabled = None
def __repr__(self):
return 'CrossZoneLoadBalancingAttribute(%s)' % (
self.enabled)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Enabled':
if value.lower() == 'true':
self.enabled = True
else:
self.enabled = False
class LbAttributes(object):
"""
Represents the Attributes of an Elastic Load Balancer.
"""
def __init__(self, connection=None):
self.connection = connection
self.cross_zone_load_balancing = CrossZoneLoadBalancingAttribute(
self.connection)
def __repr__(self):
return 'LbAttributes(%s)' % (
repr(self.cross_zone_load_balancing))
def startElement(self, name, attrs, connection):
if name == 'CrossZoneLoadBalancing':
return self.cross_zone_load_balancing
def endElement(self, name, value, connection):
pass
|
Khan/khan-linter
|
refs/heads/master
|
vendor/py3/pyflakes/scripts/pyflakes.py
|
50
|
"""
Implementation of the command-line I{pyflakes} tool.
"""
from __future__ import absolute_import
# For backward compatibility
__all__ = ['check', 'checkPath', 'checkRecursive', 'iterSourceCode', 'main']
from pyflakes.api import check, checkPath, checkRecursive, iterSourceCode, main
|
kyroskoh/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/credentials_unittest.py
|
124
|
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import tempfile
import unittest2 as unittest
from webkitpy.common.net.credentials import Credentials
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.user_mock import MockUser
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockOptions
from webkitpy.common.system.executive_mock import MockExecutive
# FIXME: Other unit tests probably want this class.
class _TemporaryDirectory(object):
def __init__(self, **kwargs):
self._kwargs = kwargs
self._directory_path = None
def __enter__(self):
self._directory_path = tempfile.mkdtemp(**self._kwargs)
return self._directory_path
def __exit__(self, type, value, traceback):
os.rmdir(self._directory_path)
# Note: All tests should use this class instead of Credentials directly to avoid using a real Executive.
class MockedCredentials(Credentials):
def __init__(self, *args, **kwargs):
if 'executive' not in kwargs:
kwargs['executive'] = MockExecutive()
Credentials.__init__(self, *args, **kwargs)
class CredentialsTest(unittest.TestCase):
example_security_output = """keychain: "/Users/test/Library/Keychains/login.keychain"
class: "inet"
attributes:
0x00000007 <blob>="bugs.webkit.org (test@webkit.org)"
0x00000008 <blob>=<NULL>
"acct"<blob>="test@webkit.org"
"atyp"<blob>="form"
"cdat"<timedate>=0x32303039303832353233353231365A00 "20090825235216Z\000"
"crtr"<uint32>=<NULL>
"cusi"<sint32>=<NULL>
"desc"<blob>="Web form password"
"icmt"<blob>="default"
"invi"<sint32>=<NULL>
"mdat"<timedate>=0x32303039303930393137323635315A00 "20090909172651Z\000"
"nega"<sint32>=<NULL>
"path"<blob>=<NULL>
"port"<uint32>=0x00000000
"prot"<blob>=<NULL>
"ptcl"<uint32>="htps"
"scrp"<sint32>=<NULL>
"sdmn"<blob>=<NULL>
"srvr"<blob>="bugs.webkit.org"
"type"<uint32>=<NULL>
password: "SECRETSAUCE"
"""
def test_keychain_lookup_on_non_mac(self):
class FakeCredentials(MockedCredentials):
def _is_mac_os_x(self):
return False
credentials = FakeCredentials("bugs.webkit.org")
self.assertFalse(credentials._is_mac_os_x())
self.assertEqual(credentials._credentials_from_keychain("foo"), ["foo", None])
def test_security_output_parse(self):
credentials = MockedCredentials("bugs.webkit.org")
self.assertEqual(credentials._parse_security_tool_output(self.example_security_output), ["test@webkit.org", "SECRETSAUCE"])
def test_security_output_parse_entry_not_found(self):
# FIXME: This test won't work if the user has a credential for foo.example.com!
credentials = Credentials("foo.example.com")
if not credentials._is_mac_os_x():
return # This test does not run on a non-Mac.
# Note, we ignore the captured output because it is already covered
# by the test case CredentialsTest._assert_security_call (below).
outputCapture = OutputCapture()
outputCapture.capture_output()
self.assertIsNone(credentials._run_security_tool())
outputCapture.restore_output()
def _assert_security_call(self, username=None):
executive_mock = Mock()
credentials = MockedCredentials("example.com", executive=executive_mock)
expected_logs = "Reading Keychain for example.com account and password. Click \"Allow\" to continue...\n"
OutputCapture().assert_outputs(self, credentials._run_security_tool, [username], expected_logs=expected_logs)
security_args = ["/usr/bin/security", "find-internet-password", "-g", "-s", "example.com"]
if username:
security_args += ["-a", username]
executive_mock.run_command.assert_called_with(security_args)
def test_security_calls(self):
self._assert_security_call()
self._assert_security_call(username="foo")
def test_credentials_from_environment(self):
credentials = MockedCredentials("example.com")
saved_environ = os.environ.copy()
os.environ['WEBKIT_BUGZILLA_USERNAME'] = "foo"
os.environ['WEBKIT_BUGZILLA_PASSWORD'] = "bar"
username, password = credentials._credentials_from_environment()
self.assertEqual(username, "foo")
self.assertEqual(password, "bar")
os.environ = saved_environ
def test_read_credentials_without_git_repo(self):
# FIXME: This should share more code with test_keyring_without_git_repo
class FakeCredentials(MockedCredentials):
def _is_mac_os_x(self):
return True
def _credentials_from_keychain(self, username):
return ("test@webkit.org", "SECRETSAUCE")
def _credentials_from_environment(self):
return (None, None)
with _TemporaryDirectory(suffix="not_a_git_repo") as temp_dir_path:
credentials = FakeCredentials("bugs.webkit.org", cwd=temp_dir_path)
# FIXME: Using read_credentials here seems too broad as higher-priority
# credential source could be affected by the user's environment.
self.assertEqual(credentials.read_credentials(), ("test@webkit.org", "SECRETSAUCE"))
def test_keyring_without_git_repo(self):
# FIXME: This should share more code with test_read_credentials_without_git_repo
class MockKeyring(object):
def get_password(self, host, username):
return "NOMNOMNOM"
class FakeCredentials(MockedCredentials):
def _is_mac_os_x(self):
return True
def _credentials_from_keychain(self, username):
return ("test@webkit.org", None)
def _credentials_from_environment(self):
return (None, None)
with _TemporaryDirectory(suffix="not_a_git_repo") as temp_dir_path:
credentials = FakeCredentials("fake.hostname", cwd=temp_dir_path, keyring=MockKeyring())
# FIXME: Using read_credentials here seems too broad as higher-priority
# credential source could be affected by the user's environment.
self.assertEqual(credentials.read_credentials(), ("test@webkit.org", "NOMNOMNOM"))
def test_keyring_without_git_repo_nor_keychain(self):
class MockKeyring(object):
def get_password(self, host, username):
return "NOMNOMNOM"
class FakeCredentials(MockedCredentials):
def _credentials_from_keychain(self, username):
return (None, None)
def _credentials_from_environment(self):
return (None, None)
class FakeUser(MockUser):
@classmethod
def prompt(cls, message, repeat=1, raw_input=raw_input):
return "test@webkit.org"
@classmethod
def prompt_password(cls, message, repeat=1, raw_input=raw_input):
raise AssertionError("should not prompt for password")
with _TemporaryDirectory(suffix="not_a_git_repo") as temp_dir_path:
credentials = FakeCredentials("fake.hostname", cwd=temp_dir_path, keyring=MockKeyring())
# FIXME: Using read_credentials here seems too broad as higher-priority
# credential source could be affected by the user's environment.
self.assertEqual(credentials.read_credentials(FakeUser), ("test@webkit.org", "NOMNOMNOM"))
|
vabs22/zulip
|
refs/heads/master
|
zerver/migrations/0062_default_timezone.py
|
14
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-16 12:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0061_userprofile_timezone'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='timezone',
field=models.CharField(default='', max_length=40),
),
]
|
cesarpazguzman/The-Eternal-Sorrow
|
refs/heads/master
|
dependencies/luabind/boost-build/test/conditionals2.py
|
4
|
#!/usr/bin/python
# Copyright 2003 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Regression test: it was possible that due to evaluation of conditional
# requirements, two different values of non-free features were present in
# the property set.
from BoostBuild import Tester, List
t = Tester()
t.write("project-root.jam", "")
t.write("a.cpp", "")
t.write("Jamfile", """
import feature : feature ;
import common : file-creation-command ;
feature the_feature : false true : propagated ;
rule maker ( targets * : sources * : properties * )
{
if <the_feature>false in $(properties)
&& <the_feature>true in $(properties)
{
EXIT "Oops, two different values of non-free feature" ;
}
CMD on $(targets) = [ file-creation-command ] ;
}
actions maker
{
$(CMD) $(<) ;
}
make a : a.cpp : maker : <variant>debug:<the_feature>true ;
""")
t.run_build_system()
t.cleanup()
|
leafo/FrameworkBenchmarks
|
refs/heads/master
|
pyramid/setup_benchmark.py
|
5
|
import subprocess
import setup_util
import multiprocessing
import os
home = os.path.expanduser('~')
bin_dir = os.path.expanduser('~/FrameworkBenchmarks/installs/py3/bin')
NCPU = multiprocessing.cpu_count()
proc = None
def start(args):
global proc
setup_util.replace_text(
"frameworkbenchmarks/models.py",
"DBHOSTNAME = 'localhost'",
"DBHOSTNAME = '%s'" % args.database_host
)
proc = subprocess.Popen([
bin_dir + '/gunicorn',
'wsgi:app',
'-b', "0.0.0.0:6543",
'-w', str(NCPU*3)],
cwd='pyramid'
)
return 0
def stop():
global proc
if proc is not None:
proc.terminate()
proc.wait()
return 0
|
lucasvo/heroku-buildpack-python-with-node
|
refs/heads/master
|
test/django-1.4-skeleton/haystack/urls.py
|
128
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'haystack.views.home', name='home'),
# url(r'^haystack/', include('haystack.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
|
alyosha1879/ryu
|
refs/heads/dev
|
ryu/lib/hub.py
|
21
|
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
# we don't bother to use cfg.py because monkey patch needs to be
# called very early. instead, we use an environment variable to
# select the type of hub.
HUB_TYPE = os.getenv('RYU_HUB_TYPE', 'eventlet')
LOG = logging.getLogger('ryu.lib.hub')
if HUB_TYPE == 'eventlet':
import eventlet
import eventlet.event
import eventlet.queue
import eventlet.semaphore
import eventlet.timeout
import eventlet.wsgi
from ryu.contrib._eventlet import websocket
import greenlet
import ssl
import socket
import traceback
getcurrent = eventlet.getcurrent
patch = eventlet.monkey_patch
sleep = eventlet.sleep
listen = eventlet.listen
connect = eventlet.connect
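    # Note: the aliases above and the helpers below intentionally present a
    # gevent-like interface on top of eventlet (see the "mimic gevent" comments
    # in spawn/spawn_after), so the rest of Ryu can use this hub module without
    # importing eventlet directly. Illustrative usage only (not part of this file;
    # `workers` is a hypothetical list of callables):
    #
    #     from ryu.lib import hub
    #     threads = [hub.spawn(worker) for worker in workers]
    #     hub.joinall(threads)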
def spawn(*args, **kwargs):
def _launch(func, *args, **kwargs):
# mimic gevent's default raise_error=False behaviour
            # by not propagating an exception to the joiner.
try:
func(*args, **kwargs)
except greenlet.GreenletExit:
pass
except:
# log uncaught exception.
# note: this is an intentional divergence from gevent
# behaviour. gevent silently ignores such exceptions.
LOG.error('hub: uncaught exception: %s',
traceback.format_exc())
return eventlet.spawn(_launch, *args, **kwargs)
def spawn_after(seconds, *args, **kwargs):
def _launch(func, *args, **kwargs):
# mimic gevent's default raise_error=False behaviour
            # by not propagating an exception to the joiner.
try:
func(*args, **kwargs)
except greenlet.GreenletExit:
pass
except:
# log uncaught exception.
# note: this is an intentional divergence from gevent
# behaviour. gevent silently ignores such exceptions.
LOG.error('hub: uncaught exception: %s',
traceback.format_exc())
return eventlet.spawn_after(seconds, _launch, *args, **kwargs)
def kill(thread):
thread.kill()
def joinall(threads):
for t in threads:
# this try-except is necessary when killing an inactive
# greenthread
try:
t.wait()
except greenlet.GreenletExit:
pass
Queue = eventlet.queue.Queue
QueueEmpty = eventlet.queue.Empty
Semaphore = eventlet.semaphore.Semaphore
BoundedSemaphore = eventlet.semaphore.BoundedSemaphore
class StreamServer(object):
def __init__(self, listen_info, handle=None, backlog=None,
spawn='default', **ssl_args):
assert backlog is None
assert spawn == 'default'
if ':' in listen_info[0]:
self.server = eventlet.listen(listen_info,
family=socket.AF_INET6)
else:
self.server = eventlet.listen(listen_info)
if ssl_args:
def wrap_and_handle(sock, addr):
ssl_args.setdefault('server_side', True)
handle(ssl.wrap_socket(sock, **ssl_args), addr)
self.handle = wrap_and_handle
else:
self.handle = handle
def serve_forever(self):
while True:
sock, addr = self.server.accept()
spawn(self.handle, sock, addr)
class LoggingWrapper(object):
def write(self, message):
LOG.info(message.rstrip('\n'))
class WSGIServer(StreamServer):
def serve_forever(self):
self.logger = LoggingWrapper()
eventlet.wsgi.server(self.server, self.handle, self.logger)
WebSocketWSGI = websocket.WebSocketWSGI
Timeout = eventlet.timeout.Timeout
class Event(object):
def __init__(self):
self._ev = eventlet.event.Event()
self._cond = False
def _wait(self, timeout=None):
while not self._cond:
self._ev.wait()
def _broadcast(self):
self._ev.send()
            # because eventlet Event doesn't allow multiple send() on an event,
# re-create the underlying event.
# note: _ev.reset() is obsolete.
self._ev = eventlet.event.Event()
def is_set(self):
return self._cond
def set(self):
self._cond = True
self._broadcast()
def clear(self):
self._cond = False
def wait(self, timeout=None):
if timeout is None:
self._wait()
else:
try:
with Timeout(timeout):
self._wait()
except Timeout:
pass
return self._cond
|
nadley/Sick-Beard
|
refs/heads/development
|
lib/requests/packages/urllib3/response.py
|
65
|
# urllib3/response.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import gzip
import logging
import zlib
from io import BytesIO
from .exceptions import DecodeError
from .packages.six import string_types as basestring
log = logging.getLogger(__name__)
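# The two helpers below decode the content-encodings supported by this module.
# decode_gzip wraps the body in a GzipFile; decode_deflate first tries a
# zlib-wrapped stream and, if that raises zlib.error, retries with
# -zlib.MAX_WBITS to accept a "raw" deflate stream, which some servers are
# known to send. Rough illustration (not part of the original module):
#
#     assert decode_deflate(zlib.compress(b'hello')) == b'hello'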
def decode_gzip(data):
gzipper = gzip.GzipFile(fileobj=BytesIO(data))
return gzipper.read()
def decode_deflate(data):
try:
return zlib.decompress(data)
except zlib.error:
return zlib.decompress(data, -zlib.MAX_WBITS)
class HTTPResponse(object):
"""
HTTP Response container.
    Backwards-compatible with httplib's HTTPResponse, but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
        If True, the response body will be decoded based on the
        'content-encoding' header (e.g. 'gzip' and 'deflate'); if False,
        the raw, undecoded data is returned.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = {
'gzip': decode_gzip,
'deflate': decode_deflate,
}
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
self.headers = headers or {}
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self._decode_content = decode_content
self._body = body if body and isinstance(body, basestring) else None
self._fp = None
self._original_response = original_response
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in [301, 302, 303, 307]:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, decoding and caching
is skipped because we can't decode partial content nor does it make
sense to cache partial content as the full response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header. (Overridden if ``amt`` is set.)
:param cache_content:
If True, will save the returned data such that the same result is
            returned regardless of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
content_encoding = self.headers.get('content-encoding')
decoder = self.CONTENT_DECODERS.get(content_encoding)
if decode_content is None:
decode_content = self._decode_content
if self._fp is None:
return
try:
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
else:
return self._fp.read(amt)
try:
if decode_content and decoder:
data = decoder(data)
except (IOError, zlib.error):
raise DecodeError("Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding)
if cache_content:
self._body = data
return data
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
# Normalize headers between different versions of Python
headers = {}
for k, v in r.getheaders():
# Python 3: Header keys are returned capitalised
k = k.lower()
has_value = headers.get(k)
if has_value: # Python 3: Repeating header keys are unmerged.
v = ', '.join([has_value, v])
headers[k] = v
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
return ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
|
davidpng/FCS_Database
|
refs/heads/master
|
FlowAnal/subcommands/template-query_do.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Template script for selecting a set of .fcs files and operating on them one-by-one
NOTE: Some files are not found because the filename on disk does not match \
the filename stored inside the .fcs file (meta info)
"""
import logging
from os import path
from FlowAnal.database.FCS_database import FCSdatabase
from FlowAnal.FCS import FCS
from __init__ import add_filter_args
log = logging.getLogger(__name__)
def build_parser(parser):
parser.add_argument('dir', help='Base directory containing .fcs files',
type=str)
parser.add_argument('-db', '--db', help='Input sqlite db containing flow meta data \
[default: db/fcs.db]',
default="db/fcs.db", type=str)
add_filter_args(parser)
def action(args):
# Connect to database
db = FCSdatabase(db=args.db, rebuild=False)
# Create query
q = db.query(exporttype='dict_dict', getfiles=True, **vars(args))
for case, case_info in q.results.items():
for case_tube_idx, relpath in case_info.items():
log.info("Case: %s, Case_tube_idx: %s, File: %s" % (case, case_tube_idx, relpath))
filepath = path.join(args.dir, relpath)
a = FCS(filepath=filepath, case_tube_idx=case_tube_idx)
# Do something
print a.case_tube
|
bearstech/modoboa
|
refs/heads/master
|
modoboa/relaydomains/apps.py
|
1
|
"""AppConfig for relaydomains."""
from __future__ import unicode_literals
from django.apps import AppConfig
from django.utils.translation import ugettext as _
def load_relaydomains_settings():
"""Load application settings."""
from modoboa.parameters import tools as param_tools
from .app_settings import AdminParametersForm
param_tools.registry.add("global", AdminParametersForm, _("Relay domains"))
class RelayDomainsConfig(AppConfig):
"""App configuration."""
name = "modoboa.relaydomains"
verbose_name = "Modoboa relay domains"
def ready(self):
load_relaydomains_settings()
from . import handlers
|
siavooshpayandehazad/SoCDep2
|
refs/heads/master
|
src/main/python/Simulator/simulator_functions.py
|
2
|
# Copyright (C) 2016 Siavoosh Payandeh Azad
import numpy
import copy
from ast import literal_eval
from ConfigAndPackages import Config
from SystemHealthMonitoring import SHMU_Functions
import matplotlib.pyplot as plt
def generate_random_fault_time_dict(runtime, shm):
"""
generates random fault time dictionary
:param runtime: simulation runtime
:param shm: system health map
:return: fault time dictionary
"""
fault_time_dict = {}
fault_time = 0
time_until_next_fault = numpy.random.normal(Config.MTBF, Config.SD4MTBF)
fault_time += time_until_next_fault
while fault_time < runtime:
fault_location, fault_type = SHMU_Functions.random_fault_generation(shm)
fault_time_dict[float("{0:.1f}".format(fault_time))] = (fault_location, fault_type)
time_until_next_fault = numpy.random.normal(Config.MTBF, Config.SD4MTBF)
fault_time += time_until_next_fault
fault_file = open('Generated_Files/Injected_Faults.txt', 'w')
for item in fault_time_dict:
fault_file.write(str(item)+"\t"+str(fault_time_dict[item][0])+"\t"+str(fault_time_dict[item][1])+"\n")
draw_faults_locations(fault_time_dict)
return fault_time_dict
def generate_fault_time_dict_from_file():
fault_time_dict = {}
fault_file = open(Config.fault_injection_file, 'r')
line = fault_file.readline()
while line != '':
line = line.rstrip()
fault_item = line.split('\t')
# print(literal_eval(fault_item[1]))
fault_time_dict[float("{0:.1f}".format(float(fault_item[0])))] = (literal_eval(fault_item[1]), fault_item[2])
line = fault_file.readline()
draw_faults_locations(fault_time_dict)
return fault_time_dict
def update_fault_time_dict(current_time, fault_time_dictionary):
temp_dict = {}
for fault_time in fault_time_dictionary.keys():
if fault_time < current_time:
pass
else:
dict_value = fault_time_dictionary[fault_time]
temp_dict[fault_time-current_time] = dict_value
return temp_dict
def draw_faults_locations(fault_time_dict):
plt.figure()
location_time_dictionary = {}
for item in fault_time_dict:
fault_location = fault_time_dict[item][0]
if fault_location in location_time_dictionary.keys():
location_time_dictionary[fault_location].append(int(item))
else:
location_time_dictionary[fault_location] = [int(item)]
for location in location_time_dictionary.keys():
time_list = location_time_dictionary[location]
# print(location, time_list)
values = []
x_axis = []
for i in range(0, int(Config.ProgramRunTime)):
if i == 0:
values.append(2)
else:
if i in time_list:
values.append(1)
else:
values.append(0)
x_axis.append(i)
# print(values)
# print(x_axis)
# print("---------------------------")
plt.xlim(xmin=0, xmax=Config.ProgramRunTime)
plt.bar(x_axis, values, align='center')
plt.savefig("GraphDrawings/Components_Fault_Drawings/Fault_config_for_loc_"+str(location)+".png", dpi=100)
# plt.xticks(range(len(D)), D.keys())
plt.clf()
plt.close()
return None
|
alexbruy/QGIS
|
refs/heads/master
|
python/ext-libs/future/future/moves/http/cookies.py
|
83
|
from __future__ import absolute_import
from future.utils import PY3
if PY3:
from http.cookies import *
else:
__future_module__ = True
from Cookie import *
from Cookie import Morsel # left out of __all__ on Py2.7!
|
sjfloat/eucalyptus
|
refs/heads/master
|
clc/eucadmin/eucadmin/validator.py
|
7
|
#!/usr/bin/python
#
# Copyright 2013 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import yaml
import os
import argparse
import json
import subprocess
import sys
import paramiko
import urllib
import urlparse
import logging
from .configfile import ConfigFile
from .cfg import AdminConfig
from .describeservices import DescribeServices
from .describenodes import DescribeNodes
from .sshconnection import SshConnection
from .constants import *
from eucadmin import debug
sys.excepthook = debug.gen_except_hook(True, True)
# Currently, these three are the only components returned by
# DescribeServices that we traverse.
COMPONENT_MAP = { 'cluster': 'CC',
'storage': 'SC',
'walrus': 'WS',
}
LOGLEVELS = ['DEBUG', 'INFO', 'WARN', 'WARNING', 'ERROR', 'CRITICAL']
def read_validator_config(files=[]):
'''
Read one or more YAML config files and merge them.
'''
def merge(base, overlay):
if isinstance(base, dict) and isinstance(overlay, dict):
for k,v in overlay.iteritems():
if k not in base:
base[k] = v
else:
base[k] = merge(base[k],v)
elif isinstance(base, list) and isinstance(overlay, list):
# We could use sets here, but this preserves
# ordering, simply eliminating duplicates
base.extend([ x for x in overlay if x not in base ])
return base
data = {}
for f in files:
if os.path.exists(f):
data = merge(data, yaml.load(open(f, 'r').read()))
return data
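# Merge semantics of the helper above: dictionaries are merged recursively,
# lists are concatenated with duplicates removed (preserving order), and on a
# scalar conflict the value from the earlier file in `files` wins, because each
# later file is merged in as the overlay.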
def build_parser():
parser = argparse.ArgumentParser(description='Eucalyptus cloud validator')
parser.add_argument('stage', choices=('preinstall', 'postinstall',
'register', 'monitor'),
default='monitor',
help='Which test stage to run (default: monitor)')
parser.add_argument('-c', '--config-file',
default=DEFAULT_CONFIG_FILE,
help='The path to the eucadmin config')
parser.add_argument('-C', '--component',
default='CLC',
help='The cloud component role(s) of this system.')
parser.add_argument('-t', '--traverse',
action='store_true',
help='Traverse other components in the cloud (requires ssh credentials)')
group = parser.add_mutually_exclusive_group()
group.add_argument('-j', '--json',
action='store_true',
help='Output JSON-formatted results')
group.add_argument('-q', '--quiet',
action='store_true',
help='No output; only a return code')
parser.add_argument('-l', '--log-level', choices=LOGLEVELS,
default='INFO', type=str.upper,
help='Log level (default: INFO)')
parser.add_argument('-s', '--subtask', action='store_true',
help=argparse.SUPPRESS) # subtask of another validator
return parser
def run_script(scriptPath):
po = subprocess.Popen([scriptPath],
stdout=subprocess.PIPE,
cwd='/')
stdout = po.communicate()[0]
return stdout
class Validator(object):
def __init__(self, stage="monitor", component="CLC", traverse=False,
config_file=DEFAULT_CONFIG_FILE, log_level="INFO",
subtask=False, **kwargs):
# TODO: allow a component list?
os.environ['EUCA_ROLES'] = component
os.environ['TERM'] = 'dumb'
self.stage = stage
self.component = component
self.traverse = traverse
self.admincfg = AdminConfig(config_file)
self.subtask = subtask
if stage != 'preinstall' or (component == "CC" and traverse):
self.euca_conf = ConfigFile(os.path.join(self.admincfg.eucalyptus,
EUCA_CONF_FILE))
self.setupLogging(logging.getLevelName(log_level))
def setupLogging(self, level=logging.INFO):
logging.basicConfig()
self.log = logging.getLogger('eucadmin.validator')
self.log.setLevel(level)
@classmethod
def run(cls):
parser = build_parser()
args = parser.parse_args()
obj = cls(**vars(args))
result = obj.main()
if args.json:
print json.dumps(result, indent=4)
elif args.quiet:
sys.exit(Validator.check_nested_result("", result))
else:
sys.exit(Validator.check_nested_result("", result, print_output=True))
def log_nested_result(self, parent, result):
for key in result.keys():
if type(result[key]) != dict:
raise Exception("Error parsing validation data")
if result[key].has_key('cmd'):
self.log_nested_result(parent + key + ":",
result[key]['output'])
elif not result[key].has_key('failed'):
self.log_nested_result(parent + key + ":",
result[key])
else:
for level in LOGLEVELS:
if result[key].has_key(level):
self.log.log(logging.getLevelName(level.upper()),
"%s%s: %s" % (parent, key , result[key][level]))
@staticmethod
def check_nested_result(parent, result, print_output=False):
failed = False
for key in result.keys():
if type(result[key]) != dict:
raise Exception("Error parsing validation data")
if result[key].has_key('cmd'):
failed |= Validator.check_nested_result(parent + key + ":",
result[key]['output'],
print_output=print_output)
elif not result[key].has_key('failed'):
failed |= Validator.check_nested_result(parent + key + ":",
result[key],
print_output=print_output)
else:
if int(result[key]['failed']):
if print_output:
print "%s%s: %s" % (parent, key, result[key].get('error', "No details provided"))
failed = True
return failed
def run_remote(self, host, component, stage, traverse=False, dbg=False):
t=traverse and "-t" or ""
ssh = SshConnection(host, username="root")
# NB: euca-validator must be in the PATH and must have a usable
# configuration on the remote system!
cmd = 'euca-validator %s -C %s %s -j -s' % (t, COMPONENT_MAP.get(component, component), stage)
out = ssh.cmd(cmd, timeout=600, get_pty=False)
try:
out['output'] = json.loads(out['output'])
self.log_nested_result("%s-%s:" % (host, component), out['output'])
return out
except Exception, e:
self.log.warn("Remote command failed: %s" % out['output'])
return {'cmd': cmd, "output": { "euca-validator": { "failed": 1, "error": str(e) } } }
finally:
ssh.close()
def main(self):
self.log.debug("Reading configuration files: %s" % self.admincfg.validator_config_path)
data = read_validator_config(files=self.admincfg.validator_config_path.split(':'))
result = {}
self.log.debug('Script search path is %s' % self.admincfg.validator_script_path)
for script in data.get(self.stage, {}).get(self.component, []):
for dirpath in self.admincfg.validator_script_path.split(':'):
scriptPath = os.path.join(dirpath, script)
if os.path.exists(scriptPath):
self.log.debug('Running script: %s' % scriptPath)
return_val = run_script(scriptPath)
try:
result[script] = json.loads(return_val)
except Exception, e:
self.log.error("Script %s did not return valid JSON." % scriptPath)
self.log.debug("returned data was %s" % return_val)
break
for level in LOGLEVELS:
if result[script].has_key(level):
if not self.subtask:
self.log.log(logging.getLevelName(level.upper()),
"%s: %s" % (script, result[script][level]))
break
if not result.has_key(script):
self.log.error("script %s not found" % script)
if self.component == "CLC" and self.traverse:
# describe-services or get from config file
ds = DescribeServices(url='http://localhost:8773',)
data = ds.main()
hosts = []
for service in data['euca:DescribeServicesResponseType']['euca:serviceStatuses']:
hostname = urllib.splitport(urlparse.urlparse(service['euca:serviceId']['euca:uri']).netloc)[0]
status = service['euca:localState']
component_type = service['euca:serviceId']['euca:type']
if COMPONENT_MAP.get(component_type):
hosts.append((hostname, component_type, status))
for host, component_type, status in hosts:
# print "running sub-check: %s - %s - %s" % (host, component_type, args.stage)
result['-'.join([host, component_type])] = self.run_remote(host, component_type,
self.stage,
traverse=self.traverse,
dbg=True)
elif self.component == "CC" and self.traverse:
# describe nodes is a CLC call; get from config file
# dn = DescribeNodes(url='http://localhost:8773',)
# data = dn.main()
for host in self.euca_conf["NODES"].split():
result[host] = self.run_remote(host, "NC", self.stage, dbg=True)
return result
|
romanornr/viacoin
|
refs/heads/master
|
test/functional/test_framework/bignum.py
|
59
|
#!/usr/bin/env python3
#
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Big number routines.
This file is copied from python-bitcoinlib.
"""
import struct
# generic big endian MPI format
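# Layout produced by bn2mpi below: a 4-byte big-endian length prefix followed
# by the magnitude bytes, most significant first; when the top bit of the first
# magnitude byte would otherwise be set, an extra leading 0x00 byte is added so
# that bit can serve as the sign flag for negative values.
# Worked examples (derived from the code, for illustration only):
#   bn2mpi(-1)   == b'\x00\x00\x00\x01\x81'
#   bn2mpi(0x80) == b'\x00\x00\x00\x02\x00\x80'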
def bn_bytes(v, have_ext=False):
ext = 0
if have_ext:
ext = 1
return ((v.bit_length()+7)//8) + ext
def bn2bin(v):
s = bytearray()
i = bn_bytes(v)
while i > 0:
s.append((v >> ((i-1) * 8)) & 0xff)
i -= 1
return s
def bin2bn(s):
l = 0
for ch in s:
l = (l << 8) | ch
return l
def bn2mpi(v):
have_ext = False
if v.bit_length() > 0:
have_ext = (v.bit_length() & 0x07) == 0
neg = False
if v < 0:
neg = True
v = -v
s = struct.pack(b">I", bn_bytes(v, have_ext))
ext = bytearray()
if have_ext:
ext.append(0)
v_bin = bn2bin(v)
if neg:
if have_ext:
ext[0] |= 0x80
else:
v_bin[0] |= 0x80
return s + ext + v_bin
def mpi2bn(s):
if len(s) < 4:
return None
s_size = bytes(s[:4])
v_len = struct.unpack(b">I", s_size)[0]
if len(s) != (v_len + 4):
return None
if v_len == 0:
return 0
v_str = bytearray(s[4:])
neg = False
i = v_str[0]
if i & 0x80:
neg = True
i &= ~0x80
v_str[0] = i
v = bin2bn(v_str)
if neg:
return -v
return v
# bitcoin-specific little endian format, with implicit size
def mpi2vch(s):
r = s[4:] # strip size
r = r[::-1] # reverse string, converting BE->LE
return r
def bn2vch(v):
return bytes(mpi2vch(bn2mpi(v)))
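# Illustration (derived from the functions above): bn2vch(255) == b'\xff\x00',
# where the trailing 0x00 keeps the sign bit clear, and bn2vch(-1) == b'\x81',
# matching Bitcoin's little-endian script-number encoding.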
def vch2mpi(s):
r = struct.pack(b">I", len(s)) # size
r += s[::-1] # reverse string, converting LE->BE
return r
def vch2bn(s):
return mpi2bn(vch2mpi(s))
|
JackNokia/robotframework
|
refs/heads/master
|
atest/testdata/test_libraries/MyLibDir/ClassLib.py
|
37
|
class ClassLib(object):
def keyword_in_mylibdir_classlib(self):
pass
|
krzysztof/invenio-sipstore
|
refs/heads/master
|
invenio_sipstore/archivers/bagit_archiver.py
|
1
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016, 2017 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Archivers for SIP."""
import json
from copy import deepcopy
from datetime import datetime
from flask import current_app
from fs.path import join
from invenio_db import db
from invenio_jsonschemas.errors import JSONSchemaNotFound
from jsonschema import validate
from invenio_sipstore.archivers import BaseArchiver
from invenio_sipstore.models import SIPMetadata, SIPMetadataType, \
current_jsonschemas
class BagItArchiver(BaseArchiver):
"""BagIt archiver for SIP files."""
# Name of the SIPMetadataType for internal use of BagItArchiver
bagit_metadata_type_name = 'bagit'
def __init__(self, sip, tags=None):
"""Constuctor.
:param sip: API instance of the SIP that is to be archived.
:type sip: invenio_sipstore.api.SIP
        :param dict tags: a dictionary of tags for the BagIt
"""
super(BagItArchiver, self).__init__(sip)
self.tags = tags or {}
# overrides BaseArchiver.get_all_files()
def get_all_files(self):
"""Return the complete list of files in the archive.
All the files + all the metadata + bagit information.
        :return: the list of all relative final paths
"""
files = super(BagItArchiver, self).get_all_files()
return files + ['manifest-md5.txt', 'bagit.txt', 'bag-info.txt',
'tagmanifest-md5.txt']
@classmethod
def get_bagit_metadata_type(cls):
"""Property for the metadata type for BagIt generation."""
return SIPMetadataType.get_from_name(cls.bagit_metadata_type_name)
@classmethod
def get_bagit_metadata(cls, sip):
"""Fetch the BagIt metadata information.
:param sip: SIP for which to fetch the metadata.
        :returns: the BagIt metadata information (SIPMetadata) instance
or None if the object does not exist.
"""
return SIPMetadata.query.filter_by(
sip_id=sip.id,
type_id=cls.get_bagit_metadata_type().id).one_or_none()
@classmethod
def get_bagit_metadata_json(cls, sip):
"""Get the JSON (dict) of the associated BagIt metadata.
Shortcut method for loading the JSON directly from the associated
SIPMetadata object.
"""
bagit_meta = cls.get_bagit_metadata(sip)
if bagit_meta:
return json.loads(bagit_meta.content)
else:
return None
def _is_fetched(self, file_info):
"""Determine if file info specifies a file that is fetched."""
return 'fetched' in file_info and file_info['fetched']
def create_bagit_metadata(
self, patch_of=None, include_missing_files=False,
filesdir='data/files', metadatadir='data/metadata'):
"""Create the BagIt metadata object."""
sip_data_files = [] # Record's data + Record metadata dumps
sipfiles = self.sip.files
if patch_of:
sipfiles = [] # We select SIPFiles for writing manually
prev_files = self.get_bagit_metadata_json(patch_of)['datafiles']
# Helper mappings
# File UUID-to-manifest-item mapping (select only the data files,
# not the metadata).
id2mi = dict((f['file_uuid'], f) for f in
prev_files if 'file_uuid' in f)
# File UUID-to-SIP File mapping
id2sf = dict((str(file.file.id), file) for file in self.sip.files)
manifest_files_s = set(id2mi.keys())
sip_files_s = set(id2sf.keys())
if include_missing_files:
fetched_uuids = manifest_files_s
else:
fetched_uuids = manifest_files_s & sip_files_s
stored_uuids = sip_files_s - manifest_files_s
for uuid in fetched_uuids:
fi = deepcopy(id2mi[uuid])
if uuid in id2sf:
filename = join(filesdir, id2sf[uuid].filepath)
else:
filename = id2mi[uuid]['filename']
fi['filename'] = filename
fi['fetched'] = True
sip_data_files.append(fi)
for uuid in stored_uuids:
sipfiles.append(id2sf[uuid])
# Copy the files
files_info = super(BagItArchiver, self).create(
filesdir=filesdir, metadatadir=metadatadir, sipfiles=sipfiles,
dry_run=True)
sip_data_files.extend(files_info)
# Add the files from fetch.txt to the files_info dictionary,
# so they will be included in generated manifest-md5.txt file
self.autogenerate_tags(files_info)
# Generate the BagIt metadata files (manifest-md5.txt, bagit.txt etc.)
bagit_meta_files = []
if any(self._is_fetched(fi) for fi in sip_data_files):
funcs = [((self.get_fetch_file, (sip_data_files, ), )), ]
else:
funcs = []
funcs.extend([
(self.get_manifest_file, (sip_data_files, ), ),
(self.get_bagit_file, (), ),
(self.get_baginfo_file, (), ),
(self.get_tagmanifest, (bagit_meta_files, ), ), # Needs to be last
])
for func, args in funcs:
fn, content = func(*args)
fi = self._save_file(fn, content, dry_run=True)
fi['content'] = content
bagit_meta_files.append(fi)
bagit_schema = current_jsonschemas.path_to_url(
current_app.config['SIPSTORE_DEFAULT_BAGIT_JSONSCHEMA'])
bagit_metadata = {
'datafiles': sip_data_files,
'bagitfiles': bagit_meta_files,
'$schema': bagit_schema,
}
# Validate the BagIt metadata with JSONSchema
schema_path = current_jsonschemas.url_to_path(bagit_schema)
schema = current_jsonschemas.get_schema(schema_path)
validate(bagit_metadata, schema)
# Create the BagIt schema object
with db.session.begin_nested():
obj = SIPMetadata(
sip_id=self.sip.id,
type_id=BagItArchiver.get_bagit_metadata_type().id,
content=json.dumps(bagit_metadata))
db.session.add(obj)
def create(self, patch_of=None, include_missing_files=False,
filesdir="data/files", metadatadir="data/metadata"):
"""Archive the SIP generating a BagIt file.
        When specifying the 'patch_of' parameter, the 'include_missing_files'
flag determines whether files that are missing in the archived SIP
(w.r.t. the SIP specified in 'patch_of') should be treated as
explicitly deleted (include_missing_files=False) or if they
should still be included in the manifest.
Example:
include_missing_files = True
SIP_1:
SIPFiles: a.txt, b.txt
BagIt Manifest: a.txt, b.txt
SIP_2 (Bagged with patch_of=SIP_1):
SIPFiles: b.txt, c.txt
BagIt Manifest: a.txt, b.txt, c.txt
fetch.txt: a.txt, b.txt
include_missing_files = False
SIP_1:
SIPFiles: a.txt, b.txt
BagIt Manifest: a.txt, b.txt
SIP_2 (Bagged with patch_of=SIP_1):
SIPFIles: b.txt, c.txt
BagIt Manifest: b.txt, c.txt
fetch.txt: b.txt
:param bool create_bagit_metadata: At the end of archiving,
create a SIPMetadata object for this SIP, which
will contain the metadata of the BagIt contents.
It is necessary to bag the SIPs with this option enabled, if
one wants to make use of 'patch_of' later on.
:param patch_of: Write a lightweight BagIt, which will archive only
the new files, and refer to the repeated ones in "fetch.txt" file.
The provided argument is a SIP, which will be taken as a base
for determining the "diff" between two bags.
Provided SIP needs to have a special 'bagit'-named SIPMetadata
object associated with it, i.e. it had to have been previously
archived with the 'create_bagit_metadata' flag.
:type patch_of: invenio_sipstore.models.SIP or None
        :param bool include_missing_files: If set to True and if 'patch_of' is
            used, include the files that are missing in the SIP w.r.t.
the 'patch_of' SIP in the manifest.
The opposite (include_missing_files=False) is equivalent to
treating those as explicitly deleted - the files will not be
included in the manifest, nor in the "fetch.txt" file.
:returns: a dictionary with the filenames as keys, and size and
checksum as value
:rtype: dict
"""
bagit_meta = self.get_bagit_metadata_json(self.sip)
if not bagit_meta:
self.create_bagit_metadata(
patch_of=patch_of, include_missing_files=include_missing_files,
filesdir=filesdir, metadatadir=metadatadir)
bagit_meta = self.get_bagit_metadata_json(self.sip)
archived_data = [fi for fi in bagit_meta['datafiles']
if not self._is_fetched(fi) and 'file_uuid' in fi]
fetched_data = [fi for fi in bagit_meta['datafiles']
if self._is_fetched(fi)]
archived_s = set([f['file_uuid'] for f in archived_data])
# Create a set for fetching
sipfiles = [f for f in self.sip.files if str(f.file.id) in archived_s]
files_info = super(BagItArchiver, self).create(
filesdir=filesdir, metadatadir=metadatadir, sipfiles=sipfiles)
files_info.extend(fetched_data)
for fi in bagit_meta['bagitfiles']:
out_fi = self._save_file(fi['filename'], fi['content'])
assert fi['path'] == out_fi['path']
files_info.append(out_fi)
return files_info
def autogenerate_tags(self, files_info):
"""Generate the automatic tags."""
self.tags['Bagging-Date'] = datetime.now().strftime(
"%Y-%m-%d_%H:%M:%S:%f")
self.tags['Payload-Oxum'] = '{0}.{1}'.format(
sum([f['size'] for f in files_info]), len(files_info))
def get_fetch_file(self, files_info):
"""Generate the contents of the fetch.txt file."""
content = ('{0} {1} {2}'.format(f['path'], f['size'], f['filename'])
for f in files_info if self._is_fetched(f))
return 'fetch.txt', '\n'.join(content)
def get_manifest_file(self, files_info):
"""Create the manifest file specifying the checksum of the files.
:return: the name of the file and its content
:rtype: tuple
"""
content = ('{0} {1}'.format(self._get_checksum(
f['checksum']), f['filename']) for f in files_info)
return 'manifest-md5.txt', '\n'.join(content)
def get_bagit_file(self):
"""Create the bagit file which specify the version and encoding.
:return: the name of the file and its content
:rtype: tuple
"""
content = 'BagIt-Version: 0.97\nTag-File-Character-Encoding: UTF-8'
return 'bagit.txt', content
def get_baginfo_file(self):
"""Create the baginfo file using the tags.
:return: the name of the file and its content
:rtype: tuple
"""
content_items = ('{0}: {1}'.format(k, v)
for k, v in self.tags.items())
content = '\n'.join(content_items)
return 'bag-info.txt', content
def get_tagmanifest(self, files_info):
"""Create the tagmanifest file using the files info.
:return: the name of the file and its content
:rtype: tuple
"""
files_info = [fi for fi in files_info if fi['filename']]
name, content = self.get_manifest_file(files_info)
return 'tagmanifest-md5.txt', content
@staticmethod
def _get_checksum(checksum, expected='md5'):
"""Return the checksum if the type is the expected."""
checksum = checksum.split(':')
if checksum[0] != expected or len(checksum) != 2:
raise AttributeError('Checksum format is not correct.')
else:
return checksum[1]
|
liw/daos
|
refs/heads/master
|
src/tests/ftest/erasurecode/ec_offline_rebuild.py
|
1
|
#!/usr/bin/python
'''
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
'''
from ec_utils import ErasureCodeIor
from apricot import skipForTicket
class EcOfflineRebuild(ErasureCodeIor):
# pylint: disable=too-many-ancestors
"""
Test Class Description: To validate Erasure code object data after killing
    a single server (offline rebuild).
:avocado: recursive
"""
@skipForTicket("DAOS-7212")
def test_ec_offline_rebuild(self):
"""Jira ID: DAOS-5894.
Test Description: Test Erasure code object with IOR.
Use Case: Create the pool, run IOR with supported
EC object type class for small and large transfer sizes.
        Kill a single server, wait for the rebuild to finish, then read
        back and verify all IOR data.
:avocado: tags=all,hw,large,ib2,full_regression
:avocado: tags=ec,ec_offline_rebuild
"""
# Write IOR data set with different EC object and different sizes
self.ior_write_dataset()
# Kill the last server rank
self.get_dmg_command().system_stop(True, self.server_count - 1)
# Wait for rebuild to start
self.pool.wait_for_rebuild(True)
# Wait for rebuild to complete
self.pool.wait_for_rebuild(False)
# Read IOR data and verify for different EC object and different sizes
# written before killing the single server
self.ior_read_dataset()
        # Kill another server rank
self.get_dmg_command().system_stop(True, self.server_count - 2)
# Wait for rebuild to start
self.pool.wait_for_rebuild(True)
# Wait for rebuild to complete
self.pool.wait_for_rebuild(False)
# Read IOR data and verify for different EC object and different sizes
# written before killing the second server.
# Only +2 (Parity) data will be intact so read and verify only +2 IOR
# data set
self.ior_read_dataset(parity=2)
|
GeneralizedLearningUtilities/Dinosaurs
|
refs/heads/master
|
Student_Model/Tests/Concept_UnitTests.py
|
1
|
# -*- coding: utf-8 -*-
import os
import unittest.case
from Student_Model.Concept import (Concept,)
from Util.Paths import getBasePath
from Util.JSInterpreter import executeJS
from Util.Serialization import (StorageToken, Serializable, makeNative, untokenizeObject)
class ConceptTest(unittest.case.TestCase):
""" Unit test for the Agent class, test all Serializable methods """
TEST_CLASS = Concept
def setUp(self):
""" Create the Agent object to be used in the tests """
self.concept = self.TEST_CLASS()
def testChooseActions(self):
pass
def testPerceive(self):
pass
# Begin of Serializable tests
def test__init__(self):
""" Test that the Agent object is being initialized correctly """
a = self.TEST_CLASS()
self.assertIsInstance(a, self.TEST_CLASS)
self.assertTrue(type(a) == self.TEST_CLASS)
def testSaveToToken(self):
""" Test that the Agent object is being correctly saved as a Token """
self.assertIsInstance(self.concept.saveToToken(), StorageToken)
def testInitializeFromToken(self):
""" Test that the Agent object is being correctly initialized
from a Token
"""
t = self.concept.saveToToken()
c = self.TEST_CLASS("Name")
self.assertNotEqual(c, self.concept)
c.initializeFromToken(t)
self.assertEqual(c, self.concept)
def testCreateFromToken(self):
""" Test that the Agent object is being correctly created
from a Token
"""
t = self.concept.saveToToken()
c = Serializable.createFromToken(t)
self.assertEqual(c, self.concept)
def testReadFromJSObject(self):
""" Test that we can read the JS equivalent of this class into this object """
try:
dirPath = os.path.dirname(__file__)
sObj = executeJS("Concept_concept.js", dirPath + os.sep, getBasePath())
except NameError:
sObj = executeJS("Concept_concept.js", '', getBasePath())
token = makeNative(sObj)
x = untokenizeObject(token)
self.assertIsInstance(x, self.TEST_CLASS)
self.assertEqual(type(x), self.TEST_CLASS)
self.assertEqual("conceptId", x.getConceptId())
self.assertEqual("name", x.getName())
self.assertEqual("description", x.getDescription())
# End of Serializable tests
if __name__ == "__main__":
unittest.main()
|
wangyixiang/atserver
|
refs/heads/master
|
streaminfoextractor.py
|
1
|
#!/usr/bin/env python
#coding:utf-8
# Author: Yixiang.Wang
# Purpose:
# Created: 2013/12/12
import filecmp
import hashlib
import logging
import os
import subprocess
import torndb
STREAM_FILE_EXTS = ('.ts','.trp','.mpg')
# EPG_Test.exe (Standard) (source file) (log path) (language code)
# i.e: "FULLSEG" "E:\ch20_515MHz.ts" "e:/EPG_Test/" "jpn"
EPG_TOOL = r'D:\temp\EPG_Test.exe'
# DVB-T 1SEG FULLSEG
# DVB-T Y N Y
# 1SEG N Y Y
#FULLSEG Y Y Y
#
#STREAM TABLE DEFINITION
# `filename` VARCHAR(255) NOT NULL,
# `filesize` BIGINT UNSIGNED NOT NULL,
# `channelnumber` SMALLINT UNSIGNED,
# `serverlocation` VARCHAR(1024),
# `streamtype` VARCHAR(16),
# `country` VARCHAR(512),
# `md5` VARCHAR(512),
# `comment` VARCHAR(512),
# PRIMARY KEY ( filename, filesize )
# `toolmd5` VARCHAR(255),
# `toolret` VARCHAR(64),
# `toollog` VARCHAR(64)
STREAM_TYPE_DVBT = 'DVB-T'
STREAM_TYPE_1SEG = '1SEG'
STREAM_TYPE_FULLSEG = 'FULLSEG'
BASE_DIR = r'D:'
DVBT_TEMP_DIR = BASE_DIR + os.sep + 'DVTB_LOG'
ONESEG_TEMP_DIR = BASE_DIR + os.sep + '1SEG_LOG'
FULLSEG_TEMP_DIR = BASE_DIR + os.sep + 'FULLSEG_LOG'
LOG_EXT_SUFFIX = '.log'
DEFAULT_SERVER_LOCATION = r'\\fileserver'
db = torndb.Connection("", "atdb", "", "")
toolfile = open(EPG_TOOL, 'rb')  # read the tool binary in binary mode so the md5 is computed over raw bytes
m = hashlib.md5()
m.update(toolfile.read())
toolmd5 = m.hexdigest()
toolfile.close()
unhandled_file_count = 0
def traverse_dir(target_path, depth=999):
if depth < 0:
return
else:
depth -= 1
paths = os.listdir(target_path)
for path in paths:
if path[0] in ('.'):
continue
if os.path.isdir(''.join([
target_path.rstrip(os.sep),
os.sep,
path])):
traverse_dir(''.join([target_path.rstrip(os.sep), os.sep, path]), depth)
continue
if os.path.splitext(path)[1].lower() not in STREAM_FILE_EXTS:
continue
process_file(''.join([target_path.rstrip(os.sep), os.sep, path]))
def process_file(src):
global unhandled_file_count
try:
src_filename = os.path.basename(src).decode('gb18030')
src_size = os.path.getsize(src)
src_location = os.path.dirname(src).decode('gb18030')
ret = _triple_run(src)
toolret = unicode(str(ret[0][0]) + '|' + str(ret[0][1]) + '|' + str(ret[0][2]))
toollog = unicode(str(ret[1][0]) + '|' + str(ret[1][1]) + '|' + str(ret[1][2]))
qstr = u"INSERT INTO tempstreams VALUE (%s, %s, %s, %s, %s, %s)"
db.execute(qstr, *(src_filename, src_size, src_location, toolmd5, toolret, toollog))
except Exception:
unhandled_file_count += 1
logging.error("unhandled file: %s" % src)
logging.exception(src)
def from_result_to_temp():
try:
qstr = u"SELECT * from tempstreams;"
qresults = db.query(qstr)
except Exception:
print "from_result_to_temp fail at start."
import sys
sys.exit(-1)
for qresult in qresults:
qstr = u"INSERT INTO epgtoolresult VALUE (%s, %s , %s ,%s, %s, %s)"
try:
src = qresult['serverlocation'] + u'\\' + qresult['filename']
db.execute(qstr, *(qresult["filename"], qresult["filesize"], qresult["serverlocation"], qresult["toolmd5"], qresult["toolret"], qresult["toollog"]))
except Exception:
logging.error("unhandled file: %s" % src)
logging.exception(src)
def from_db_record():
try:
qstr = u"SELECT * FROM epgtoolresult WHERE toolret='0|0|0'"
qresults = db.query(qstr)
except Exception:
print "from_db_record fail at start."
import sys
sys.exit(-1)
count = 0
for qresult in qresults:
src = (qresult['serverlocation'] + u'\\' + qresult['filename']).encode('gb18030')
try:
qstr = u"INSERT INTO tempstreams VALUE (%s, %s, %s, %s, %s, %s)"
count += 1
print count
exist = db.query(u"SELECT * FROM tempstreams WHERE filename=%s and filesize=%s and toolmd5=%s", *(qresult['filename'], qresult['filesize'],toolmd5))
if len(exist) == 0:
ret = _triple_run(src)
toolret = unicode(str(ret[0][0]) + '|' + str(ret[0][1]) + '|' + str(ret[0][2]))
toollog = unicode(str(ret[1][0]) + '|' + str(ret[1][1]) + '|' + str(ret[1][2]))
db.execute(qstr, *(qresult['filename'], qresult['filesize'], qresult['serverlocation'], toolmd5, toolret, toollog))
except Exception:
logging.error("unhandled file: %s" % src)
logging.exception(src)
def process_file_from_log():
ude_file = open('unicode_decoded_err.txt')
ude_data = ude_file.readlines()
ude_file.close()
for line in ude_data:
print line
process_file(line.strip())
def _triple_run(src):
STANDARD_ARG_DVBT = 'DVBT'
STANDARD_ARG_1SEG = '1SEG'
STANDARD_ARG_FULLSEG = 'FULLSEG'
src_filename = os.path.basename(src)
dvbt_ret = subprocess.call([EPG_TOOL, STANDARD_ARG_DVBT, src, DVBT_TEMP_DIR])
oneseg_ret = subprocess.call([EPG_TOOL, STANDARD_ARG_1SEG, src, ONESEG_TEMP_DIR])
fullseg_ret = subprocess.call([EPG_TOOL, STANDARD_ARG_FULLSEG, src, FULLSEG_TEMP_DIR])
dvbt_log_file = DVBT_TEMP_DIR + os.sep + src_filename + LOG_EXT_SUFFIX
oneseg_log_file = ONESEG_TEMP_DIR + os.sep + src_filename + LOG_EXT_SUFFIX
fullseg_log_file = FULLSEG_TEMP_DIR + os.sep + src_filename + LOG_EXT_SUFFIX
cmp_tuple = [0,0,0]
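    # cmp_tuple appears to record, per standard (index 0 = DVB-T, 1 = 1SEG,
    # 2 = FULLSEG), which log files were produced and which of them compare
    # equal: 0 means no log file was written, logs that match share a group
    # number (1, 2 or 3), and 4 is set further below for a log whose only
    # content is the source path, which is then treated as empty and removed.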
if os.path.exists(dvbt_log_file):
if os.path.exists(oneseg_log_file):
if filecmp.cmp(dvbt_log_file, oneseg_log_file, 0):
cmp_tuple[0] = 1
cmp_tuple[1] = 1
else:
cmp_tuple[0] = 1
cmp_tuple[1] = 2
else:
cmp_tuple[0] = 1
if os.path.exists(dvbt_log_file):
if os.path.exists(fullseg_log_file):
if filecmp.cmp(dvbt_log_file, fullseg_log_file, 0):
cmp_tuple[0] = 1
cmp_tuple[2] = 1
else:
cmp_tuple[0] = 1
cmp_tuple[2] = 3
else:
cmp_tuple[0] = 1
if os.path.exists(oneseg_log_file):
if os.path.exists(fullseg_log_file):
if filecmp.cmp(oneseg_log_file, fullseg_log_file, 0):
if cmp_tuple[1] == 0:
cmp_tuple[1] = 2
cmp_tuple[2] = 2
else:
cmp_tuple[2] = cmp_tuple[1]
else:
if cmp_tuple[1] == 0:
cmp_tuple[1] = 2
if cmp_tuple[2] == 0:
cmp_tuple[2] == 3
else:
cmp_tuple[1] = 2
if cmp_tuple[0] != 0:
f = open(dvbt_log_file)
d = f.read()
if d.strip().lower() == src.lower():
cmp_tuple[0] = 4
f.close()
os.remove(dvbt_log_file)
if cmp_tuple[1] != 0:
f = open(oneseg_log_file)
d = f.read()
if d.strip().lower() == src.lower():
cmp_tuple[1] = 4
f.close()
os.remove(oneseg_log_file)
if cmp_tuple[2] != 0:
f = open(fullseg_log_file)
d = f.read()
if d.strip().lower() == src.lower():
cmp_tuple[2] = 4
f.close()
os.remove(fullseg_log_file)
return [ [dvbt_ret,oneseg_ret,fullseg_ret], cmp_tuple ]
def parse_tempstreams_log():
def _handle_it(data, match):
l = len('2014-01-06 18:36:30,716 ERROR unhandled file: ')
data = data[0][l:].strip()
try:
ret_dict[match].append(data)
except:
logging.exception('')
import re
ret_dict = {'IntegrityError:':[],'UnicodeDecodeError:':[], 'WindowsError:':[]}
re_pattern = '^[a-zA-Z]+?:'
rp = re.compile(re_pattern)
log_filenames = [r"D:\tempstreams.log",r"D:\2tempstreams.log",r"D:\3tempstreams.log"]
for log_filename in log_filenames:
log_file = open(log_filename)
log_data = log_file.readlines()
log_file.close()
temp_lines = []
for line in log_data:
temp_lines.append(line)
m = rp.match(line)
if m != None:
_handle_it(temp_lines, m.group(0))
temp_lines = []
#dup_file = open('duplicated_file.txt', 'w')
#ude_file = open('unicode_decoded_err.txt', 'w')
#we_file = open('windows_err.txt','w')
#for item in ret_dict['IntegrityError:']:
#dup_file.write(item + os.linesep)
#for item in ret_dict['UnicodeDecodeError:']:
#ude_file.write(item + os.linesep)
#for item in ret_dict['WindowsError:']:
#we_file.write(item + os.linesep)
#dup_file.close()
#ude_file.close()
#we_file.close()
def sum_dup_file_size():
filename = r'duplicated_file.txt'
if __name__ == '__main__':
FORMAT = "%(asctime)s %(levelname)s %(message)s"
logging.basicConfig(filename=r'D:\4tempstreams.log', format=FORMAT)
from_db_record()
|
ChenJunor/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/django/http/utils.py
|
134
|
"""
Functions that modify an HTTP request or response in some way.
"""
# This group of functions are run as part of the response handling, after
# everything else, including all response middleware. Think of them as
# "compulsory response middleware". Be careful about what goes here, because
# it's a little fiddly to override this behavior, so they should be truly
# universally applicable.
def fix_location_header(request, response):
"""
Ensures that we always use an absolute URI in any location header in the
response. This is required by RFC 2616, section 14.30.
Code constructing response objects is free to insert relative paths, as
this function converts them to absolute paths.
"""
if 'Location' in response and request.get_host():
response['Location'] = request.build_absolute_uri(response['Location'])
return response
def conditional_content_removal(request, response):
"""
Removes the content of responses for HEAD requests, 1xx, 204 and 304
responses. Ensures compliance with RFC 2616, section 4.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = b''
response['Content-Length'] = '0'
if request.method == 'HEAD':
if response.streaming:
response.streaming_content = []
else:
response.content = b''
return response
|
overtherain/scriptfile
|
refs/heads/master
|
software/googleAppEngine/lib/django_1_3/django/contrib/admin/sites.py
|
70
|
import re
from django import http, template
from django.contrib.admin import ModelAdmin, actions
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.contenttypes import views as contenttype_views
from django.views.decorators.csrf import csrf_protect
from django.db.models.base import ModelBase
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.utils.functional import update_wrapper
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.conf import settings
LOGIN_FORM_KEY = 'this_is_the_login_form'
class AlreadyRegistered(Exception):
pass
class NotRegistered(Exception):
pass
class AdminSite(object):
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
login_form = None
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
def __init__(self, name=None, app_name='admin'):
self._registry = {} # model_class class -> admin_class instance
self.root_path = None
if name is None:
self.name = 'admin'
else:
self.name = name
self.app_name = app_name
self._actions = {'delete_selected': actions.delete_selected}
self._global_actions = self._actions.copy()
def register(self, model_or_iterable, admin_class=None, **options):
"""
Registers the given model(s) with the given admin class.
The model(s) should be Model classes, not instances.
If an admin class isn't given, it will use ModelAdmin (the default
admin options). If keyword arguments are given -- e.g., list_display --
they'll be applied as options to the admin class.
If a model is already registered, this will raise AlreadyRegistered.
If a model is abstract, this will raise ImproperlyConfigured.
"""
if not admin_class:
admin_class = ModelAdmin
# Don't import the humongous validation code unless required
if admin_class and settings.DEBUG:
from django.contrib.admin.validation import validate
else:
validate = lambda model, adminclass: None
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model._meta.abstract:
raise ImproperlyConfigured('The model %s is abstract, so it '
'cannot be registered with admin.' % model.__name__)
if model in self._registry:
raise AlreadyRegistered('The model %s is already registered' % model.__name__)
# If we got **options then dynamically construct a subclass of
# admin_class with those **options.
if options:
# For reasons I don't quite understand, without a __module__
# the created class appears to "live" in the wrong place,
# which causes issues later on.
options['__module__'] = __name__
admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
# Validate (which might be a no-op)
validate(admin_class, model)
# Instantiate the admin class to save in the registry
self._registry[model] = admin_class(model, self)
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
        Explicitly get a registered global action whether it's enabled or
not. Raises KeyError for invalid names.
"""
return self._global_actions[name]
@property
def actions(self):
"""
Get all the enabled actions as an iterable of (name, func).
"""
return self._actions.iteritems()
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def check_dependencies(self):
"""
Check that all things needed to run the admin have been correctly installed.
The default implementation checks that LogEntry, ContentType and the
auth context processor are installed.
"""
from django.contrib.admin.models import LogEntry
from django.contrib.contenttypes.models import ContentType
if not LogEntry._meta.installed:
raise ImproperlyConfigured("Put 'django.contrib.admin' in your "
"INSTALLED_APPS setting in order to use the admin application.")
if not ContentType._meta.installed:
raise ImproperlyConfigured("Put 'django.contrib.contenttypes' in "
"your INSTALLED_APPS setting in order to use the admin application.")
if not ('django.contrib.auth.context_processors.auth' in settings.TEMPLATE_CONTEXT_PROCESSORS or
'django.core.context_processors.auth' in settings.TEMPLATE_CONTEXT_PROCESSORS):
raise ImproperlyConfigured("Put 'django.contrib.auth.context_processors.auth' "
"in your TEMPLATE_CONTEXT_PROCESSORS setting in order to use the admin application.")
def admin_view(self, view, cacheable=False):
"""
Decorator to create an admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
You'll want to use this from within ``AdminSite.get_urls()``:
class MyAdminSite(AdminSite):
def get_urls(self):
from django.conf.urls.defaults import patterns, url
urls = super(MyAdminSite, self).get_urls()
urls += patterns('',
url(r'^my_view/$', self.admin_view(some_view))
)
return urls
By default, admin_views are marked non-cacheable using the
``never_cache`` decorator. If the view can be safely cached, set
cacheable=True.
"""
def inner(request, *args, **kwargs):
if not self.has_permission(request):
return self.login(request)
return view(request, *args, **kwargs)
if not cacheable:
inner = never_cache(inner)
# We add csrf_protect here so this function can be used as a utility
# function for any view, without having to repeat 'csrf_protect'.
if not getattr(view, 'csrf_exempt', False):
inner = csrf_protect(inner)
return update_wrapper(inner, view)
def get_urls(self):
from django.conf.urls.defaults import patterns, url, include
if settings.DEBUG:
self.check_dependencies()
def wrap(view, cacheable=False):
def wrapper(*args, **kwargs):
return self.admin_view(view, cacheable)(*args, **kwargs)
return update_wrapper(wrapper, view)
# Admin-site-wide views.
urlpatterns = patterns('',
url(r'^$',
wrap(self.index),
name='index'),
url(r'^logout/$',
wrap(self.logout),
name='logout'),
url(r'^password_change/$',
wrap(self.password_change, cacheable=True),
name='password_change'),
url(r'^password_change/done/$',
wrap(self.password_change_done, cacheable=True),
name='password_change_done'),
url(r'^jsi18n/$',
wrap(self.i18n_javascript, cacheable=True),
name='jsi18n'),
url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$',
wrap(contenttype_views.shortcut)),
url(r'^(?P<app_label>\w+)/$',
wrap(self.app_index),
name='app_list')
)
# Add in each model's views.
for model, model_admin in self._registry.iteritems():
urlpatterns += patterns('',
url(r'^%s/%s/' % (model._meta.app_label, model._meta.module_name),
include(model_admin.urls))
)
return urlpatterns
@property
def urls(self):
return self.get_urls(), self.app_name, self.name
def password_change(self, request):
"""
Handles the "change password" task -- both form display and validation.
"""
from django.contrib.auth.views import password_change
if self.root_path is not None:
url = '%spassword_change/done/' % self.root_path
else:
url = reverse('admin:password_change_done', current_app=self.name)
defaults = {
'current_app': self.name,
'post_change_redirect': url
}
if self.password_change_template is not None:
defaults['template_name'] = self.password_change_template
return password_change(request, **defaults)
def password_change_done(self, request, extra_context=None):
"""
Displays the "success" page after a password change.
"""
from django.contrib.auth.views import password_change_done
defaults = {
'current_app': self.name,
'extra_context': extra_context or {},
}
if self.password_change_done_template is not None:
defaults['template_name'] = self.password_change_done_template
return password_change_done(request, **defaults)
def i18n_javascript(self, request):
"""
Displays the i18n JavaScript that the Django admin requires.
This takes into account the USE_I18N setting. If it's set to False, the
generated JavaScript will be leaner and faster.
"""
if settings.USE_I18N:
from django.views.i18n import javascript_catalog
else:
from django.views.i18n import null_javascript_catalog as javascript_catalog
return javascript_catalog(request, packages=['django.conf', 'django.contrib.admin'])
@never_cache
def logout(self, request, extra_context=None):
"""
Logs out the user for the given HttpRequest.
This should *not* assume the user is already logged in.
"""
from django.contrib.auth.views import logout
defaults = {
'current_app': self.name,
'extra_context': extra_context or {},
}
if self.logout_template is not None:
defaults['template_name'] = self.logout_template
return logout(request, **defaults)
@never_cache
def login(self, request, extra_context=None):
"""
Displays the login form for the given HttpRequest.
"""
from django.contrib.auth.views import login
context = {
'title': _('Log in'),
'root_path': self.root_path,
'app_path': request.get_full_path(),
REDIRECT_FIELD_NAME: request.get_full_path(),
}
context.update(extra_context or {})
defaults = {
'extra_context': context,
'current_app': self.name,
'authentication_form': self.login_form or AdminAuthenticationForm,
'template_name': self.login_template or 'admin/login.html',
}
return login(request, **defaults)
@never_cache
def index(self, request, extra_context=None):
"""
Displays the main admin index page, which lists all of the installed
apps that have been registered in this site.
"""
app_dict = {}
user = request.user
for model, model_admin in self._registry.items():
app_label = model._meta.app_label
has_module_perms = user.has_module_perms(app_label)
if has_module_perms:
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'admin_url': mark_safe('%s/%s/' % (app_label, model.__name__.lower())),
'perms': perms,
}
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': app_label.title(),
'app_url': app_label + '/',
'has_module_perms': has_module_perms,
'models': [model_dict],
}
# Sort the apps alphabetically.
app_list = app_dict.values()
app_list.sort(key=lambda x: x['name'])
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
context = {
'title': _('Site administration'),
'app_list': app_list,
'root_path': self.root_path,
}
context.update(extra_context or {})
context_instance = template.RequestContext(request, current_app=self.name)
return render_to_response(self.index_template or 'admin/index.html', context,
context_instance=context_instance
)
def app_index(self, request, app_label, extra_context=None):
user = request.user
has_module_perms = user.has_module_perms(app_label)
app_dict = {}
for model, model_admin in self._registry.items():
if app_label == model._meta.app_label:
if has_module_perms:
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'admin_url': '%s/' % model.__name__.lower(),
'perms': perms,
}
if app_dict:
app_dict['models'].append(model_dict)
else:
# First time around, now that we know there's
# something to display, add in the necessary meta
# information.
app_dict = {
'name': app_label.title(),
'app_url': '',
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if not app_dict:
raise http.Http404('The requested admin page does not exist.')
# Sort the models alphabetically within each app.
app_dict['models'].sort(key=lambda x: x['name'])
context = {
'title': _('%s administration') % capfirst(app_label),
'app_list': [app_dict],
'root_path': self.root_path,
}
context.update(extra_context or {})
context_instance = template.RequestContext(request, current_app=self.name)
return render_to_response(self.app_index_template or ('admin/%s/app_index.html' % app_label,
'admin/app_index.html'), context,
context_instance=context_instance
)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
site = AdminSite()
|
semonte/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/db/models/query.py
|
71
|
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
from itertools import izip
from django.db import connections, router, transaction, IntegrityError
from django.db.models.aggregates import Aggregate
from django.db.models.fields import DateField
from django.db.models.query_utils import (Q, select_related_descend,
deferred_class_factory, InvalidQuery)
from django.db.models.deletion import Collector
from django.db.models import signals, sql
from django.utils.copycompat import deepcopy
# Used to control how many objects are worked with at once in some cases (e.g.
# when deleting objects).
CHUNK_SIZE = 100
ITER_CHUNK_SIZE = CHUNK_SIZE
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
def __init__(self, model=None, query=None, using=None):
self.model = model
# EmptyQuerySet instantiates QuerySet with model as None
self._db = using
self.query = query or sql.Query(self.model)
self._result_cache = None
self._iter = None
self._sticky_filter = False
self._for_write = False
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k,v in self.__dict__.items():
if k in ('_iter','_result_cache'):
obj.__dict__[k] = None
else:
obj.__dict__[k] = deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
len(self)
obj_dict = self.__dict__.copy()
obj_dict['_iter'] = None
return obj_dict
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
# Since __len__ is called quite frequently (for example, as part of
# list(qs)), we make some effort here to be as efficient as possible
# whilst not messing up any existing iterators against the QuerySet.
if self._result_cache is None:
if self._iter:
self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
self._result_cache.extend(list(self._iter))
return len(self._result_cache)
def __iter__(self):
if self._result_cache is None:
self._iter = self.iterator()
self._result_cache = []
if self._iter:
return self._result_iter()
# Python's list iterator is better than our version when we're just
# iterating over the cache.
return iter(self._result_cache)
def _result_iter(self):
pos = 0
while 1:
upper = len(self._result_cache)
while pos < upper:
yield self._result_cache[pos]
pos = pos + 1
if not self._iter:
raise StopIteration
if len(self._result_cache) <= pos:
self._fill_cache()
def __nonzero__(self):
if self._result_cache is not None:
return bool(self._result_cache)
try:
iter(self).next()
except StopIteration:
return False
return True
def __contains__(self, val):
# The 'in' operator works without this method, due to __iter__. This
# implementation exists only to shortcut the creation of Model
# instances, by bailing out early if we find a matching element.
pos = 0
if self._result_cache is not None:
if val in self._result_cache:
return True
elif self._iter is None:
# iterator is exhausted, so we have our answer
return False
# remember not to check these again:
pos = len(self._result_cache)
else:
# We need to start filling the result cache out. The following
# ensures that self._iter is not None and self._result_cache is not
# None
it = iter(self)
# Carry on, one result at a time.
while True:
if len(self._result_cache) <= pos:
self._fill_cache(num=1)
if self._iter is None:
# we ran out of items
return False
if self._result_cache[pos] == val:
return True
pos += 1
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice, int, long)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
# The result cache has only been partially populated, so we may
# need to fill it out a bit more.
if isinstance(k, slice):
if k.stop is not None:
# Some people insist on passing in strings here.
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
try:
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
except self.model.DoesNotExist, e:
raise IndexError(e.args)
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other._clone()
combined = self._clone()
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
combined = self._clone()
if isinstance(other, EmptyQuerySet):
return combined
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
fill_cache = self.query.select_related
if isinstance(fill_cache, dict):
requested = fill_cache
else:
requested = None
max_depth = self.query.max_depth
extra_select = self.query.extra_select.keys()
aggregate_select = self.query.aggregate_select.keys()
only_load = self.query.get_loaded_field_names()
if not fill_cache:
fields = self.model._meta.fields
pk_idx = self.model._meta.pk_index()
index_start = len(extra_select)
aggregate_start = index_start + len(self.model._meta.fields)
load_fields = []
# If only/defer clauses have been specified,
# build the list of fields that are to be loaded.
if only_load:
for field, model in self.model._meta.get_fields_with_model():
if model is None:
model = self.model
if field == self.model._meta.pk:
# Record the index of the primary key when it is found
pk_idx = len(load_fields)
try:
if field.name in only_load[model]:
# Add a field that has been explicitly included
load_fields.append(field.name)
except KeyError:
# Model wasn't explicitly listed in the only_load table
# Therefore, we need to load all fields from this model
load_fields.append(field.name)
skip = None
if load_fields and not fill_cache:
# Some fields have been deferred, so we have to initialise
# via keyword arguments.
skip = set()
init_list = []
for field in fields:
if field.name not in load_fields:
skip.add(field.attname)
else:
init_list.append(field.attname)
model_cls = deferred_class_factory(self.model, skip)
# Cache db and model outside the loop
db = self.db
model = self.model
compiler = self.query.get_compiler(using=db)
for row in compiler.results_iter():
if fill_cache:
obj, _ = get_cached_row(model, row,
index_start, using=db, max_depth=max_depth,
requested=requested, offset=len(aggregate_select),
only_load=only_load)
else:
if skip:
row_data = row[index_start:aggregate_start]
pk_val = row_data[pk_idx]
obj = model_cls(**dict(zip(init_list, row_data)))
else:
# Omit aggregates in object creation.
obj = model(*row[index_start:aggregate_start])
# Store the source database of the object
obj._state.db = db
# This object came from the database; it's not being added.
obj._state.adding = False
if extra_select:
for i, k in enumerate(extra_select):
setattr(obj, k, row[i])
# Add the aggregates to the model
if aggregate_select:
for i, aggregate in enumerate(aggregate_select):
setattr(obj, aggregate, row[i+aggregate_start])
yield obj
def aggregate(self, *args, **kwargs):
"""
Returns a dictionary containing the calculations (aggregation)
over the current queryset.
If args is present, the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
for arg in args:
kwargs[arg.default_alias] = arg
query = self.query.clone()
for (alias, aggregate_expr) in kwargs.items():
query.add_aggregate(aggregate_expr, self.model, alias,
is_summary=True)
return query.get_aggregation(using=self.db)
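# Usage sketch for aggregate(), normally reached through a manager. The Book
# model and its fields are illustrative assumptions, not part of this module.
#
#     from django.db.models import Avg
#     Book.objects.aggregate(Avg('price'))             # {'price__avg': 34.35}
#     Book.objects.aggregate(avg_price=Avg('price'))   # {'avg_price': 34.35}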
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None and not self._iter:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter():
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist("%s matching query does not exist."
% self.model._meta.object_name)
raise self.model.MultipleObjectsReturned("get() returned more than one %s -- it returned %s! Lookup parameters were %s"
% (self.model._meta.object_name, num, kwargs))
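# Usage sketch for get(); the Entry model is a hypothetical example.
#
#     entry = Entry.objects.get(pk=1)
#     # Raises Entry.DoesNotExist if nothing matches, and
#     # Entry.MultipleObjectsReturned if more than one row matches.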
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def get_or_create(self, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
assert kwargs, \
'get_or_create() must be passed at least one keyword argument'
defaults = kwargs.pop('defaults', {})
try:
self._for_write = True
return self.get(**kwargs), False
except self.model.DoesNotExist:
try:
params = dict([(k, v) for k, v in kwargs.items() if '__' not in k])
params.update(defaults)
obj = self.model(**params)
sid = transaction.savepoint(using=self.db)
obj.save(force_insert=True, using=self.db)
transaction.savepoint_commit(sid, using=self.db)
return obj, True
except IntegrityError, e:
transaction.savepoint_rollback(sid, using=self.db)
try:
return self.get(**kwargs), False
except self.model.DoesNotExist:
raise e
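# Usage sketch for get_or_create(); the Person model and fields are assumed.
#
#     import datetime
#     person, created = Person.objects.get_or_create(
#         first_name='John', last_name='Lennon',
#         defaults={'birthday': datetime.date(1940, 10, 9)})
#     # 'created' is True only when a new row was inserted.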
def latest(self, field_name=None):
"""
Returns the latest object, according to the model's 'get_latest_by'
option or optional given field_name.
"""
latest_by = field_name or self.model._meta.get_latest_by
assert bool(latest_by), "latest() requires either a field_name parameter or 'get_latest_by' in the model"
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._clone()
obj.query.set_limits(high=1)
obj.query.add_ordering('-%s' % latest_by)
return obj.get()
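# Usage sketch for latest(); Entry and pub_date are illustrative.
#
#     Entry.objects.latest('pub_date')
#     # or, with get_latest_by = 'pub_date' declared in Entry.Meta:
#     Entry.objects.latest()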
def in_bulk(self, id_list):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
assert isinstance(id_list, (tuple, list, set, frozenset)), \
"in_bulk() must be provided with a list of IDs."
if not id_list:
return {}
qs = self._clone()
qs.query.add_filter(('pk__in', id_list))
return dict([(obj._get_pk_val(), obj) for obj in qs.iterator()])
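# Usage sketch for in_bulk(); Blog is a hypothetical model.
#
#     Blog.objects.in_bulk([1, 2])   # {1: <Blog: ...>, 2: <Blog: ...>}
#     Blog.objects.in_bulk([])       # {}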
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
del_query = self._clone()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_related = False
del_query.query.clear_ordering()
collector = Collector(using=del_query.db)
collector.collect(del_query)
collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
delete.alters_data = True
def update(self, **kwargs):
"""
Updates all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.clone(sql.UpdateQuery)
query.add_update_values(kwargs)
if not transaction.is_managed(using=self.db):
transaction.enter_transaction_management(using=self.db)
forced_managed = True
else:
forced_managed = False
try:
rows = query.get_compiler(self.db).execute_sql(None)
if forced_managed:
transaction.commit(using=self.db)
else:
transaction.commit_unless_managed(using=self.db)
finally:
if forced_managed:
transaction.leave_transaction_management(using=self.db)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.clone(sql.UpdateQuery)
query.add_update_fields(values)
self._result_cache = None
return query.get_compiler(self.db).execute_sql(None)
_update.alters_data = True
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def values(self, *fields):
return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (kwargs.keys(),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
_fields=fields)
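# Usage sketch for values() and values_list(); Entry is illustrative.
#
#     Entry.objects.values('id', 'headline')          # one dict per row
#     Entry.objects.values_list('id', 'headline')     # one tuple per row
#     Entry.objects.values_list('id', flat=True)      # plain values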
def dates(self, field_name, kind, order='ASC'):
"""
Returns a list of datetime objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ("month", "year", "day"), \
"'kind' must be one of 'year', 'month' or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self._clone(klass=DateQuerySet, setup=True,
_field_name=field_name, _kind=kind, _order=order)
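# Usage sketch for dates(); Entry.pub_date is an assumed DateField.
#
#     Entry.objects.dates('pub_date', 'year')
#     Entry.objects.dates('pub_date', 'month', order='DESC')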
def none(self):
"""
Returns an empty QuerySet.
"""
return self._clone(klass=EmptyQuerySet)
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._clone()
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
clone = self._clone()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def select_related(self, *fields, **kwargs):
"""
Returns a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
"""
depth = kwargs.pop('depth', 0)
if kwargs:
raise TypeError('Unexpected keyword arguments to select_related: %s'
% (kwargs.keys(),))
obj = self._clone()
if fields:
if depth:
raise TypeError('Cannot pass both "depth" and fields to select_related()')
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
if depth:
obj.query.max_depth = depth
return obj
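# Usage sketch for select_related(); Entry and its 'blog' ForeignKey are
# illustrative assumptions.
#
#     Entry.objects.select_related()            # follow all non-null FKs
#     Entry.objects.select_related('blog')      # follow only 'blog'
#     Entry.objects.select_related(depth=1)     # limit traversal depth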
def dup_select_related(self, other):
"""
Copies the related selection status from the QuerySet 'other' to the
current QuerySet.
"""
self.query.select_related = other.query.select_related
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with data aggregated from related fields.
"""
for arg in args:
if arg.default_alias in kwargs:
raise ValueError("The %s named annotation conflicts with the "
"default name for another annotation."
% arg.default_alias)
kwargs[arg.default_alias] = arg
names = set(self.model._meta.get_all_field_names())
for aggregate in kwargs:
if aggregate in names:
raise ValueError("The %s annotation conflicts with a field on "
"the model." % aggregate)
obj = self._clone()
obj._setup_aggregate_query(kwargs.keys())
# Add the aggregates to the query
for (alias, aggregate_expr) in kwargs.items():
obj.query.add_aggregate(aggregate_expr, self.model, alias,
is_summary=False)
return obj
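# Usage sketch for annotate(); Blog, Entry and the reverse relation name
# 'entry' are assumptions for illustration.
#
#     from django.db.models import Count
#     blogs = Blog.objects.annotate(num_entries=Count('entry'))
#     blogs[0].num_entries    # per-object aggregate added as an attribute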
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering()
obj.query.add_ordering(*field_names)
return obj
def distinct(self, true_or_false=True):
"""
Returns a new QuerySet instance that will select only distinct results.
"""
obj = self._clone()
obj.query.distinct = true_or_false
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
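# Usage sketch for extra(); the column values and SQL fragments are
# illustrative only.
#
#     Entry.objects.extra(select={'is_recent': "pub_date > '2006-01-01'"})
#     Entry.objects.extra(where=["headline = %s"], params=['Hello'])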
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if fields == (None,):
# Can only pass None to defer(), not only(), as the reset option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
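# Usage sketch for defer() and only(); the Entry fields are assumed.
#
#     Entry.objects.defer('body')              # load 'body' lazily
#     Entry.objects.defer(None)                # clear all deferrals
#     Entry.objects.only('headline')           # load only 'headline' (+ pk)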
def using(self, alias):
"""
Selects which database this QuerySet should execute its query against.
"""
clone = self._clone()
clone._db = alias
return clone
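# Usage sketch for using(); 'backup' must be an alias defined in the
# project's DATABASES setting.
#
#     Entry.objects.using('backup').filter(headline__icontains='django')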
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.model._meta.ordering:
return True
else:
return False
ordered = property(ordered)
@property
def db(self):
"Return the database that will be used if this query is executed now"
if self._for_write:
return self._db or router.db_for_write(self.model)
return self._db or router.db_for_read(self.model)
###################
# PRIVATE METHODS #
###################
def _clone(self, klass=None, setup=False, **kwargs):
if klass is None:
klass = self.__class__
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
c = klass(model=self.model, query=query, using=self._db)
c._for_write = self._for_write
c.__dict__.update(kwargs)
if setup and hasattr(c, '_setup_query'):
c._setup_query()
return c
def _fill_cache(self, num=None):
"""
Fills the result cache with 'num' more entries (or until the results
iterator is exhausted).
"""
if self._iter:
try:
for i in range(num or ITER_CHUNK_SIZE):
self._result_cache.append(self._iter.next())
except StopIteration:
self._iter = None
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""
Checks that we are merging two comparable QuerySet classes. By default
this does nothing, but see the ValuesQuerySet for an example of where
it's useful.
"""
pass
def _setup_aggregate_query(self, aggregates):
"""
Prepare the query for computing a result that contains aggregate annotations.
"""
opts = self.model._meta
if self.query.group_by is None:
field_names = [f.attname for f in opts.fields]
self.query.add_fields(field_names, False)
self.query.set_group_by()
def _prepare(self):
return self
def _as_sql(self, connection):
"""
Returns the internal query's SQL and parameters (as a tuple).
"""
obj = self.values("pk")
if obj._db is None or connection == connections[obj._db]:
return obj.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
# When used as part of a nested query, a queryset will never be an "always
# empty" result.
value_annotation = True
class ValuesQuerySet(QuerySet):
def __init__(self, *args, **kwargs):
super(ValuesQuerySet, self).__init__(*args, **kwargs)
# select_related isn't supported in values(). (FIXME -#3358)
self.query.select_related = False
# QuerySet.clone() will also set up the _fields attribute with the
# names of the model fields to select.
def iterator(self):
# Purge any extra columns that haven't been explicitly asked for
extra_names = self.query.extra_select.keys()
field_names = self.field_names
aggregate_names = self.query.aggregate_select.keys()
names = extra_names + field_names + aggregate_names
for row in self.query.get_compiler(self.db).results_iter():
yield dict(zip(names, row))
def _setup_query(self):
"""
Constructs the field_names list that the values query will be
retrieving.
Called by the _clone() method after initializing the rest of the
instance.
"""
self.query.clear_deferred_loading()
self.query.clear_select_fields()
if self._fields:
self.extra_names = []
self.aggregate_names = []
if not self.query.extra and not self.query.aggregates:
# Short cut - if there are no extra or aggregates, then
# the values() clause must be just field names.
self.field_names = list(self._fields)
else:
self.query.default_cols = False
self.field_names = []
for f in self._fields:
# we inspect the full extra_select list since we might
# be adding back an extra select item that we hadn't
# had selected previously.
if f in self.query.extra:
self.extra_names.append(f)
elif f in self.query.aggregate_select:
self.aggregate_names.append(f)
else:
self.field_names.append(f)
else:
# Default to all fields.
self.extra_names = None
self.field_names = [f.attname for f in self.model._meta.fields]
self.aggregate_names = None
self.query.select = []
if self.extra_names is not None:
self.query.set_extra_mask(self.extra_names)
self.query.add_fields(self.field_names, True)
if self.aggregate_names is not None:
self.query.set_aggregate_mask(self.aggregate_names)
def _clone(self, klass=None, setup=False, **kwargs):
"""
Cloning a ValuesQuerySet preserves the current fields.
"""
c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
if not hasattr(c, '_fields'):
# Only clone self._fields if _fields wasn't passed into the cloning
# call directly.
c._fields = self._fields[:]
c.field_names = self.field_names
c.extra_names = self.extra_names
c.aggregate_names = self.aggregate_names
if setup and hasattr(c, '_setup_query'):
c._setup_query()
return c
def _merge_sanity_check(self, other):
super(ValuesQuerySet, self)._merge_sanity_check(other)
if (set(self.extra_names) != set(other.extra_names) or
set(self.field_names) != set(other.field_names) or
self.aggregate_names != other.aggregate_names):
raise TypeError("Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__)
def _setup_aggregate_query(self, aggregates):
"""
Prepare the query for computing a result that contains aggregate annotations.
"""
self.query.set_group_by()
if self.aggregate_names is not None:
self.aggregate_names.extend(aggregates)
self.query.set_aggregate_mask(self.aggregate_names)
super(ValuesQuerySet, self)._setup_aggregate_query(aggregates)
def _as_sql(self, connection):
"""
ValuesQuerySet (and subclasses like ValuesListQuerySet) can
only be used as nested queries if they're already set up to select only
a single field (in which case, that is the field column that is
returned). This differs from QuerySet.as_sql(), where the column to
select is set up by Django.
"""
if ((self._fields and len(self._fields) > 1) or
(not self._fields and len(self.model._meta.fields) > 1)):
raise TypeError('Cannot use a multi-field %s as a filter value.'
% self.__class__.__name__)
obj = self._clone()
if obj._db is None or connection == connections[obj._db]:
return obj.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
def _prepare(self):
"""
Validates that we aren't trying to do a query like
value__in=qs.values('value1', 'value2'), which isn't valid.
"""
if ((self._fields and len(self._fields) > 1) or
(not self._fields and len(self.model._meta.fields) > 1)):
raise TypeError('Cannot use a multi-field %s as a filter value.'
% self.__class__.__name__)
return self
class ValuesListQuerySet(ValuesQuerySet):
def iterator(self):
if self.flat and len(self._fields) == 1:
for row in self.query.get_compiler(self.db).results_iter():
yield row[0]
elif not self.query.extra_select and not self.query.aggregate_select:
for row in self.query.get_compiler(self.db).results_iter():
yield tuple(row)
else:
# When extra(select=...) or an annotation is involved, the extra
# cols are always at the start of the row, and we need to reorder
# the fields to match the order in self._fields.
extra_names = self.query.extra_select.keys()
field_names = self.field_names
aggregate_names = self.query.aggregate_select.keys()
names = extra_names + field_names + aggregate_names
# If a field list has been specified, use it. Otherwise, use the
# full list of fields, including extras and aggregates.
if self._fields:
fields = list(self._fields) + filter(lambda f: f not in self._fields, aggregate_names)
else:
fields = names
for row in self.query.get_compiler(self.db).results_iter():
data = dict(zip(names, row))
yield tuple([data[f] for f in fields])
def _clone(self, *args, **kwargs):
clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs)
if not hasattr(clone, "flat"):
# Only assign flat if the clone didn't already get it from kwargs
clone.flat = self.flat
return clone
class DateQuerySet(QuerySet):
def iterator(self):
return self.query.get_compiler(self.db).results_iter()
def _setup_query(self):
"""
Sets up any special features of the query attribute.
Called by the _clone() method after initializing the rest of the
instance.
"""
self.query.clear_deferred_loading()
self.query = self.query.clone(klass=sql.DateQuery, setup=True)
self.query.select = []
self.query.add_date_select(self._field_name, self._kind, self._order)
def _clone(self, klass=None, setup=False, **kwargs):
c = super(DateQuerySet, self)._clone(klass, False, **kwargs)
c._field_name = self._field_name
c._kind = self._kind
if setup and hasattr(c, '_setup_query'):
c._setup_query()
return c
class EmptyQuerySet(QuerySet):
def __init__(self, model=None, query=None, using=None):
super(EmptyQuerySet, self).__init__(model, query, using)
self._result_cache = []
def __and__(self, other):
return self._clone()
def __or__(self, other):
return other._clone()
def count(self):
return 0
def delete(self):
pass
def _clone(self, klass=None, setup=False, **kwargs):
c = super(EmptyQuerySet, self)._clone(klass, setup=setup, **kwargs)
c._result_cache = []
return c
def iterator(self):
# This slightly odd construction is because we need an empty generator
# (it raises StopIteration immediately).
yield iter([]).next()
def all(self):
"""
Always returns EmptyQuerySet.
"""
return self
def filter(self, *args, **kwargs):
"""
Always returns EmptyQuerySet.
"""
return self
def exclude(self, *args, **kwargs):
"""
Always returns EmptyQuerySet.
"""
return self
def complex_filter(self, filter_obj):
"""
Always returns EmptyQuerySet.
"""
return self
def select_related(self, *fields, **kwargs):
"""
Always returns EmptyQuerySet.
"""
return self
def annotate(self, *args, **kwargs):
"""
Always returns EmptyQuerySet.
"""
return self
def order_by(self, *field_names):
"""
Always returns EmptyQuerySet.
"""
return self
def distinct(self, true_or_false=True):
"""
Always returns EmptyQuerySet.
"""
return self
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Always returns EmptyQuerySet.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
return self
def reverse(self):
"""
Always returns EmptyQuerySet.
"""
return self
def defer(self, *fields):
"""
Always returns EmptyQuerySet.
"""
return self
def only(self, *fields):
"""
Always returns EmptyQuerySet.
"""
return self
def update(self, **kwargs):
"""
Don't update anything.
"""
return 0
# EmptyQuerySet is always an empty result in where-clauses (and similar
# situations).
value_annotation = False
def get_cached_row(klass, row, index_start, using, max_depth=0, cur_depth=0,
requested=None, offset=0, only_load=None, local_only=False):
"""
Helper function that recursively returns an object with the specified
related attributes already populated.
This method may be called recursively to populate deep select_related()
clauses.
Arguments:
* klass - the class to retrieve (and instantiate)
* row - the row of data returned by the database cursor
* index_start - the index of the row at which data for this
object is known to start
* using - the database alias on which the query is being executed.
* max_depth - the maximum depth to which a select_related()
relationship should be explored.
* cur_depth - the current depth in the select_related() tree.
Used in recursive calls to determine if we should dig deeper.
* requested - A dictionary describing the select_related() tree
that is to be retrieved. keys are field names; values are
dictionaries describing the keys on that related object that
are themselves to be select_related().
* offset - the number of additional fields that are known to
exist in `row` for `klass`. This usually means the number of
annotated results on `klass`.
* only_load - if the query has had only() or defer() applied,
this is the list of field names that will be returned. If None,
the full field list for `klass` can be assumed.
* local_only - Only populate local fields. This is used when
following reverse select-related relations.
"""
if max_depth and requested is None and cur_depth > max_depth:
# We've recursed deeply enough; stop now.
return None
restricted = requested is not None
if only_load:
load_fields = only_load.get(klass)
# When we create the object, we will also be populating
# all the parent classes, so traverse the parent classes looking
# for fields that must be included on load.
for parent in klass._meta.get_parent_list():
fields = only_load.get(parent)
if fields:
load_fields.update(fields)
else:
load_fields = None
if load_fields:
# Handle deferred fields.
skip = set()
init_list = []
# Build the list of fields that *haven't* been requested
for field, model in klass._meta.get_fields_with_model():
if field.name not in load_fields:
skip.add(field.name)
elif local_only and model is not None:
continue
else:
init_list.append(field.attname)
# Retrieve all the requested fields
field_count = len(init_list)
fields = row[index_start : index_start + field_count]
# If all the select_related columns are None, then the related
# object must be non-existent - set the relation to None.
# Otherwise, construct the related object.
if fields == (None,) * field_count:
obj = None
elif skip:
klass = deferred_class_factory(klass, skip)
obj = klass(**dict(zip(init_list, fields)))
else:
obj = klass(*fields)
else:
# Load all fields on klass
if local_only:
field_names = [f.attname for f in klass._meta.local_fields]
else:
field_names = [f.attname for f in klass._meta.fields]
field_count = len(field_names)
fields = row[index_start : index_start + field_count]
# If all the select_related columns are None, then the related
# object must be non-existent - set the relation to None.
# Otherwise, construct the related object.
if fields == (None,) * field_count:
obj = None
else:
obj = klass(**dict(zip(field_names, fields)))
# If an object was retrieved, set the database state.
if obj:
obj._state.db = using
obj._state.adding = False
index_end = index_start + field_count + offset
# Iterate over each related object, populating any
# select_related() fields
for f in klass._meta.fields:
if not select_related_descend(f, restricted, requested):
continue
if restricted:
next = requested[f.name]
else:
next = None
# Recursively retrieve the data for the related object
cached_row = get_cached_row(f.rel.to, row, index_end, using,
max_depth, cur_depth+1, next, only_load=only_load)
# If the recursive descent found an object, populate the
# descriptor caches relevant to the object
if cached_row:
rel_obj, index_end = cached_row
if obj is not None:
# If the base object exists, populate the
# descriptor cache
setattr(obj, f.get_cache_name(), rel_obj)
if f.unique and rel_obj is not None:
# If the field is unique, populate the
# reverse descriptor cache on the related object
setattr(rel_obj, f.related.get_cache_name(), obj)
# Now do the same, but for reverse related objects.
# Only handle the restricted case - i.e., don't do a depth
# descent into reverse relations unless explicitly requested
if restricted:
related_fields = [
(o.field, o.model)
for o in klass._meta.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested, reverse=True):
continue
next = requested[f.related_query_name()]
# Recursively retrieve the data for the related object
cached_row = get_cached_row(model, row, index_end, using,
max_depth, cur_depth+1, next, only_load=only_load, local_only=True)
# If the recursive descent found an object, populate the
# descriptor caches relevant to the object
if cached_row:
rel_obj, index_end = cached_row
if obj is not None:
# If the field is unique, populate the
# reverse descriptor cache
setattr(obj, f.related.get_cache_name(), rel_obj)
if rel_obj is not None:
# If the related object exists, populate
# the descriptor cache.
setattr(rel_obj, f.get_cache_name(), obj)
# Now populate all the non-local field values
# on the related object
for rel_field,rel_model in rel_obj._meta.get_fields_with_model():
if rel_model is not None:
setattr(rel_obj, rel_field.attname, getattr(obj, rel_field.attname))
# populate the field cache for any related object
# that has already been retrieved
if rel_field.rel:
try:
cached_obj = getattr(obj, rel_field.get_cache_name())
setattr(rel_obj, rel_field.get_cache_name(), cached_obj)
except AttributeError:
# Related object hasn't been cached yet
pass
return obj, index_end
class RawQuerySet(object):
"""
Provides an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None):
self.raw_query = raw_query
self.model = model
self._db = using
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
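# Usage sketch: RawQuerySet instances are normally created via Manager.raw().
# The Person model, table name and column alias below are illustrative
# assumptions, not part of this module.
#
#     Person.objects.raw('SELECT * FROM myapp_person')
#     Person.objects.raw('SELECT * FROM some_table',
#                        translations={'first': 'first_name'})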
def __iter__(self):
# Mapping of attrnames to row column positions. Used for constructing
# the model using kwargs, needed when not all of the model's fields are present
# in the query.
model_init_field_names = {}
# A list of tuples of (column name, column position). Used for
# annotation fields.
annotation_fields = []
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
need_resolv_columns = hasattr(compiler, 'resolve_columns')
query = iter(self.query)
# Find out which columns are the model's fields, and which ones should be
# annotated to the model.
for pos, column in enumerate(self.columns):
if column in self.model_fields:
model_init_field_names[self.model_fields[column].attname] = pos
else:
annotation_fields.append((column, pos))
# Find out which of the model's fields are not present in the query.
skip = set()
for field in self.model._meta.fields:
if field.attname not in model_init_field_names:
skip.add(field.attname)
if skip:
if self.model._meta.pk.attname in skip:
raise InvalidQuery('Raw query must include the primary key')
model_cls = deferred_class_factory(self.model, skip)
else:
model_cls = self.model
# All of the model's fields are present in the query, so it is possible
# to use *args based model instantiation. For each field of the model,
# record the query column position matching that field.
model_init_field_pos = []
for field in self.model._meta.fields:
model_init_field_pos.append(model_init_field_names[field.attname])
if need_resolv_columns:
fields = [self.model_fields.get(c, None) for c in self.columns]
# Begin looping through the query values.
for values in query:
if need_resolv_columns:
values = compiler.resolve_columns(values, fields)
# Associate fields to values
if skip:
model_init_kwargs = {}
for attname, pos in model_init_field_names.iteritems():
model_init_kwargs[attname] = values[pos]
instance = model_cls(**model_init_kwargs)
else:
model_init_args = [values[pos] for pos in model_init_field_pos]
instance = model_cls(*model_init_args)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
instance._state.db = db
instance._state.adding = False
yield instance
def __repr__(self):
return "<RawQuerySet: %r>" % (self.raw_query % self.params)
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"Return the database that will be used if this query is executed now"
return self._db or router.db_for_read(self.model)
def using(self, alias):
"""
Selects which database this Raw QuerySet should execute its query against.
"""
return RawQuerySet(self.raw_query, model=self.model,
query=self.query.clone(using=alias),
params=self.params, translations=self.translations,
using=alias)
@property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
if not hasattr(self, '_columns'):
self._columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
try:
index = self._columns.index(query_name)
self._columns[index] = model_name
except ValueError:
# Ignore translations for non-existent column names
pass
return self._columns
@property
def model_fields(self):
"""
A dict mapping column names to model field names.
"""
if not hasattr(self, '_model_fields'):
converter = connections[self.db].introspection.table_name_converter
self._model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
self._model_fields[converter(column)] = field
return self._model_fields
def insert_query(model, values, return_id=False, raw_values=False, using=None):
"""
Inserts a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented. It is not
part of the public API.
"""
query = sql.InsertQuery(model)
query.insert_values(values, raw_values)
return query.get_compiler(using=using).execute_sql(return_id)
|
radical-cybertools/radical.ensemblemd
|
refs/heads/master
|
src/radical/entk/utils/__init__.py
|
1
|
# ------------------------------------------------------------------------------
#
from .prof_utils import get_hostmap
from .prof_utils import get_hostmap_deprecated
from .prof_utils import get_session_profile
from .prof_utils import write_session_description
from .prof_utils import get_session_description
from .prof_utils import write_workflows
# ------------------------------------------------------------------------------
|
rohitwaghchaure/digitales_erpnext
|
refs/heads/develop
|
erpnext/selling/doctype/sales_order/sales_order.py
|
1
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.utils
from frappe.utils import cstr, flt, getdate, comma_and, cint
from frappe import _
from frappe.model.mapper import get_mapped_doc
import json
from erpnext.controllers.selling_controller import SellingController
form_grid_templates = {
"sales_order_details": "templates/form_grid/item_grid.html"
}
class SalesOrder(SellingController):
tname = 'Sales Order Item'
fname = 'sales_order_details'
person_tname = 'Target Detail'
partner_tname = 'Partner Target Detail'
territory_tname = 'Territory Target Detail'
def validate_mandatory(self):
# validate transaction date v/s delivery date
if self.delivery_date:
if getdate(self.transaction_date) > getdate(self.delivery_date):
frappe.throw(_("Expected Delivery Date cannot be before Sales Order Date"))
def validate_po(self):
# validate p.o date v/s delivery date
if self.po_date and self.delivery_date and getdate(self.po_date) > getdate(self.delivery_date):
frappe.throw(_("Expected Delivery Date cannot be before Purchase Order Date"))
if self.po_no and self.customer:
so = frappe.db.sql("select name from `tabSales Order` \
where ifnull(po_no, '') = %s and name != %s and docstatus < 2\
and customer = %s", (self.po_no, self.name, self.customer))
if so and so[0][0]:
frappe.msgprint(_("Warning: Sales Order {0} already exists against same Purchase Order number").format(so[0][0]))
def validate_for_items(self):
check_list, flag = [], 0
chk_dupl_itm = []
for d in self.get('sales_order_details'):
e = [d.item_code, d.description, d.warehouse, d.prevdoc_docname or '']
f = [d.item_code, d.description]
if frappe.db.get_value("Item", d.item_code, "is_stock_item") == 'Yes':
if not d.warehouse:
frappe.throw(_("Reserved warehouse required for stock item {0}").format(d.item_code))
if e in check_list:
frappe.throw(_("Item {0} has been entered twice").format(d.item_code))
else:
check_list.append(e)
else:
if f in chk_dupl_itm:
frappe.throw(_("Item {0} has been entered twice").format(d.item_code))
else:
chk_dupl_itm.append(f)
# used for production plan
d.transaction_date = self.transaction_date
tot_avail_qty = frappe.db.sql("select projected_qty from `tabBin` \
where item_code = %s and warehouse = %s", (d.item_code,d.warehouse))
d.projected_qty = tot_avail_qty and flt(tot_avail_qty[0][0]) or 0
def validate_sales_mntc_quotation(self):
for d in self.get('sales_order_details'):
if d.prevdoc_docname:
res = frappe.db.sql("select name from `tabQuotation` where name=%s and order_type = %s", (d.prevdoc_docname, self.order_type))
if not res:
frappe.msgprint(_("Quotation {0} not of type {1}").format(d.prevdoc_docname, self.order_type))
def validate_order_type(self):
super(SalesOrder, self).validate_order_type()
def validate_delivery_date(self):
if not self.delivery_date:
frappe.throw(_("Please enter 'Expected Delivery Date'"))
#self.validate_sales_mntc_quotation()
def validate_proj_cust(self):
if self.project_name and self.customer_name:
res = frappe.db.sql("""select name from `tabProject` where name = %s
and (customer = %s or ifnull(customer,'')='')""",
(self.project_name, self.customer))
if not res:
frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project_name))
def validate(self):
super(SalesOrder, self).validate()
#self.validate_order_type()
self.set_delivery_date()
self.validate_delivery_date()
self.validate_mandatory()
self.validate_proj_cust()
self.validate_po()
self.validate_uom_is_integer("stock_uom", "qty")
# self.validate_for_items()
self.validate_warehouse()
#frappe.errprint("in validate")
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self,'sales_order_details')
self.validate_with_previous_doc()
if not self.status:
self.status = "Draft"
from erpnext.utilities import validate_status
validate_status(self.status, ["Draft", "Submitted", "Stopped",
"Cancelled"])
if not self.billing_status: self.billing_status = 'Not Billed'
if not self.delivery_status: self.delivery_status = 'Not Delivered'
def set_delivery_date(self):
#frappe.errprint("in set delivery date")
from datetime import date
from dateutil.relativedelta import relativedelta
if not self.delivery_date:
self.delivery_date = date.today() + relativedelta(days=+6)
def validate_warehouse(self):
from erpnext.stock.utils import validate_warehouse_company
warehouses = list(set([d.warehouse for d in
self.get(self.fname) if d.warehouse]))
for w in warehouses:
validate_warehouse_company(w, self.company)
def validate_with_previous_doc(self):
super(SalesOrder, self).validate_with_previous_doc(self.tname, {
"Quotation": {
"ref_dn_field": "prevdoc_docname",
"compare_fields": [["company", "="], ["currency", "="]]
}
})
def update_enquiry_status(self, prevdoc, flag):
enq = frappe.db.sql("select t2.prevdoc_docname from `tabQuotation` t1, `tabQuotation Item` t2 where t2.parent = t1.name and t1.name=%s", prevdoc)
if enq:
frappe.db.sql("update `tabOpportunity` set status = %s where name=%s",(flag,enq[0][0]))
def update_prevdoc_status(self, flag):
for quotation in list(set([d.prevdoc_docname for d in self.get(self.fname)])):
if quotation:
doc = frappe.get_doc("Quotation", quotation)
if doc.docstatus==2:
frappe.throw(_("Quotation {0} is cancelled").format(quotation))
doc.set_status(update=True)
def on_submit(self):
super(SalesOrder, self).on_submit()
self.update_stock_ledger(update_stock = 1)
self.check_credit(self.grand_total)
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.grand_total, self)
#self.validate_contract(self.customer)
self.update_prevdoc_status('submit')
frappe.db.set(self, 'status', 'Submitted')
def validate_contract(self,customer):
contract_dates=frappe.db.sql("""select contract_start_date,contract_end_date
from `tabCustomer Contract Form` where Customer='%s'
"""%customer,as_list=1)
if contract_dates:
if contract_dates[0][0] <= self.transaction_date <= contract_dates[0][1] :
pass
else:
frappe.msgprint("Selected customer contract is expired",raise_exception=1)
def on_cancel(self):
# Cannot cancel stopped SO
if self.status == 'Stopped':
frappe.throw(_("Stopped order cannot be cancelled. Unstop to cancel."))
self.check_nextdoc_docstatus()
self.update_stock_ledger(update_stock = -1)
self.update_prevdoc_status('cancel')
frappe.db.set(self, 'status', 'Cancelled')
def check_nextdoc_docstatus(self):
# Checks Delivery Note
submit_dn = frappe.db.sql_list("""select t1.name from `tabDelivery Note` t1,`tabDelivery Note Item` t2
where t1.name = t2.parent and t2.against_sales_order = %s and t1.docstatus = 1""", self.name)
if submit_dn:
frappe.throw(_("Delivery Notes {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_dn)))
# Checks Sales Invoice
submit_rv = frappe.db.sql_list("""select t1.name
from `tabSales Invoice` t1,`tabSales Invoice Item` t2
where t1.name = t2.parent and t2.sales_order = %s and t1.docstatus = 1""",
self.name)
if submit_rv:
frappe.throw(_("Sales Invoice {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_rv)))
#check maintenance schedule
submit_ms = frappe.db.sql_list("""select t1.name from `tabMaintenance Schedule` t1,
`tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""", self.name)
if submit_ms:
frappe.throw(_("Maintenance Schedule {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_ms)))
# check maintenance visit
submit_mv = frappe.db.sql_list("""select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""",self.name)
if submit_mv:
frappe.throw(_("Maintenance Visit {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_mv)))
# check production order
pro_order = frappe.db.sql_list("""select name from `tabProduction Order`
where sales_order = %s and docstatus = 1""", self.name)
if pro_order:
frappe.throw(_("Production Order {0} must be cancelled before cancelling this Sales Order").format(comma_and(pro_order)))
def check_modified_date(self):
mod_db = frappe.db.get_value("Sales Order", self.name, "modified")
date_diff = frappe.db.sql("select TIMEDIFF('%s', '%s')" %
( mod_db, cstr(self.modified)))
if date_diff and date_diff[0][0]:
frappe.throw(_("{0} {1} has been modified. Please refresh.").format(self.doctype, self.name))
def stop_sales_order(self):
self.check_modified_date()
self.update_stock_ledger(-1)
frappe.db.set(self, 'status', 'Stopped')
frappe.msgprint(_("{0} {1} status is Stopped").format(self.doctype, self.name))
def unstop_sales_order(self):
self.check_modified_date()
self.update_stock_ledger(1)
frappe.db.set(self, 'status', 'Submitted')
frappe.msgprint(_("{0} {1} status is Unstopped").format(self.doctype, self.name))
def update_stock_ledger(self, update_stock):
from erpnext.stock.utils import update_bin
for d in self.get_item_list():
if frappe.db.get_value("Item", d['item_code'], "is_stock_item") == "Yes":
args = {
"item_code": d['item_code'],
"warehouse": d['reserved_warehouse'],
"reserved_qty": flt(update_stock) * flt(d['reserved_qty']),
"posting_date": self.transaction_date,
"voucher_type": self.doctype,
"voucher_no": self.name,
"is_amended": self.amended_from and 'Yes' or 'No'
}
update_bin(args)
def get_portal_page(self):
return "order" if self.docstatus==1 else None
@frappe.whitelist()
def make_material_request(source_name, target_doc=None):
def postprocess(source, doc):
doc.material_request_type = "Purchase"
doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Material Request",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Material Request Item",
"field_map": {
"parent": "sales_order_no",
"stock_uom": "uom"
}
}
}, target_doc, postprocess)
return doc
# @frappe.whitelist()
# def make_delivery_note(source_name, target_doc=None):
# def set_missing_values(source, target):
# target.ignore_pricing_rule = 1
# target.run_method("set_missing_values")
# target.run_method("calculate_taxes_and_totals")
# def update_item(source, target, source_parent):
# target.base_amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.base_rate)
# target.amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.rate)
# # target.qty = flt(source.qty) - flt(source.delivered_qty)
# target.qty = flt(source.assigned_qty) - flt(source.delivered_qty)
# target.assigned_qty = source.assigned_qty
# target_doc = get_mapped_doc("Sales Order", source_name, {
# "Sales Order": {
# "doctype": "Delivery Note",
# "validation": {
# "docstatus": ["=", 1]
# }
# },
# "Sales Order Item": {
# "doctype": "Delivery Note Item",
# "field_map": {
# "rate": "rate",
# "name": "prevdoc_detail_docname",
# "parent": "against_sales_order",
# },
# "postprocess": update_item,
# "condition": lambda doc: doc.delivered_qty < doc.qty
# },
# "Sales Taxes and Charges": {
# "doctype": "Sales Taxes and Charges",
# "add_if_empty": True
# },
# "Sales Team": {
# "doctype": "Sales Team",
# "add_if_empty": True
# }
# }, target_doc, set_missing_values)
# return target_doc
# by pitambar
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
def set_missing_values(source, target):
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
target.new_order_type = source.new_order_type
target.budget = source.budget
def update_item(source, target, source_parent):
target.base_amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.base_rate)
target.amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.rate)
target.qty = flt(source.assigned_qty) - flt(source.delivered_qty) if frappe.db.get_value('Item', source.item_code, 'is_stock_item') == 'Yes' else source.qty
target.assigned_qty = source.assigned_qty
target.line_order_item = source.line_item
target.artist = source.artist or frappe.db.get_value('Item', {'name':source.item_code}, 'artist')
target_doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Delivery Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Delivery Note Item",
"field_map": {
"rate": "rate",
"name": "prevdoc_detail_docname",
"parent": "against_sales_order",
},
"postprocess": update_item,
"condition": lambda doc: ((flt((doc.assigned_qty if doc.assigned_qty else 0) - (doc.delivered_qty if doc.delivered_qty else 0)) > 0.0) and doc.stop_status!="Yes") if frappe.db.get_value('Item', doc.item_code, 'is_stock_item') == 'Yes' and flt(doc.assigned_qty)>0 and doc.qty>0 else (((doc.delivered_qty < doc.qty) and doc.stop_status!="Yes" and doc.qty>0) if frappe.db.get_value('Item', doc.item_code, 'is_stock_item') == 'No' else flt(doc.assigned_qty) >0)
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, set_missing_values)
return target_doc
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None):
def postprocess(source, target):
set_missing_values(source, target)
#Get the advance paid Journal Vouchers in Sales Invoice Advance
target.get_advances()
# To get process details against sales order for which you are generating sales invoice---------
if source.doctype=='Sales Order':
#frappe.errprint("2 doctype is sales order")
get_shelf_service_details(source,source_name,target)
set_missing_values(source, target)
target.get_advances()
#update_item(source,target,source_parent)
def get_shelf_service_details(source,source_name,target):
process=frappe.db.sql(""" select name from `tabProcess` where get_sales_order='%s'
and docstatus=1 and sales_invoice_status='Not Done'"""%source_name,as_list=1)
if process:
for [name] in process:
create_sales_invoice_item_entry(name,target)
update_sales_order_process_status(source_name)
def update_process_entry(name):
frappe.db.sql("""update `tabProcess` set sales_invoice_status='Done' where
name='%s'"""%name)
frappe.db.commit()
def update_sales_order_process_status(source_name):
frappe.db.sql("""update `tabSales Order` set process_status='Completed' where
name='%s'"""%source_name)
frappe.db.commit()
def create_sales_invoice_item_entry(name,target):
service_details=frappe.db.sql("""select s.process,ifnull(s.qty,0),s.file_name from `tabShelf Ready Service Details` s
inner join `tabProcess` p on s.parent=p.name where s.parent='%s' """%name,as_list=1)
if service_details:
for i in service_details:
si = target.append('entries', {})
si.item_code=i[0]
si.item_name=i[0]
si.description=i[0]
si.qty=i[1]
#si.rate=i[2]
#si.amount=i[3]
#si.shelf_ready_service_name=i[0]
if i[2]!=None:
si.marcfile_name=i[2]
else:
si.marcfile_name=""
si.sales_order=source_name
#si.income_account='Sales - D'
#si.cost_center='Main - D'
si.process_id= name
#update_process_entry(name)
def set_missing_values(source, target):
target.is_pos = 0
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
if cint(source.is_recurring) == 1:
target.is_recurring = source.is_recurring
target.recurring_type = source.recurring_type
target.from_date = source.from_date
target.to_date = source.to_date
target.repeat_on_day_of_month = source.repeat_on_day_of_month
target.end_date = source.end_date
target.notification_email_address = source.notification_email_address
def update_item(source, target, source_parent):
target.amount = flt(source.amount) - flt(source.billed_amt)
target.base_amount = target.amount * flt(source_parent.conversion_rate)
target.qty = target.amount / flt(source.rate) if (source.rate and source.billed_amt) else source.qty
target.so_detail = source.name
target.artist = source.artist or frappe.db.get_value('Item', {'name':source.item_code}, 'artist')
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Sales Invoice",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Sales Invoice Item",
"field_map": {
"name": "so_detail",
"parent": "sales_order",
},
"postprocess": update_item,
"condition": lambda doc: (doc.base_amount==0 or doc.billed_amt < doc.amount) and doc.stop_status!="Yes"
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, postprocess)
def set_advance_vouchers(source, target):
# NOTE: this helper appears to be unused and incomplete -- the {amount_query}
# placeholder in the query below is never substituted and the fetched
# vouchers are never applied to the target document.
advance_voucher_list = []
advance_voucher = frappe.db.sql("""
select
t1.name as voucher_no, t1.posting_date, t1.remark, t2.account,
t2.name as voucher_detail_no, {amount_query} as payment_amount, t2.is_advance
from
`tabJournal Voucher` t1, `tabJournal Voucher Detail` t2
""")
return doclist
@frappe.whitelist()
def make_maintenance_schedule(source_name, target_doc=None):
maint_schedule = frappe.db.sql("""select t1.name
from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s and t1.docstatus=1""", source_name)
if not maint_schedule:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Schedule",
"field_map": {
"name": "sales_order_no"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Schedule Item",
"field_map": {
"parent": "prevdoc_docname"
},
"add_if_empty": True
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
visit = frappe.db.sql("""select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s
and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)
if not visit:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Visit",
"field_map": {
"name": "sales_order_no"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype"
},
"add_if_empty": True
}
}, target_doc)
return doclist
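# Minimal server-side usage sketch (the Sales Order name below is illustrative,
# not taken from this codebase): each whitelisted mapper above returns an
# unsaved mapped document, which the caller then inserts and submits as usual.
#
#   dn = make_delivery_note("SO-00001")
#   dn.insert()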
|
snahelou/awx
|
refs/heads/devel
|
awx/main/management/commands/register_queue.py
|
1
|
# Copyright (c) 2017 Ansible Tower by Red Hat
# All Rights Reserved.
import sys
from awx.main.utils.pglock import advisory_lock
from awx.main.models import Instance, InstanceGroup
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--queuename', dest='queuename', type='string',
help='Queue to create/update'),
make_option('--hostnames', dest='hostnames', type='string',
help='Comma-Delimited Hosts to add to the Queue'),
make_option('--controller', dest='controller', type='string', default='',
help='The controlling group (makes this an isolated group)'),
)
def handle(self, **options):
queuename = options.get('queuename')
if not queuename:
raise CommandError("Specify `--queuename` to use this command.")
changed = False
with advisory_lock('instance_group_registration_%s' % queuename):
ig = InstanceGroup.objects.filter(name=queuename)
control_ig = None
if options.get('controller'):
control_ig = InstanceGroup.objects.filter(name=options.get('controller')).first()
if ig.exists():
print("Instance Group already registered {}".format(ig[0].name))
ig = ig[0]
if control_ig and ig.controller_id != control_ig.pk:
ig.controller = control_ig
ig.save()
print("Set controller group {} on {}.".format(control_ig.name, ig.name))
changed = True
else:
print("Creating instance group {}".format(queuename))
ig = InstanceGroup(name=queuename)
if control_ig:
ig.controller = control_ig
ig.save()
changed = True
hostname_list = []
if options.get('hostnames'):
hostname_list = options.get('hostnames').split(",")
instance_list = [x.strip() for x in hostname_list if x]
for inst_name in instance_list:
instance = Instance.objects.filter(hostname=inst_name)
if instance.exists() and instance[0] not in ig.instances.all():
ig.instances.add(instance[0])
print("Added instance {} to {}".format(instance[0].hostname, ig.name))
changed = True
elif not instance.exists():
print("Instance does not exist: {}".format(inst_name))
if changed:
print('(changed: True)')
sys.exit(1)
else:
print("Instance already registered {}".format(instance[0].hostname))
if changed:
print('(changed: True)')
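# Example invocation (queue and host names are illustrative), assuming the
# usual awx-manage entry point for management commands:
#
#   awx-manage register_queue --queuename=tower \
#       --hostnames=node1.example.com,node2.example.com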
|
Asdamp/StabilityBugsAnalysis
|
refs/heads/master
|
Progetto/main.py
|
1
|
from openpyxl import load_workbook
from Progetto import parserGetAllClasses
from Progetto.diffClassiVersioni import diff
from Progetto.getClassiModificate import count_class_mod
from Progetto.request import getNumIssue
from Progetto.getCoreCallsInstabilityValues import getCoreCallsValues
#define the variables indicating:
#the project name and the versions of interest
nomeCompleto="activemq-parent"
nomeProgetto="ActiveMQ"
numVerPrec="5.8.0"
numVerSucc="5.9.0"
#define the paths of the directories from which to read:
#the project files and the .dot files generated by Doxygen
pathSourceVerPrec="C:\\eeqdsw\\source\\"+nomeCompleto+"-"+numVerPrec
pathSourceVerSucc="C:\\eeqdsw\\source\\"+nomeCompleto+"-"+numVerSucc
pathDotVerPrec="C:\\eeqdsw\\dot\\"+nomeCompleto+"-"+numVerPrec
pathDotVerSucc="C:\\eeqdsw\\dot\\"+nomeCompleto+"-"+numVerSucc
#define the first part of the URI used to make requests to JIRA
uriProject="https://issues.apache.org/jira/rest/api/2/project/AMQ"
#invoke the methods that compute the values of interest
classiVerPrec=parserGetAllClasses.parse(pathDotVerPrec)
classiVerSucc=parserGetAllClasses.parse(pathDotVerSucc)
numClassiEliminate=diff(classiVerPrec,classiVerSucc)
numClassiAggiunte=diff(classiVerSucc,classiVerPrec)
numClassiModificate=count_class_mod(pathSourceVerPrec,pathSourceVerSucc)
coreCallsInstabilityValues=getCoreCallsValues(pathDotVerPrec, pathDotVerSucc)
issues=getNumIssue(uriProject, numVerSucc)
#use the openpyxl library to write the results to a .xlsx file
wb=load_workbook("result.xlsx")
ws=wb.active
ws.append([nomeProgetto, numVerPrec, numVerSucc,
len(classiVerPrec), len(classiVerSucc),
numClassiEliminate, numClassiAggiunte, numClassiModificate,
coreCallsInstabilityValues[0], coreCallsInstabilityValues[1],
coreCallsInstabilityValues[2], coreCallsInstabilityValues[3],
issues[0], issues[1], issues[2], issues[3]])
wb.save("result.xlsx")
|
julianschweizer/kernel_23.0.1.A.0.xxx
|
refs/heads/master
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
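#
# Example invocations (program name and interval are illustrative):
#   perf script -s sctop.py              # all comms, default 3 second refresh
#   perf script -s sctop.py 5            # all comms, 5 second refresh
#   perf script -s sctop.py firefox 5    # only syscalls made by "firefox"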
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
tizianasellitto/servo
|
refs/heads/master
|
tests/wpt/css-tests/css-text-decor-3_dev/xhtml1print/reference/support/generate-text-emphasis-line-height-tests.py
|
829
|
#!/usr/bin/env python
# - * - coding: UTF-8 - * -
"""
This script generates tests text-emphasis-line-height-001 ~ 004 except
001z. They test the line height expansion in different directions. This
script outputs a list of all tests it generated in the format of Mozilla
reftest.list to the stdout.
"""
from __future__ import unicode_literals
TEST_FILE = 'text-emphasis-line-height-{:03}{}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis line height, {pos}, {wm}, {tag}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property">
<meta name="assert" content="text emphasis marks should expand the line height like ruby if necessary">
<link rel="match" href="text-emphasis-line-height-{index:03}-ref.html">
<p>Pass if the emphasis marks are {dir} the black line:</p>
{start}試験テスト{end}
'''
REF_FILE = 'text-emphasis-line-height-{:03}-ref.html'
REF_TEMPLATE='''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis line height, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rt {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if the emphasis marks are {dir} the black line:</p>
<div style="line-height: 1; border-{pos}: 1px solid black; writing-mode: {wm}; ruby-position: {posval}"><ruby>試<rt>●</rt>験<rt>●</rt>テ<rt>●</rt>ス<rt>●</rt>ト<rt>●</rt></ruby></div>
'''
STYLE1 = 'line-height: 1; border-{pos}: 1px solid black; ' + \
'writing-mode: {wm}; text-emphasis-position: {posval};'
STYLE2 = 'text-emphasis: circle;'
TAGS = [
# (tag, start, end)
('div', '<div style="{style1}{style2}">', '</div>'),
('span', '<div style="{style1}"><span style="{style2}">', '</span></div>'),
]
POSITIONS = [
# pos, text-emphasis-position, ruby-position,
# writing-modes, dir text
('top', 'over right', 'over',
['horizontal-tb'], 'below'),
('bottom', 'under right', 'under',
['horizontal-tb'], 'over'),
('right', 'over right', 'over',
['vertical-rl', 'vertical-lr'], 'to the left of'),
('left', 'over left', 'under',
['vertical-rl', 'vertical-lr'], 'to the right of'),
]
import string
def write_file(filename, content):
with open(filename, 'wb') as f:
f.write(content.encode('UTF-8'))
print("# START tests from {}".format(__file__))
idx = 0
for (pos, emphasis_pos, ruby_pos, wms, dir) in POSITIONS:
idx += 1
ref_file = REF_FILE.format(idx)
content = REF_TEMPLATE.format(pos=pos, dir=dir, wm=wms[0], posval=ruby_pos)
write_file(ref_file, content)
suffix = iter(string.ascii_lowercase)
for wm in wms:
style1 = STYLE1.format(pos=pos, wm=wm, posval=emphasis_pos)
for (tag, start, end) in TAGS:
test_file = TEST_FILE.format(idx, next(suffix))
content = TEST_TEMPLATE.format(
pos=pos, wm=wm, tag=tag, index=idx, dir=dir,
start=start.format(style1=style1, style2=STYLE2), end=end)
write_file(test_file, content)
print("== {} {}".format(test_file, ref_file))
print("# END tests from {}".format(__file__))
|
dpaiton/OpenPV
|
refs/heads/master
|
pv-core/analysis/python/plot_save_layer_timing.py
|
1
|
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import PVReadSparse as rs
import PVReadWeights as rw
import PVConversions as conv
import scipy.cluster.vq as sp
import math
from scipy.sparse import lil_matrix as lm
extended = False
l1a = rs.PVReadSparse(sys.argv[1], extended)
l1w = rw.PVReadWeights(sys.argv[2])
l2a = rs.PVReadSparse(sys.argv[3], extended)
l2w = rw.PVReadWeights(sys.argv[4])
l3a = rs.PVReadSparse(sys.argv[5], extended)
l3ow = rw.PVReadWeights(sys.argv[6])
l4oa = rs.PVReadSparse(sys.argv[7], extended)
l3fw = rw.PVReadWeights(sys.argv[8])
l4fa = rs.PVReadSparse(sys.argv[9], extended)
global l1wnxp
global l2wnxp
global l1weights
global l2weights
global lib2
l1anx = l1a.nx
"""
count = 0
count2 = 0
for h in range(10):
for g in range(10): # nx - patch size + 1
count3 = 0
for i in range(4): # patch size
for j in range(4): # patch size
print "i + j = ", i * 10 + j + g + h * 10 # i * nx + j + g
print "count3 = ", count3
print "count2 = ", count2
if (i + j) == 6:
count2+=1
count3+=1
count+=1
print
print "-----------"
print count
sys.exit()
"""
l1wnx = l1w.nx
l1wnxp = l1w.nxp
l2anx = l2a.nx
l2wnx = l2w.nx
l2wnxp = l2w.nxp
l3anx = l3a.nx
l3wnx = l3ow.nx
l3wnxp = l3ow.nxp
l4oanx = l4oa.nx
l4fanx = l4fa.nx
def l3o(k, l3nx):
print "at l3o"
k = int(k)
nx = k % l3nx
ny = k / l3nx
l4nx = l4oa.nx
diff = l3nx / float(l4nx)
cnx = nx / diff
cny = ny / diff
hrange = (l3wnxp / 2)
patch = l3oweights[k]
patch = np.reshape(patch, (l3wnxp, l3wnxp))
count = 0
for i in range(20):
i+=1
pastsec = lib3o[-i, 0:]
pastsec = np.reshape(pastsec, (l4nx, l4nx))
test = pastsec[cny-hrange:cny+hrange+1, cnx-hrange:cnx+hrange+1]
print
print "cny = ", cny
print "cnx = ", cnx
print "hrange = ", hrange
print "diff = ", diff
print "nx = ", nx
print "ny = ", ny
print "shape of patch = ", np.shape(patch)
print "shape 1 = ", np.shape(test)[0]
print "shape 2 = ", np.shape(test)[1]
print "l3nx = ", l3nx
print "l4nx = ", l4nx
print
for h in range(len(test)):
for j in range(len(test)):
if test[h, j] > 0:
count+=1
w = patch[h, j]
re = math.exp(-((i-1)/20.))
re = w * re
if count == 1:
fin = re
else:
fin = fin * re
return fin
def l3f(k, l3nx):
print "at l3f"
k = int(k)
nx = k % l3nx
ny = k / l3nx
l4nx = l4fa.nx
diff = l3nx / float(l4nx)
cnx = nx / diff
cny = ny / diff
hrange = (l3wnxp / 2)
patch = l3fweights[k]
patch = np.reshape(patch, (l3wnxp, l3wnxp))
count = 0
for i in range(20):
i+=1
pastsec = lib3f[-i, 0:]
pastsec = np.reshape(pastsec, (l4nx, l4nx))
test = pastsec[cny-hrange:cny+hrange+1, cnx-hrange:cnx+hrange+1]
for h in range(len(test)):
for j in range(len(test)):
if test[h, j] > 0:
count+=1
w = patch[h, j]
re = math.exp(-((i-1)/20.))
re = w * re
if count == 1:
fin = re
else:
fin = fin * re
return fin
def l2(k, l2nx):
print "at l2"
k = int(k)
nx = k % l2nx
ny = k / l2nx
l3nx = l3a.nx
diff = l2nx / float(l3nx)
cnx = nx / diff
cny = ny / diff
hrange = (l2wnxp / 2)
patch = l2weights[k]
patch = np.reshape(patch, (l2wnxp, l2wnxp))
ea = np.zeros((20, l2wnxp * l2wnxp))
count = 0
for i in range(20):
i+=1
#print "i = ", i
pastsec = lib2[-i, 0:]
pastsec = np.reshape(pastsec, (l3nx, l3nx))
test = pastsec[cny-hrange:cny+hrange, cnx-hrange:cnx+hrange]
for h in range(len(test)):
for j in range(len(test)):
if test[h, j] > 0:
count+=1
w = patch[h, j]
re = math.exp(-((i-1)/20.))
re = w * re
ea[i-1, l2wnxp * h + j] = re
#wherey = cny - hrange + h
#wherex = cnx - hrange + j
#newk = wherey * l2anx + wherex
#reso = l3o(newk, l3nx)
#resf = l3f(newk, l3nx)
#if count == 1:
# res = reso * resf * re
#else:
# res = res * reso * resf * re
#return res
ea = np.sum(ea, axis = 0)
ea = ea / float(np.sum(ea))
return ea
def l1(k, l2nx, l1nx, lib):
#k = 0*l1nx + 0
nx = k % l1nx
ny = k / l1nx
#print "l1wnxp = ", l1wnxp
diff = l1nx / float(l2nx)
print "diff = ", diff
sys.exit()
#print
#print "diff = ", diff
cnx = (nx+9) / diff
cny = (ny+9) / diff
hrange = (l1wnxp / 2)
#print "nx ny = ", nx, ny, cnx, cny
#print "hrange = ", hrange
patch = l1weights[k]
patch = np.reshape(patch, (l1wnxp, l1wnxp))
ea = np.zeros((20, l1wnxp * l1wnxp))
count = 0
for i in range(20):
i+=1
pastsec = lib[-i, 0:]
pastsec = np.reshape(pastsec, (l2nx, l2nx))
test = pastsec[ny:ny+l1wnxp, nx:nx+l1wnxp]
"""
print "k = ", k
pastsec = np.random.random_integers(0, 100, (l2nx, l2nx))
print "range", ny, ny+l1wnxp, nx, nx+l1wnxp
test = pastsec[ny:ny+l1wnxp, nx:nx+l1wnxp]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(pastsec, cmap=cm.jet, interpolation='nearest', vmin=np.min(pastsec), vmax=np.max(pastsec))
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.imshow(test, cmap=cm.jet, interpolation='nearest', vmin=np.min(pastsec), vmax=np.max(pastsec))
plt.show()
sys.exit()
"""
print "test = ", np.shape(test)
print test
#print "test[]", test
print "pastsec", np.shape(pastsec)
print "ny, nx", ny, nx
twee = 0
# flag windows clipped at the layer boundary (smaller than the full l1wnxp x l1wnxp patch)
if np.shape(test)[0] != l1wnxp or np.shape(test)[1] != l1wnxp:
twee +=1
for h in range(np.shape(test)[0]):
for j in range(np.shape(test)[1]):
if test[h, j] > 0:
count+=1
w = patch[h, j]
re = math.exp(-((i-1)/20.))
re = w * re
# offset indices for boundary-clipped windows (zero or negative)
difh = np.shape(test)[0] - l1wnxp
difj = np.shape(test)[1] - l1wnxp
if twee != 0:
1
#print "difh = ", difh
#print "difj = ", difj
ea[i-1, l1wnxp * (h+difh) + (j+difj)] = re
if twee != 0:
1
#print "l1wnxp * h + j =", (l1wnxp * h + j)
ea = np.sum(ea, axis = 0)
if np.sum(ea) > 0.0:
ea = ea / float(np.sum(ea))
if math.isnan(ea[0]) == True:
print
print "isnan == True"
sys.exit()
return ea
print "l2anx = ", l2anx
print "l2wnx = ", l2wnx
print "l2wnxp = ", l2wnxp
print "l1anx = ", l1anx
print "l1wnx = ", l1wnx
print "l1wnxp = ", l1wnxp
print
diff = l1anx / l2anx
end = 50
cliff = (l1anx / 2) * l1anx + ((l1anx / 2)-10)
lib = np.zeros((1, l2anx * l2anx))
lib2 = np.zeros((1, l3anx * l3anx))
lib3o = np.zeros((1, l4oanx * l4oanx))
lib3f = np.zeros((1, l4fanx * l4fanx))
"""
for i in range(prewnx*prewnx):
if i == 0:
preweights = prew.next_patch()
else:
a = prew.next_patch()
preweights = np.vstack((preweights, a))
print preweights
print np.shape(preweights)
"""
for i in range(l1wnx*l1wnx):
if i == 0:
l1weights = l1w.next_patch()
else:
a = l1w.next_patch()
l1weights = np.vstack((l1weights, a))
"""
for i in range(l2wnx*l2wnx):
if i == 0:
l2weights = l2w.next_patch()
else:
a = l2w.next_patch()
l2weights = np.vstack((l2weights, a))
for i in range(l3wnx*l3wnx):
if i == 0:
l3oweights = l3ow.next_patch()
else:
a = l3ow.next_patch()
l3oweights = np.vstack((l3oweights, a))
for i in range(l3wnx*l3wnx):
if i == 0:
l3fweights = l3fw.next_patch()
else:
a = l3fw.next_patch()
l3fweights = np.vstack((l3fweights, a))
"""
print "cliff = ", cliff
l1ar = lm(((l1anx * l1anx), (l1wnxp * l1wnxp +1)))
for i in range(end):
l1A = l1a.next_record()
l2A = l2a.next_activity()
#l3A = l3a.next_activity()
#l4oA = l4oa.next_activity()
#l4fA = l4fa.next_activity()
l2A = np.reshape(l2A, (1, l2anx*l2anx))
lib = np.vstack((lib, l2A))
#l3A = np.reshape(l3A, (1, l3anx*l3anx))
#lib2 = np.vstack((lib2, l3A))
#l4oA = np.reshape(l4oA, (1, l4oanx*l4oanx))
#lib3o = np.vstack((lib3o, l4oA))
#l4fA = np.reshape(l4fA, (1, l4fanx*l4fanx))
#lib3f = np.vstack((lib3f, l4fA))
if len(l1A) > 0:
print "l1A = ", l1A
for g in range(len(l1A)):
a = l1(l1A[g], l2anx, l1anx, lib)
#print "a = ", a
#print "len(a) + 1 = ", len(a)+1
for h in range(len(a)+1):
#print "h",h
if h == (len(a)):
#print " h 1 = ", h
l1ar[l1A[g],(h)] += 1
else:
#print "h = ", h
if a[h] > 0.0:
l1ar[l1A[g], h] += a[h]
#print "shape = ", np.shape(a)
#print
#e = lm.toarray(l1ar)
#print l1ar
#print " l1A ", l1A
#print e[l1A[g], 0:]
#print np.shape(e[l1A[g], 0:])
#print "g = ", g
#e = lm.toarray(l1ar)
#print "l1ar = ", l1ar
#for l in range(len(l1A)):
# print e[l1A[l], 0:]
#sys.exit()
e = lm.toarray(l1ar)
print "l1ar s = s =", np.shape(l1ar)
sys.exit()
#######################
#def con(e):
#######################
for i in range(l1anx * l1anx):
if math.isnan(e[i, 0]) == True:
print "e = ", e
print "e[i] = ", e[i, 0:]
print "i=", i
print np.shape(e)
print
print "1st nan == True"
sys.exit()
for i in range(l1anx * l1anx):
if np.sum(e[i, 0:]) >= 1.0:
print "e[i, 0:]", e[i, 0:]
print "e[i, l2wnxp*l2wnxp]", e[i, l1wnxp*l1wnxp]
e[i, 0:] = e[i, 0:] / e[i, l1wnxp * l1wnxp]
if math.isnan(e[i, 0]) == True:
print "e = ", e
print "e[i] = ", e[i, 0:]
print "i=", i
print np.shape(e)
print
print "nan == True"
sys.exit()
e = np.delete(e, l1wnxp * l1wnxp, axis=1)
gq = lm(((l1anx * l1anx), (l1anx * l1anx * l1wnxp * l1wnxp)))
print "shape gq ", np.shape(gq)
count2 = 0
count = 0
for h in range(l1anx):
for g in range(l1anx): # nx - patch size + 1
count3 = 0
for i in range(l1wnxp): # patch size
for j in range(l1wnxp): # patch size
where = i * l1anx + j + g + h * l1anx
if count2 > ((l1anx*l1anx) - 2000):
print "where = ", where
print "count2 = ", count2
print "count3 = ", count3
print
gq[count2, where] = e[count2, count3]
#print "i + j = ", i * 10 + j + g + h * 10
#print "count = ", count
#print "count2 = ", count2
if (i + j) == l1wnxp + l1wnxp - 2:
count2+=1
count+=1
count3+=1
#print
#print "-----------"
print
print "gq = ", gq
print np.shape(gq)
print "fin"
|
Weihonghao/ECM
|
refs/heads/master
|
Vpy34/lib/python3.5/site-packages/tensorflow/python/training/proximal_adagrad.py
|
33
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ProximalAdagrad for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class ProximalAdagradOptimizer(optimizer.Optimizer):
# pylint: disable=line-too-long
"""Optimizer that implements the Proximal Adagrad algorithm.
See this [paper](http://papers.nips.cc/paper/3793-efficient-learning-using-forward-backward-splitting.pdf).
@@__init__
"""
def __init__(self, learning_rate, initial_accumulator_value=0.1,
l1_regularization_strength=0.0, l2_regularization_strength=0.0,
use_locking=False, name="ProximalAdagrad"):
"""Construct a new ProximalAdagrad optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
initial_accumulator_value: A floating point value.
Starting value for the accumulators, must be positive.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Adagrad".
Raises:
ValueError: If the `initial_accumulator_value` is invalid.
"""
if initial_accumulator_value <= 0.0:
raise ValueError("initial_accumulator_value must be positive: %s" %
initial_accumulator_value)
super(ProximalAdagradOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._initial_accumulator_value = initial_accumulator_value
self._l1_regularization_strength = l1_regularization_strength
self._l2_regularization_strength = l2_regularization_strength
# Created in Initialize.
self._l1_regularization_strength_tensor = None
self._l2_regularization_strength_tensor = None
self._learning_rate_tensor = None
def _create_slots(self, var_list):
for v in var_list:
with ops.colocate_with(v):
val = constant_op.constant(self._initial_accumulator_value,
shape=v.get_shape())
self._get_or_make_slot(v, val, "accumulator", self._name)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,
name="learning_rate")
self._l1_regularization_strength_tensor = ops.convert_to_tensor(
self._l1_regularization_strength,
name="l1_regularization_strength")
self._l2_regularization_strength_tensor = ops.convert_to_tensor(
self._l2_regularization_strength,
name="l2_regularization_strength")
def _apply_dense(self, grad, var):
acc = self.get_slot(var, "accumulator")
return training_ops.apply_proximal_adagrad(
var, acc, self._learning_rate_tensor,
self._l1_regularization_strength_tensor,
self._l2_regularization_strength_tensor,
grad, use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
acc = self.get_slot(var, "accumulator")
return training_ops.sparse_apply_proximal_adagrad(
var, acc, self._learning_rate_tensor,
self._l1_regularization_strength_tensor,
self._l2_regularization_strength_tensor,
grad.values, grad.indices,
use_locking=self._use_locking)
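# A minimal usage sketch (variable names are illustrative), assuming the
# standard tf.train Optimizer interface; `loss` is a scalar Tensor built
# elsewhere in the graph:
#
#   opt = ProximalAdagradOptimizer(learning_rate=0.1,
#                                  l1_regularization_strength=0.001,
#                                  l2_regularization_strength=0.001)
#   train_op = opt.minimize(loss)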
|
elzaggo/pydoop
|
refs/heads/develop
|
examples/self_contained/check_results.py
|
1
|
# BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
import sys
import re
import logging
from collections import Counter
logging.basicConfig(level=logging.INFO)
from pydoop.hdfs import hdfs
import pydoop.test_support as pts
import pydoop.hadut as hadut
def compute_vc(input_dir):
fs = hdfs()
data = []
for x in fs.list_directory(input_dir):
with fs.open_file(x['path'], 'rt') as f:
data.append(f.read())
all_data = ''.join(data)
vowels = re.findall('[AEIOUY]', all_data.upper())
return Counter(vowels)
def get_res(output_dir):
return pts.parse_mr_output(hadut.collect_output(output_dir), vtype=int)
def check(measured_res, expected_res):
res = pts.compare_counts(measured_res, expected_res)
if res:
return "ERROR: %s" % res
else:
return "OK."
def main(argv):
logger = logging.getLogger("main")
logger.setLevel(logging.INFO)
input_dir = argv[1]
output_dir = argv[2]
logger.info("checking results")
measured_res = get_res(output_dir)
expected_res = compute_vc(input_dir)
logger.info(check(measured_res, expected_res))
if __name__ == "__main__":
main(sys.argv)
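# Typical invocation (directory names are illustrative): the first argument is
# the HDFS input directory, the second the MapReduce output directory.
#
#   python check_results.py input output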
|
saimn/astropy
|
refs/heads/main
|
astropy/io/misc/__init__.py
|
19
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains miscellaneous utility functions for data
input/output with astropy.
"""
from .pickle_helpers import *
|
eonpatapon/rally
|
refs/heads/master
|
rally/verification/tempest/diff.py
|
7
|
# Copyright 2014 Dell Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from rally.verification.tempest import compare2html
class Diff(object):
def __init__(self, test_cases1, test_cases2, threshold):
"""Compare two verification results.
Compares two verification results and emits
desired output, csv, html, json or pprint.
:param test_cases1: older verification json
:param test_cases2: newer verification json
:param threshold: test time difference percentage threshold
"""
self.threshold = threshold
self.diffs = self._compare(test_cases1, test_cases2)
def _compare(self, tc1, tc2):
"""Compare two verification results.
:param tc1: first verification test cases json
:param tc2: second verification test cases json
Typical test case json schema:
"test_case_key": {
"traceback": "", # exists only for "fail" status
"reason": "", # exists only for "skip" status
"name": "",
"status": "",
"time": 0.0
}
"""
diffs = []
names1 = set(tc1.keys())
names2 = set(tc2.keys())
common_tests = list(names1.intersection(names2))
removed_tests = list(names1.difference(common_tests))
new_tests = list(names2.difference(common_tests))
for name in removed_tests:
diffs.append({"type": "removed_test", "test_name": name})
for name in new_tests:
diffs.append({"type": "new_test", "test_name": name})
for name in common_tests:
diffs.extend(self._diff_values(name, tc1[name], tc2[name]))
return diffs
def _diff_values(self, name, result1, result2):
fields = ["status", "time", "traceback", "reason"]
diffs = []
for field in fields:
val1 = result1.get(field, 0)
val2 = result2.get(field, 0)
if val1 != val2:
if field == "time":
max_ = max(float(val1), float(val2))
min_ = min(float(val1), float(val2))
time_threshold = ((max_ - min_) / (min_ or 1)) * 100
if time_threshold < self.threshold:
continue
diffs.append({
"field": field,
"type": "value_changed",
"test_name": name,
"val1": val1,
"val2": val2
})
return diffs
def to_csv(self):
rows = (("Type", "Field", "Value 1", "Value 2", "Test Name"),)
for res in self.diffs:
row = (res.get("type"), res.get("field", ""),
res.get("val1", ""), res.get("val2", ""),
res.get("test_name"))
rows = rows + (row,)
return rows
def to_json(self):
return json.dumps(self.diffs, sort_keys=True, indent=4)
def to_html(self):
return compare2html.create_report(self.diffs)
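# Minimal usage sketch (test data is illustrative), using the test case schema
# documented in _compare above:
#
#   old = {"test_a": {"name": "test_a", "status": "success", "time": 1.0}}
#   new = {"test_a": {"name": "test_a", "status": "fail", "time": 3.0,
#                     "traceback": "..."}}
#   print(Diff(old, new, threshold=10).to_json())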
|
IndonesiaX/edx-platform
|
refs/heads/master
|
common/lib/xmodule/xmodule/tests/test_bulk_assertions.py
|
173
|
import ddt
import itertools
from xmodule.tests import BulkAssertionTest, BulkAssertionError
STATIC_PASSING_ASSERTIONS = (
('assertTrue', True),
('assertFalse', False),
('assertIs', 1, 1),
('assertEqual', 1, 1),
('assertEquals', 1, 1),
('assertIsNot', 1, 2),
('assertIsNone', None),
('assertIsNotNone', 1),
('assertIn', 1, (1, 2, 3)),
('assertNotIn', 5, (1, 2, 3)),
('assertIsInstance', 1, int),
('assertNotIsInstance', '1', int),
('assertItemsEqual', [1, 2, 3], [3, 2, 1])
)
STATIC_FAILING_ASSERTIONS = (
('assertTrue', False),
('assertFalse', True),
('assertIs', 1, 2),
('assertEqual', 1, 2),
('assertEquals', 1, 2),
('assertIsNot', 1, 1),
('assertIsNone', 1),
('assertIsNotNone', None),
('assertIn', 5, (1, 2, 3)),
('assertNotIn', 1, (1, 2, 3)),
('assertIsInstance', '1', int),
('assertNotIsInstance', 1, int),
('assertItemsEqual', [1, 1, 1], [1, 1])
)
CONTEXT_PASSING_ASSERTIONS = (
('assertRaises', KeyError, {}.__getitem__, '1'),
('assertRaisesRegexp', KeyError, "1", {}.__getitem__, '1'),
)
CONTEXT_FAILING_ASSERTIONS = (
('assertRaises', ValueError, lambda: None),
('assertRaisesRegexp', KeyError, "2", {}.__getitem__, '1'),
)
@ddt.ddt
class TestBulkAssertionTestCase(BulkAssertionTest):
# We have to use assertion methods from the base UnitTest class,
# so we make a number of super calls that skip BulkAssertionTest.
# pylint: disable=bad-super-call
def _run_assertion(self, assertion_tuple):
"""
Run the supplied tuple of (assertion, *args) as a method on this class.
"""
assertion, args = assertion_tuple[0], assertion_tuple[1:]
getattr(self, assertion)(*args)
def _raw_assert(self, assertion_name, *args, **kwargs):
"""
Run an un-modified assertion.
"""
# Use super(BulkAssertionTest) to make sure we get unadulterated assertions
return getattr(super(BulkAssertionTest, self), 'assert' + assertion_name)(*args, **kwargs)
@ddt.data(*(STATIC_PASSING_ASSERTIONS + CONTEXT_PASSING_ASSERTIONS))
def test_passing_asserts_passthrough(self, assertion_tuple):
self._run_assertion(assertion_tuple)
@ddt.data(*(STATIC_FAILING_ASSERTIONS + CONTEXT_FAILING_ASSERTIONS))
def test_failing_asserts_passthrough(self, assertion_tuple):
with self._raw_assert('Raises', AssertionError) as context:
self._run_assertion(assertion_tuple)
self._raw_assert('NotIsInstance', context.exception, BulkAssertionError)
@ddt.data(*CONTEXT_PASSING_ASSERTIONS)
@ddt.unpack
def test_passing_context_assertion_passthrough(self, assertion, *args):
assertion_args = []
args = list(args)
exception = args.pop(0)
while not callable(args[0]):
assertion_args.append(args.pop(0))
function = args.pop(0)
with getattr(self, assertion)(exception, *assertion_args):
function(*args)
@ddt.data(*CONTEXT_FAILING_ASSERTIONS)
@ddt.unpack
def test_failing_context_assertion_passthrough(self, assertion, *args):
assertion_args = []
args = list(args)
exception = args.pop(0)
while not callable(args[0]):
assertion_args.append(args.pop(0))
function = args.pop(0)
with self._raw_assert('Raises', AssertionError) as context:
with getattr(self, assertion)(exception, *assertion_args):
function(*args)
self._raw_assert('NotIsInstance', context.exception, BulkAssertionError)
@ddt.data(*list(itertools.product(
CONTEXT_PASSING_ASSERTIONS,
CONTEXT_FAILING_ASSERTIONS,
CONTEXT_FAILING_ASSERTIONS
)))
@ddt.unpack
def test_bulk_assert(self, passing_assertion, failing_assertion1, failing_assertion2):
contextmanager = self.bulk_assertions()
contextmanager.__enter__()
self._run_assertion(passing_assertion)
self._run_assertion(failing_assertion1)
self._run_assertion(failing_assertion2)
with self._raw_assert('Raises', BulkAssertionError) as context:
contextmanager.__exit__(None, None, None)
self._raw_assert('Equals', len(context.exception.errors), 2)
@ddt.data(*list(itertools.product(
CONTEXT_FAILING_ASSERTIONS
)))
@ddt.unpack
def test_nested_bulk_asserts(self, failing_assertion):
with self._raw_assert('Raises', BulkAssertionError) as context:
with self.bulk_assertions():
self._run_assertion(failing_assertion)
with self.bulk_assertions():
self._run_assertion(failing_assertion)
self._run_assertion(failing_assertion)
self._raw_assert('Equal', len(context.exception.errors), 3)
@ddt.data(*list(itertools.product(
CONTEXT_PASSING_ASSERTIONS,
CONTEXT_FAILING_ASSERTIONS,
CONTEXT_FAILING_ASSERTIONS
)))
@ddt.unpack
def test_bulk_assert_closed(self, passing_assertion, failing_assertion1, failing_assertion2):
with self._raw_assert('Raises', BulkAssertionError) as context:
with self.bulk_assertions():
self._run_assertion(passing_assertion)
self._run_assertion(failing_assertion1)
self._raw_assert('Equals', len(context.exception.errors), 1)
with self._raw_assert('Raises', AssertionError) as context:
self._run_assertion(failing_assertion2)
self._raw_assert('NotIsInstance', context.exception, BulkAssertionError)
|
thanhacun/odoo
|
refs/heads/8.0
|
addons/product_extended/product_extended.py
|
185
|
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
class product_template(osv.osv):
_name = 'product.template'
_inherit = 'product.template'
def compute_price(self, cr, uid, product_ids, template_ids=False, recursive=False, test=False, real_time_accounting = False, context=None):
'''
Computes the standard price of the given products (or templates) from their
BoMs; accepts multiple ids at once.
Returns testdict when test is True, to inform the user about the changes that
would be made; otherwise the computed prices are written and True is returned.
'''
testdict = {}
if product_ids:
ids = product_ids
model = 'product.product'
else:
ids = template_ids
model = 'product.template'
for prod_id in ids:
bom_obj = self.pool.get('mrp.bom')
if model == 'product.product':
bom_id = bom_obj._bom_find(cr, uid, product_id=prod_id, context=context)
else:
bom_id = bom_obj._bom_find(cr, uid, product_tmpl_id=prod_id, context=context)
if bom_id:
# In recursive mode, it will first compute the prices of child boms
if recursive:
#Search the products that are components of this bom of prod_id
bom = bom_obj.browse(cr, uid, bom_id, context=context)
#Call compute_price on these subproducts
prod_set = set([x.product_id.id for x in bom.bom_line_ids])
res = self.compute_price(cr, uid, list(prod_set), recursive=recursive, test=test, real_time_accounting = real_time_accounting, context=context)
if test:
testdict.update(res)
#Use calc price to calculate and put the price on the product of the BoM if necessary
price = self._calc_price(cr, uid, bom_obj.browse(cr, uid, bom_id, context=context), test=test, real_time_accounting = real_time_accounting, context=context)
if test:
testdict.update({prod_id : price})
if test:
return testdict
else:
return True
def _calc_price(self, cr, uid, bom, test = False, real_time_accounting=False, context=None):
if context is None:
context={}
price = 0
uom_obj = self.pool.get("product.uom")
tmpl_obj = self.pool.get('product.template')
for sbom in bom.bom_line_ids:
my_qty = sbom.product_qty / sbom.product_efficiency
if not sbom.attribute_value_ids:
# No attribute_value_ids means the bom line is not variant specific
price += uom_obj._compute_price(cr, uid, sbom.product_id.uom_id.id, sbom.product_id.standard_price, sbom.product_uom.id) * my_qty
if bom.routing_id:
for wline in bom.routing_id.workcenter_lines:
wc = wline.workcenter_id
cycle = wline.cycle_nbr
hour = (wc.time_start + wc.time_stop + cycle * wc.time_cycle) * (wc.time_efficiency or 1.0)
price += wc.costs_cycle * cycle + wc.costs_hour * hour
price = self.pool.get('product.uom')._compute_price(cr,uid,bom.product_uom.id, price, bom.product_id.uom_id.id)
#Convert on product UoM quantities
if price > 0:
price = uom_obj._compute_price(cr, uid, bom.product_uom.id, price / bom.product_qty, bom.product_id.uom_id.id)
product = tmpl_obj.browse(cr, uid, bom.product_tmpl_id.id, context=context)
if not test:
if (product.valuation != "real_time" or not real_time_accounting):
tmpl_obj.write(cr, uid, [product.id], {'standard_price' : price}, context=context)
else:
#Call wizard function here
wizard_obj = self.pool.get("stock.change.standard.price")
ctx = context.copy()
ctx.update({'active_id': product.id, 'active_model': 'product.template'})
wiz_id = wizard_obj.create(cr, uid, {'new_price': price}, context=ctx)
wizard_obj.change_price(cr, uid, [wiz_id], context=ctx)
return price
class product_bom(osv.osv):
_inherit = 'mrp.bom'
_columns = {
'standard_price': fields.related('product_tmpl_id','standard_price',type="float",relation="product.product",string="Standard Price",store=False)
}
product_bom()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
aicjofs/android_kernel_fuhu_t8400n
|
refs/heads/custom
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If no such parent exists,
it is created as a new child of ROOT.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
xshotD/pyglet
|
refs/heads/master
|
pyglet/gl/lib_glx.py
|
44
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypes import *
import pyglet.lib
from pyglet.gl.lib import missing_function, decorate_function
from pyglet.compat import asbytes
__all__ = ['link_GL', 'link_GLU', 'link_GLX']
gl_lib = pyglet.lib.load_library('GL')
glu_lib = pyglet.lib.load_library('GLU')
# Look for glXGetProcAddressARB extension, use it as fallback (for
# ATI fglrx and DRI drivers).
try:
glXGetProcAddressARB = getattr(gl_lib, 'glXGetProcAddressARB')
glXGetProcAddressARB.restype = POINTER(CFUNCTYPE(None))
glXGetProcAddressARB.argtypes = [POINTER(c_ubyte)]
_have_getprocaddress = True
except AttributeError:
_have_getprocaddress = False
def link_GL(name, restype, argtypes, requires=None, suggestions=None):
try:
func = getattr(gl_lib, name)
func.restype = restype
func.argtypes = argtypes
decorate_function(func, name)
return func
except AttributeError:
if _have_getprocaddress:
# Fallback if implemented but not in ABI
bname = cast(pointer(create_string_buffer(asbytes(name))), POINTER(c_ubyte))
addr = glXGetProcAddressARB(bname)
if addr:
ftype = CFUNCTYPE(*((restype,) + tuple(argtypes)))
func = cast(addr, ftype)
decorate_function(func, name)
return func
return missing_function(name, requires, suggestions)
link_GLX = link_GL
def link_GLU(name, restype, argtypes, requires=None, suggestions=None):
try:
func = getattr(glu_lib, name)
func.restype = restype
func.argtypes = argtypes
decorate_function(func, name)
return func
except AttributeError:
return missing_function(name, requires, suggestions)
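# Minimal usage sketch (the chosen entry point is illustrative): glFlush takes
# no arguments and returns nothing, so restype is None and argtypes is empty.
#
#   glFlush = link_GL('glFlush', None, [])
#   glFlush()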
|