repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
brabadu/Data-Structure-Zoo | refs/heads/master | 5-Stack and its applications/stack.py | 3 | """ Stack
thomas moll 2015
"""
class Stack(object):
    """A LIFO stack backed by a singly linked list of StackNode cells."""

    def __init__(self):
        # Number of items currently stored; kept in sync by push/pop.
        self.size = 0
        # Head of the linked list; None when the stack is empty.
        self.top = None

    def __len__(self):
        return self.size

    def push(self, item):
        """Place item on top of the stack."""
        node = StackNode(item)
        # Link the new node in front of the current top (which may be None).
        node.next = self.top
        self.top = node
        self.size += 1

    def pop(self):
        """Remove and return the top item; raises ValueError when empty."""
        if self.top is None:
            raise ValueError()
        popped = self.top
        # Skip over the removed node so the one beneath becomes the top.
        self.top = popped.next
        self.size -= 1
        return popped.data

    def peek(self):
        """Return the top item without removing it; raises ValueError when empty."""
        if self.top is None:
            raise ValueError()
        return self.top.data
class StackNode(object):
    """A single linked-list cell holding one stacked value.

    Attributes:
        data: the value stored in this node.
        next: the node beneath this one, or None at the bottom.
    """

    def __init__(self, item):
        self.next = None
        self.data = item
def check_parenthesis(string):
    """Return True when every bracket in string is properly matched and nested."""
    # Maps each opening bracket to the closing bracket that must pair with it.
    pairs = {'{': '}', '[': ']', '(': ')'}
    openers = Stack()
    for ch in string:
        if ch in pairs:
            # Opening bracket: remember it until its partner shows up.
            openers.push(ch)
        if ch in pairs.values():
            # Closing bracket: it must pair with the most recent opener.
            try:
                if pairs[openers.pop()] != ch:
                    return False
            except ValueError:
                # A closing bracket arrived with no opener left on the stack.
                return False
    # Any leftover openers mean the input was unbalanced.
    return openers.size == 0
|
TeamExodus/external_skia | refs/heads/EXODUS-6.0 | PRESUBMIT.py | 22 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Skia.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import csv
import fnmatch
import os
import re
import subprocess
import sys
import traceback
# Subject prefix used to recognize revert CLs (matched case-insensitively).
REVERT_CL_SUBJECT_PREFIX = 'Revert '

# Base URL of the Skia tree status service.
SKIA_TREE_STATUS_URL = 'http://skia-tree-status.appspot.com'

# CQ keywords whose hashtag-mapped values are appended (';'-separated) to an
# existing description line instead of being added as a new line; see
# _HandleAppendingCQKeywords.
CQ_KEYWORDS_THAT_NEED_APPENDING = ('CQ_INCLUDE_TRYBOTS', 'CQ_EXTRA_TRYBOTS',
                                   'CQ_EXCLUDE_TRYBOTS', 'CQ_TRYBOTS')

# Please add the complete email address here (and not just 'xyz@' or 'xyz').
PUBLIC_API_OWNERS = (
    'reed@chromium.org',
    'reed@google.com',
    'bsalomon@chromium.org',
    'bsalomon@google.com',
    'djsollen@chromium.org',
    'djsollen@google.com',
)

# Name of the contributors file checked by _CheckOwnerIsInAuthorsFile.
AUTHORS_FILE_NAME = 'AUTHORS'

# Prefix for docs-preview links; the CL issue number is appended.
DOCS_PREVIEW_URL = 'https://skia.org/?cl='
def _CheckChangeHasEol(input_api, output_api, source_file_filter=None):
  """Warns about affected files whose last byte is not a newline (LF)."""
  offenders = []
  for affected in input_api.AffectedSourceFiles(source_file_filter):
    data = input_api.ReadFile(affected, 'rb')
    # Only non-trivial files are checked; the last byte must be a newline.
    if len(data) > 1 and not data.endswith('\n'):
      offenders.append(affected.LocalPath())
  if not offenders:
    return []
  return [output_api.PresubmitPromptWarning(
      'These files should end in a newline character:',
      items=offenders)]
def _PythonChecks(input_api, output_api):
  """Lints any modified Python files with pylint."""
  # Warnings that are noisy or irrelevant for this codebase.
  disabled = (
      'F0401',  # Unable to import.
      'E0611',  # No name in module.
      'W0232',  # Class has no __init__ method.
      'E1002',  # Use of super on an old style class.
      'W0403',  # Relative import used.
      'R0201',  # Method could be a function.
      'E1003',  # Using class name in super.
      'W0613',  # Unused argument.
  )
  # Restrict pylint to the touched .py files. Unfortunately pylint still
  # examines each whole file rather than just the modified lines.
  python_files = [
      affected.LocalPath()
      for affected in input_api.AffectedSourceFiles(None)
      if affected.LocalPath().endswith('.py')
  ]
  return input_api.canned_checks.RunPylint(
      input_api, output_api,
      disabled_warnings=disabled,
      white_list=python_files)
def _IfDefChecks(input_api, output_api):
  """Ensures if/ifdef are not before includes. See skbug/3362 for details.

  Only the first non-comment, non-blank line of each affected .cpp/.h file
  is examined; a leading #if/#ifdef (other than a '#if 0' block, which is
  treated as commented-out code) is reported as an error.
  """
  comment_block_start_pattern = re.compile('^\s*\/\*.*$')
  comment_block_middle_pattern = re.compile('^\s+\*.*')
  comment_block_end_pattern = re.compile('^\s+\*\/.*$')
  single_line_comment_pattern = re.compile('^\s*//.*$')

  def is_comment(line):
    # Matches /* ... */ block comment lines as well as // line comments.
    return (comment_block_start_pattern.match(line) or
            comment_block_middle_pattern.match(line) or
            comment_block_end_pattern.match(line) or
            single_line_comment_pattern.match(line))

  empty_line_pattern = re.compile('^\s*$')

  def is_empty_line(line):
    return empty_line_pattern.match(line)

  failing_files = []
  for affected_file in input_api.AffectedSourceFiles(None):
    affected_file_path = affected_file.LocalPath()
    if affected_file_path.endswith('.cpp') or affected_file_path.endswith('.h'):
      # Use a context manager so the handle is always closed (the original
      # leaked it), and iterate the file object directly instead of the
      # deprecated Python-2-only xreadlines().
      with open(affected_file_path) as f:
        for line in f:
          if is_comment(line) or is_empty_line(line):
            continue
          # This is the first real line after comments and newlines; only
          # this line determines the verdict, so we break afterwards.
          if line.startswith('#if 0 '):
            pass
          elif line.startswith('#if ') or line.startswith('#ifdef '):
            failing_files.append(affected_file_path)
          break

  results = []
  if failing_files:
    results.append(
        output_api.PresubmitError(
            'The following files have #if or #ifdef before includes:\n%s\n\n'
            'See skbug.com/3362 for why this should be fixed.' %
            '\n'.join(failing_files)))
  return results
def _CopyrightChecks(input_api, output_api, source_file_filter=None):
  """Errors on affected files that lack a recognizable copyright header."""
  year = r'\d{4}'
  # A single year or a range such as 2013-2015.
  year_range = r'%s(-%s)?' % (year, year)
  # One or more comma-separated years/ranges, optionally trailing-comma'd.
  years = r'%s(,%s)*,?' % (year_range, year_range)
  header = r'Copyright (\([cC]\) )?%s \w+' % years
  results = []
  for affected in input_api.AffectedSourceFiles(source_file_filter):
    if 'third_party' in affected.LocalPath():
      # Vendored code under third_party keeps its upstream header.
      continue
    if not re.search(header, input_api.ReadFile(affected, 'rb')):
      results.append(output_api.PresubmitError(
          '%s is missing a correct copyright header.' % affected))
  return results
def _ToolFlags(input_api, output_api):
  """Make sure `{dm,nanobench}_flags.py test` passes if modified."""
  results = []

  def is_flags_file(affected):
    path = affected.LocalPath()
    return 'dm_flags.py' in path or 'nanobench_flags.py' in path

  for f in input_api.AffectedSourceFiles(is_flags_file):
    # Each flags script self-tests via its 'test' subcommand; a non-zero
    # exit status means the check failed.
    if subprocess.call(['python', f.LocalPath(), 'test']) != 0:
      results.append(output_api.PresubmitError('`python %s test` failed' % f))
  return results
def _CommonChecks(input_api, output_api):
  """Presubmit checks common to upload and commit."""
  checked_extensions = ('.h', '.gypi', '.gyp', '.py', '.sh', '.m', '.mm',
                        '.go', '.c', '.cc', '.cpp')
  # str.endswith accepts a tuple of suffixes, so one call covers them all.
  sources = lambda x: x.LocalPath().endswith(checked_extensions)
  results = []
  results.extend(
      _CheckChangeHasEol(
          input_api, output_api, source_file_filter=sources))
  results.extend(_PythonChecks(input_api, output_api))
  results.extend(_IfDefChecks(input_api, output_api))
  results.extend(_CopyrightChecks(input_api, output_api,
                                  source_file_filter=sources))
  results.extend(_ToolFlags(input_api, output_api))
  return results
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit checks for the change on upload.

  The following are the presubmit checks:
  * Check change has one and only one EOL.
  """
  # Upload runs only the common checks; commit adds tree/owner checks.
  return list(_CommonChecks(input_api, output_api))
def _CheckTreeStatus(input_api, output_api, json_url):
  """Check whether to allow commit.

  Args:
    input_api: input related apis.
    output_api: output related apis.
    json_url: url to download json style status.
  """
  # CheckTreeIsOpen returns a non-empty result list when the tree is closed
  # (its results are errors/warnings); an empty list means the tree is open.
  tree_status_results = input_api.canned_checks.CheckTreeIsOpen(
      input_api, output_api, json_url=json_url)
  if not tree_status_results:
    # Check for caution state only if tree is not closed.
    connection = input_api.urllib2.urlopen(json_url)
    status = input_api.json.loads(connection.read())
    connection.close()
    if ('caution' in status['message'].lower() and
        os.isatty(sys.stdout.fileno())):
      # Display a prompt only if we are in an interactive shell. Without this
      # check the commit queue behaves incorrectly because it considers
      # prompts to be failures.
      short_text = 'Tree state is: ' + status['general_state']
      long_text = status['message'] + '\n' + json_url
      tree_status_results.append(
          output_api.PresubmitPromptWarning(
              message=short_text, long_text=long_text))
  else:
    # Tree status is closed. Put in message about contacting sheriff.
    connection = input_api.urllib2.urlopen(
        SKIA_TREE_STATUS_URL + '/current-sheriff')
    sheriff_details = input_api.json.loads(connection.read())
    if sheriff_details:
      # NOTE(review): mutates the private _message attribute of the first
      # canned-check result to append sheriff contact info — relies on
      # presubmit result internals; confirm against depot_tools.
      tree_status_results[0]._message += (
          '\n\nPlease contact the current Skia sheriff (%s) if you are trying '
          'to submit a build fix\nand do not know how to submit because the '
          'tree is closed') % sheriff_details['username']
  return tree_status_results
def _CheckOwnerIsInAuthorsFile(input_api, output_api):
  """Errors if the CL owner's email does not match an entry in AUTHORS.

  AUTHORS entries are '<pattern>' fnmatch globs, so e.g. '*@google.com'
  covers all matching addresses. Missing AUTHORS files are tolerated.
  """
  results = []
  issue = input_api.change.issue
  if issue and input_api.rietveld:
    issue_properties = input_api.rietveld.get_issue_properties(
        issue=int(issue), messages=False)
    owner_email = issue_properties['owner_email']
    try:
      authors_content = ''
      # Use a context manager so the AUTHORS handle is always closed
      # (the original left the file object open).
      with open(AUTHORS_FILE_NAME) as authors_file:
        for line in authors_file:
          if not line.startswith('#'):
            authors_content += line
      email_fnmatches = re.findall('<(.*)>', authors_content)
      for email_fnmatch in email_fnmatches:
        if fnmatch.fnmatch(owner_email, email_fnmatch):
          # Found a match, the user is in the AUTHORS file break out of the loop
          break
      else:
        # for/else: no AUTHORS pattern matched the owner's email.
        # TODO(rmistry): Remove the below CLA messaging once a CLA checker has
        # been added to the CQ.
        results.append(
            output_api.PresubmitError(
                'The email %s is not in Skia\'s AUTHORS file.\n'
                'Issue owner, this CL must include an addition to the Skia AUTHORS '
                'file.\n'
                'Googler reviewers, please check that the AUTHORS entry '
                'corresponds to an email address in http://goto/cla-signers. If it '
                'does not then ask the issue owner to sign the CLA at '
                'https://developers.google.com/open-source/cla/individual '
                '(individual) or '
                'https://developers.google.com/open-source/cla/corporate '
                '(corporate).'
                % owner_email))
    except IOError:
      # Do not fail if authors file cannot be found.
      traceback.print_exc()
      input_api.logging.error('AUTHORS file not found!')
  return results
def _CheckLGTMsForPublicAPI(input_api, output_api):
  """Check LGTMs for public API changes.

  For public API files make sure there is an LGTM from the list of owners in
  PUBLIC_API_OWNERS.
  """
  results = []
  requires_owner_check = False
  for affected_file in input_api.AffectedFiles():
    affected_file_path = affected_file.LocalPath()
    file_path, file_ext = os.path.splitext(affected_file_path)
    # We only care about files that end in .h and are under the top-level
    # include dir.
    if file_ext == '.h' and 'include' == file_path.split(os.path.sep)[0]:
      requires_owner_check = True
  if not requires_owner_check:
    return results
  lgtm_from_owner = False
  issue = input_api.change.issue
  if issue and input_api.rietveld:
    issue_properties = input_api.rietveld.get_issue_properties(
        issue=int(issue), messages=True)
    if re.match(REVERT_CL_SUBJECT_PREFIX, issue_properties['subject'], re.I):
      # It is a revert CL, ignore the public api owners check.
      return results
    # TODO(rmistry): Stop checking for COMMIT=false once crbug/470609 is
    # resolved.
    if issue_properties['cq_dry_run'] or re.search(
        r'^COMMIT=false$', issue_properties['description'], re.M):
      # Ignore public api owners check for dry run CLs since they are not
      # going to be committed.
      return results
    match = re.search(r'^TBR=(.*)$', issue_properties['description'], re.M)
    if match:
      # TBR entries may be full addresses or bare usernames (before the '@').
      tbr_entries = match.group(1).strip().split(',')
      for owner in PUBLIC_API_OWNERS:
        if owner in tbr_entries or owner.split('@')[0] in tbr_entries:
          # If an owner is specified in the TBR= line then ignore the public
          # api owners check.
          return results
    if issue_properties['owner_email'] in PUBLIC_API_OWNERS:
      # An owner created the CL that is an automatic LGTM.
      lgtm_from_owner = True
    messages = issue_properties.get('messages')
    if messages:
      for message in messages:
        # Any review message from an owner containing 'lgtm' counts.
        if (message['sender'] in PUBLIC_API_OWNERS and
            'lgtm' in message['text'].lower()):
          # Found an lgtm in a message from an owner.
          lgtm_from_owner = True
          break
  if not lgtm_from_owner:
    results.append(
        output_api.PresubmitError(
            'Since the CL is editing public API, you must have an LGTM from '
            'one of: %s' % str(PUBLIC_API_OWNERS)))
  return results
def PostUploadHook(cl, change, output_api):
  """git cl upload will call this hook after the issue is created/modified.

  This hook does the following:
  * Adds a link to preview docs changes if there are any docs changes in the CL.
  * Adds 'NOTRY=true' if the CL contains only docs changes.
  * Adds 'NOTREECHECKS=true' for non master branch changes since they do not
    need to be gated on the master branch's tree.
  * Adds 'NOTRY=true' for non master branch changes since trybots do not yet
    work on them.
  """
  results = []
  atleast_one_docs_change = False
  all_docs_changes = True
  # Docs changes live under the top-level 'site' directory.
  for affected_file in change.AffectedFiles():
    affected_file_path = affected_file.LocalPath()
    file_path, _ = os.path.splitext(affected_file_path)
    if 'site' == file_path.split(os.path.sep)[0]:
      atleast_one_docs_change = True
    else:
      all_docs_changes = False
    # Both flags are settled once we have seen a docs and a non-docs file.
    if atleast_one_docs_change and not all_docs_changes:
      break
  issue = cl.issue
  rietveld_obj = cl.RpcServer()
  if issue and rietveld_obj:
    original_description = rietveld_obj.get_description(issue)
    new_description = original_description
    # If the change includes only doc changes then add NOTRY=true in the
    # CL's description if it does not exist yet.
    if all_docs_changes and not re.search(
        r'^NOTRY=true$', new_description, re.M | re.I):
      new_description += '\nNOTRY=true'
      results.append(
          output_api.PresubmitNotifyResult(
              'This change has only doc changes. Automatically added '
              '\'NOTRY=true\' to the CL\'s description'))
    # If there is atleast one docs change then add preview link in the CL's
    # description if it does not already exist there.
    if atleast_one_docs_change and not re.search(
        r'^DOCS_PREVIEW=.*', new_description, re.M | re.I):
      # Automatically add a link to where the docs can be previewed.
      new_description += '\nDOCS_PREVIEW= %s%s' % (DOCS_PREVIEW_URL, issue)
      results.append(
          output_api.PresubmitNotifyResult(
              'Automatically added a link to preview the docs changes to the '
              'CL\'s description'))
    # If the target ref is not master then add NOTREECHECKS=true and NOTRY=true
    # to the CL's description if it does not already exist there.
    target_ref = rietveld_obj.get_issue_properties(issue, False).get(
        'target_ref', '')
    if target_ref != 'refs/heads/master':
      if not re.search(
          r'^NOTREECHECKS=true$', new_description, re.M | re.I):
        new_description += "\nNOTREECHECKS=true"
        results.append(
            output_api.PresubmitNotifyResult(
                'Branch changes do not need to rely on the master branch\'s '
                'tree status. Automatically added \'NOTREECHECKS=true\' to the '
                'CL\'s description'))
      if not re.search(
          r'^NOTRY=true$', new_description, re.M | re.I):
        new_description += "\nNOTRY=true"
        results.append(
            output_api.PresubmitNotifyResult(
                'Trybots do not yet work for non-master branches. '
                'Automatically added \'NOTRY=true\' to the CL\'s description'))
    # Read and process the HASHTAGS file.
    # NOTE(review): reads change._local_root, a private attribute of the
    # presubmit change object — confirm against depot_tools internals.
    hashtags_fullpath = os.path.join(change._local_root, 'HASHTAGS')
    with open(hashtags_fullpath, 'rb') as hashtags_csv:
      # Each CSV row is: hashtag, mapped_text1, mapped_text2, ...
      hashtags_reader = csv.reader(hashtags_csv, delimiter=',')
      for row in hashtags_reader:
        if not row or row[0].startswith('#'):
          # Ignore empty lines and comments
          continue
        hashtag = row[0]
        # Search for the hashtag in the description.
        if re.search('#%s' % hashtag, new_description, re.M | re.I):
          for mapped_text in row[1:]:
            # Special case handling for CQ_KEYWORDS_THAT_NEED_APPENDING.
            appended_description = _HandleAppendingCQKeywords(
                hashtag, mapped_text, new_description, results, output_api)
            if appended_description:
              # The keyword was handled (possibly a no-op); skip the generic
              # add-a-new-line handling below.
              new_description = appended_description
              continue
            # Add the mapped text if it does not already exist in the
            # CL's description.
            if not re.search(
                r'^%s$' % mapped_text, new_description, re.M | re.I):
              new_description += '\n%s' % mapped_text
              results.append(
                  output_api.PresubmitNotifyResult(
                      'Found \'#%s\', automatically added \'%s\' to the CL\'s '
                      'description' % (hashtag, mapped_text)))
    # If the description has changed update it.
    if new_description != original_description:
      rietveld_obj.update_description(issue, new_description)
  return results
def _HandleAppendingCQKeywords(hashtag, keyword_and_value, description,
                               results, output_api):
  """Handles the CQ keywords that need appending if specified in hashtags.

  Returns None when the keyword is not one that needs appending (or is not
  already present in the description); otherwise returns the description,
  updated with the new value appended if it was not already listed.
  """
  keyword = keyword_and_value.split('=')[0]
  if keyword not in CQ_KEYWORDS_THAT_NEED_APPENDING:
    return None
  # If the keyword is already in the description then append to it.
  existing = re.search(
      r'^%s=(.*)$' % keyword, description, re.M | re.I)
  if not existing:
    return None
  new_value = keyword_and_value.split('=')[1]
  if new_value in existing.group(1).split(';'):
    # The value is already listed; nothing to do.
    return description
  # Append the new value, semicolon-separated, to the existing keyword line.
  new_description = description.replace(
      existing.group(0), "%s;%s" % (existing.group(0), new_value))
  results.append(
      output_api.PresubmitNotifyResult(
          'Found \'#%s\', automatically appended \'%s\' to %s in '
          'the CL\'s description' % (hashtag, new_value, keyword)))
  return new_description
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit checks for the change on commit.

  The following are the presubmit checks:
  * Check change has one and only one EOL.
  * Ensures that the Skia tree is open in
    http://skia-tree-status.appspot.com/. Shows a warning if it is in 'Caution'
    state and an error if it is in 'Closed' state.
  """
  results = []
  # Run the same checks as upload, plus commit-only gating checks.
  for check_results in (
      _CommonChecks(input_api, output_api),
      _CheckTreeStatus(input_api, output_api, json_url=(
          SKIA_TREE_STATUS_URL + '/banner-status?format=json')),
      _CheckLGTMsForPublicAPI(input_api, output_api),
      _CheckOwnerIsInAuthorsFile(input_api, output_api)):
    results.extend(check_results)
  return results
|
fjxhkj/PTVS | refs/heads/master | Python/Product/Pyvot/Pyvot/xl/sheet.py | 18 | # Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the LICENSE.txt file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# vspython@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
from xl.cache import cache_result, enable_caching
import xl._impl.com_utils as com_utils
import xl._impl.table as table
from xl.range import Range, ExcelRangeError, _xlRange_from_corners, _xlRange_parse
# Worksheet
class Worksheet(object):
    """Wraps an Excel worksheet COM object (xlWorksheet)."""

    def __init__(self, xlSheet):
        # The underlying win32com worksheet object.
        self.xlWorksheet = xlSheet

    # return 1-based index of empty column (column without content that we can write to)
    def _findOpenColumn(self):
        xlRange = self.xlWorksheet.UsedRange
        # excel has a bug where if the sheet is completely empty, it still returns 1 column
        # Check explicitly
        if xlRange.Count == 1:
            if self.xlWorksheet.Cells(1, 1).Value is None:
                return 1
        # Else, pick the next column after the used range
        cs = [c.Column for c in xlRange.Columns]
        x = max(cs)  # edge of used area
        return x + 1  # +1, move past edge

    def __str__(self):
        return self.name

    @property
    def name(self):
        """The worksheet's display name."""
        return self.xlWorksheet.Name

    @cache_result
    @enable_caching
    def _getTableColumn(self, name):
        """Search through this worksheet's tables for the given table column.

        Return a Range if found, else None."""
        for t in self.tables:
            rData = t._getTableColumn(name)
            if rData is not None:
                return rData
        return None

    @cache_result
    @property
    @enable_caching
    def tables(self):
        """Returns a list of all table-like things on the sheet"""
        l = []
        # A sheet can have at most one AutoFilter-based table...
        t = table.tableFromAutoFilter(self.xlWorksheet)
        if t is not None:
            l.append(t)
        # ...plus any number of ListObject (Ctrl+T) tables.
        los = self.xlWorksheet.ListObjects
        for lo in los:
            t = table.tableFromListObject(lo)
            l.append(t)
        return l

    def _find_table_containing_range(self, range):
        """Search all Tables on the sheet for any that contain the given range.

        Return None if not found."""
        for t in self.tables:
            if t.rData.intersects(range):
                return t
        return None
# Module-level singleton managed via Workbook.default_workbook() /
# Workbook.set_default_workbook(); None until the first Workbook is created.
_default_workbook = None
# Top level object for Excel
class Workbook(object):
    """Top-level wrapper around an Excel workbook COM object.

    Workbook() - creates a new excel instance
    Workbook(filename) - attaches to existing excel instance, throws error if file not open
    """

    def __init__(self, *args):
        import win32com.client as win32
        com_utils.ensure_excel_dispatch_support()
        if len(args) == 0:
            # Create a new empty instance
            excel = win32.gencache.EnsureDispatch('Excel.Application')
            excel.Visible = True
            self.xlWorkbook = excel.Workbooks.Add()
            assert self.xlWorkbook is not None
        elif len(args) == 1:
            if isinstance(args[0], basestring):
                filename = args[0]
                # Prefer attaching to an already-open workbook; open otherwise.
                self.xlWorkbook = com_utils.get_running_xlWorkbook_for_filename(filename)
                if self.xlWorkbook is None:
                    self.xlWorkbook = com_utils.open_xlWorkbook(filename)
            else:
                # Assume the caller passed a raw COM workbook object.
                assert hasattr(args[0], "CLSID"), "Expected workbook name or xlWorkbook"
                self.xlWorkbook = args[0]
        # $$$ fix this behavior
        self.set_default_workbook(self)

    @classmethod
    def default_workbook(cls):
        """Return the process-wide default Workbook, creating one if needed."""
        global _default_workbook
        if _default_workbook is None:
            cls.set_default_workbook(Workbook())
        return _default_workbook

    @classmethod
    def set_default_workbook(cls, workbook):
        """Set the process-wide default Workbook; must not be None."""
        if workbook is None:
            raise ValueError("Can't set active workbook instance to None")
        global _default_workbook
        _default_workbook = workbook

    @cache_result
    @property
    def active_sheet(self):
        """The currently active Worksheet of this workbook."""
        return Worksheet(self.xlWorkbook.ActiveSheet)

    @cache_result
    @property
    def worksheets(self):
        """All Worksheets in this workbook."""
        return [Worksheet(xlSheet) for xlSheet in self.xlWorkbook.Worksheets]

    def view(self, obj, name=None, to=None):
        """Writes a Python iterable to an available location in the workbook, with an optional header (name).

        The optional `to` argument specifies a location hint.
        If None, the values are written to an empty column on the active sheet.
        If `to` is a Range, the values are written to it (like Range.set, but with the header prepended)
        If `to` is a Table, the values are written to a new column in the table."""
        # Python version of splatting to cells.
        if to is None:
            ws = self.active_sheet
            # $$$ is this where with_hidden should come from?
            c = Range(ws.xlWorksheet.Columns(ws._findOpenColumn()), with_hidden=False)
        elif isinstance(to, table.Table):
            c = to.append_empty_columns(num_new_cols=1)
        elif isinstance(to, Range):
            c = to
        else:
            raise ValueError("'to' argument must be a Range, Table, or None")
        # write a header, this will cooperate with autofilters.
        if name is None:
            name = "values"
        # A bare string is treated as a single value, not a char sequence.
        if isinstance(obj, basestring):
            obj = [obj]
        obj = list(obj)
        vals = [name] + obj
        c.set(vals)
        # Return the data area only (drop the header row just written).
        data_only = c._adjust_unfiltered_size(rows=-1)._offset_unfiltered(rows=1)
        return data_only

    def __str__(self):
        return self.name

    def __repr__(self):
        return 'Workbook(%s)' % repr(self.name)

    @property
    def name(self):
        """The workbook's display name (filename)."""
        return self.xlWorkbook.Name

    @cache_result
    @enable_caching
    def get(self, object):
        """Returns a Range for the requested table column, named Excel range, or Excel address (ex. A1:B20)

        The returned Range has been normalized (see Range.normalize()); if possible, it is clipped to an overlapping table's data area,
        as well as the worksheet's `used range`."""
        # First look for table names.
        if type(object) is str:
            r = self._getTableColumn(object)
            if r is not None:
                return r
        # Now look for excel ranges.
        # Since this is the "smart" function, we normalize the result,
        # i.e. A:A snaps to table data within column A
        try:
            r = self.range(object)
        except ExcelRangeError:
            msg = "failed to find range or table column: %s. " + \
                  "Note that table columns must be part of an AutoFilter or Table (Ctrl+T) in Excel in order to be found."
            msg = msg % str(object)
            raise ExcelRangeError(msg)
        # normalize() may fail with an exception if the r doesn't intersect the used range
        return r.normalize()

    @cache_result
    @enable_caching
    def range(self, object):
        """Returns a Range for the requested named Excel range or Excel address.

        The returned range is not normalized, e.g. range("A:A") returns a Range containing ~1mil rows,
        rather than clipping to the 'used range' / table areas. See also `get`"""
        # Named ranges are workbook wide, but we don't have a workbook lookup function. So explicitly
        # check for them now.
        r = self._get_named_range(object)
        if r is not None:
            return r
        # $$$ Is there a better way (avoid needing the sheet), especially for sheet qualified ranges?
        xlSheet = self.xlWorkbook.ActiveSheet
        # _xlRange_parse throws an ExcelRangeError if it fails
        xlRange = _xlRange_parse(xlSheet, object)
        # Un-normalized range is returned; if the user specifies A2:D10, they probably meant it
        # $$$ what should with_hidden be here
        return Range(xlRange, with_hidden=False)

    # Get a Range by Name, or none
    def _get_named_range(self, name):
        name = name.lower()
        for n in self.xlWorkbook.Names:
            if n.Name.lower() == name:
                r = n.RefersToRange
                # excel allows Names that are bound directly to Values and not ranges on the spreadsheet
                if r is None:
                    raise NotImplementedError("Name " + name + " is not backed by a range")
                # $$$ what should with_hidden be here
                return Range(r, with_hidden=False)
        return None

    @property
    def named_ranges(self):
        """Names of all workbook-level named ranges."""
        return [n.Name for n in self.xlWorkbook.Names]

    def _getTableColumn(self, name):
        """Search through all worksheets for the given column

        Return a Range if found, else None."""
        # Check the active sheet first (most likely hit), then all sheets.
        active = self.active_sheet
        r = active._getTableColumn(name)
        if r is not None:
            return r
        for s in self.worksheets:
            r = s._getTableColumn(name)
            if r is not None:
                return r
        return None
|
alshedivat/tensorflow | refs/heads/master | tensorflow/python/autograph/converters/decorators_test.py | 9 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for decorators module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import wraps
import imp
from tensorflow.python import autograph
from tensorflow.python.autograph.converters import decorators
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.platform import test
# The Python parser only briefly captures decorators into the AST.
# The interpreter desugars them on load, and the decorated function loses any
# trace of the decorator (which is normally what you would expect, since
# they are meant to be transparent).
# However, decorators are still visible when you analyze the function
# from inside a decorator, before it was applied - as is the case
# with our conversion decorators.
def simple_decorator(f):
  """Wraps f in a single-argument callable whose result is f(a) + 1."""
  add_one_wrapper = lambda a: f(a) + 1
  return add_one_wrapper
def self_transform_decorator(transform):
  """Parameterized decorator: converts f via *transform*, then adds 1."""
  def decorator(f):
    @wraps(f)
    def wrapper(*args):
      # This removing wrapper is defined in the test below. This setup is so
      # intricate in order to simulate how we use the transformer in practice.
      # The decorator strips itself from the converted code to avoid
      # recursing into its own machinery.
      converted = transform(f, (self_transform_decorator,))
      return converted(*args) + 1
    return wrapper
  return decorator
class DecoratorsTest(converter_testing.TestCase):
  """Tests for the autograph decorators converter."""

  def _transform(self, f, strip_decorators):
    """Converts f through the decorators converter; returns the converted fn.

    Args:
      f: the function to convert.
      strip_decorators: tuple of decorator functions the converter should
        remove from the generated source.
    """
    # The converted code can only resolve names provided in this namespace.
    namespace = {
        'self_transform_decorator': self_transform_decorator,
        'simple_decorator': simple_decorator,
        'converter_testing': converter_testing,
    }
    node, ctx = self.prepare(
        f, namespace, recursive=False, strip_decorators=strip_decorators)
    node = decorators.transform(node, ctx)
    # Imports recorded during conversion must precede the generated code.
    import_line = '\n'.join(ctx.program.additional_imports)
    result, _ = compiler.ast_to_object(node, source_prefix=import_line)
    # The compiled module exposes the converted function under f's own name.
    return getattr(result, f.__name__)

  def test_noop(self):
    # An undecorated function should convert without behavior change.
    def test_fn(a):
      return a
    with self.converted(test_fn, decorators, {}) as result:
      self.assertEqual(1, result.test_fn(1))

  def test_function(self):
    @self_transform_decorator(self._transform)
    def test_fn(a):
      return a
    # 2 = 1 (a) + 1 (decorator applied exactly once)
    self.assertEqual(2, test_fn(1))

  def test_method(self):
    class TestClass(object):
      @self_transform_decorator(self._transform)
      def test_fn(self, a):
        return a
    # 2 = 1 (a) + 1 (decorator applied exactly once)
    self.assertEqual(2, TestClass().test_fn(1))

  def test_multiple_decorators(self):
    class TestClass(object):
      # Note that reversing the order of these two doesn't work.
      @classmethod
      @self_transform_decorator(self._transform)
      def test_fn(cls, a):
        return a
    # 2 = 1 (a) + 1 (decorator applied exactly once)
    self.assertEqual(2, TestClass.test_fn(1))

  def test_nested_decorators_local(self):
    @self_transform_decorator(self._transform)
    def test_fn(a):
      @simple_decorator
      def inner_fn(b):
        return b + 11
      return inner_fn(a)
    # Expected to fail because simple_decorator could not be imported.
    with self.assertRaises(transformer.AutographParseError):
      test_fn(1)

  def test_nested_decorators_imported(self):
    @self_transform_decorator(self._transform)
    def test_fn(a):
      @converter_testing.imported_decorator
      def inner_fn(b):
        return b + 11
      return inner_fn(a)
    # Work around TensorFlow's symbol suppression mechanism that causes core to
    # be invisible in the generated code.
    core_mod = imp.new_module('core')
    core_mod.converter_testing = converter_testing
    autograph.core = core_mod
    # 14 = 1 (a) + 1 (simple_decorator) + 11 (inner_fn)
    self.assertEqual(14, test_fn(1))
if __name__ == '__main__':
  # Run the test suite when executed as a script.
  test.main()
|
ndchorley/scipy | refs/heads/master | scipy/sparse/linalg/isolve/tests/test_utils.py | 131 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_raises
from scipy.sparse.linalg import utils
def test_make_system_bad_shape():
    """make_system must reject a non-square A whose shape mismatches b/x0."""
    A = np.zeros((5, 3))
    b = np.zeros(4)
    x0 = np.zeros(4)
    assert_raises(ValueError, utils.make_system, A, None, b, x0)
|
anant-dev/django | refs/heads/master | tests/template_backends/test_django.py | 53 | from template_tests.test_response import test_processor_name
from django.template.backends.django import DjangoTemplates
from django.template.library import InvalidTemplateLibrary
from django.test import RequestFactory, override_settings
from .test_dummy import TemplateStringsTests
class DjangoTemplatesTests(TemplateStringsTests):
    """Exercises the DjangoTemplates backend on top of the generic
    TemplateStringsTests suite."""

    engine_class = DjangoTemplates
    backend_name = 'django'

    @staticmethod
    def _build_engine(options):
        """Construct a DjangoTemplates backend with the given OPTIONS dict."""
        return DjangoTemplates({
            'DIRS': [],
            'APP_DIRS': False,
            'NAME': 'django',
            'OPTIONS': options,
        })

    def test_context_has_priority_over_template_context_processors(self):
        # See ticket #23789.
        engine = self._build_engine({
            'context_processors': [test_processor_name],
        })
        template = engine.from_string('{{ processors }}')
        request = RequestFactory().get('/')

        # Context processors run when the key is absent from the context...
        self.assertEqual(template.render({}, request), 'yes')
        # ...but an explicit context value overrides the processor's.
        self.assertEqual(template.render({'processors': 'no'}, request), 'no')

    @override_settings(INSTALLED_APPS=['template_backends.apps.good'])
    def test_templatetag_discovery(self):
        engine = self._build_engine({
            'libraries': {
                'alternate': 'template_backends.apps.good.templatetags.good_tags',
                'override': 'template_backends.apps.good.templatetags.good_tags',
            },
        })
        libraries = engine.engine.libraries
        expectations = [
            # libraries are discovered from installed applications
            ('good_tags',
             'template_backends.apps.good.templatetags.good_tags'),
            ('subpackage.tags',
             'template_backends.apps.good.templatetags.subpackage.tags'),
            # libraries are discovered from django.templatetags
            ('static', 'django.templatetags.static'),
            # libraries passed in OPTIONS are registered
            ('alternate',
             'template_backends.apps.good.templatetags.good_tags'),
            # libraries passed in OPTIONS take precedence over discovered ones
            ('override',
             'template_backends.apps.good.templatetags.good_tags'),
        ]
        for label, dotted_path in expectations:
            self.assertEqual(libraries[label], dotted_path)

    @override_settings(INSTALLED_APPS=['template_backends.apps.importerror'])
    def test_templatetag_discovery_import_error(self):
        """
        Import errors in tag modules should be reraised with a helpful message.
        """
        with self.assertRaisesMessage(
            InvalidTemplateLibrary,
            "ImportError raised when trying to load "
            "'template_backends.apps.importerror.templatetags.broken_tags'"
        ):
            self._build_engine({})

    def test_builtins_discovery(self):
        engine = self._build_engine({
            'builtins': ['template_backends.apps.good.templatetags.good_tags'],
        })
        # Declared builtins are appended after Django's default builtins.
        self.assertEqual(
            engine.engine.builtins, [
                'django.template.defaulttags',
                'django.template.defaultfilters',
                'django.template.loader_tags',
                'template_backends.apps.good.templatetags.good_tags',
            ]
        )
|
blighli/SublimeHighlight | refs/heads/master | pygments/lexers/web.py | 197 | # -*- coding: utf-8 -*-
"""
pygments.lexers.web
~~~~~~~~~~~~~~~~~~~
Lexers for web-related languages and markup.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import copy
from pygments.lexer import RegexLexer, ExtendedRegexLexer, bygroups, using, \
include, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Other, Punctuation, Literal
from pygments.util import get_bool_opt, get_list_opt, looks_like_xml, \
html_doctype_matches, unirange
from pygments.lexers.agile import RubyLexer
from pygments.lexers.compiled import ScalaLexer
# Public API of this module: one entry per lexer class defined below.
__all__ = ['HtmlLexer', 'XmlLexer', 'JavascriptLexer', 'JsonLexer', 'CssLexer',
           'PhpLexer', 'ActionScriptLexer', 'XsltLexer', 'ActionScript3Lexer',
           'MxmlLexer', 'HaxeLexer', 'HamlLexer', 'SassLexer', 'ScssLexer',
           'ObjectiveJLexer', 'CoffeeScriptLexer', 'LiveScriptLexer',
           'DuelLexer', 'ScamlLexer', 'JadeLexer', 'XQueryLexer',
           'DtdLexer', 'DartLexer', 'LassoLexer', 'QmlLexer', 'TypeScriptLexer']
class JavascriptLexer(RegexLexer):
    """
    For JavaScript source code.
    """

    name = 'JavaScript'
    aliases = ['js', 'javascript']
    filenames = ['*.js', ]
    mimetypes = ['application/javascript', 'application/x-javascript',
                 'text/x-javascript', 'text/javascript', ]

    flags = re.DOTALL
    tokens = {
        # Shared comment/whitespace rules, included by other states.
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'<!--', Comment),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline)
        ],
        # Entered after tokens that may legally be followed by a regex
        # literal, so that `/` is parsed as a regex rather than division.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            (r'', Text, '#pop')
        ],
        # Recovery state when a `/` did not start a valid regex literal.
        'badregex': [
            (r'\n', Text, '#pop')
        ],
        'root': [
            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|void|yield|'
             r'this)\b', Keyword, 'slashstartsregex'),
            (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
            (r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
             r'extends|final|float|goto|implements|import|int|interface|long|native|'
             r'package|private|protected|public|short|static|super|synchronized|throws|'
             r'transient|volatile)\b', Keyword.Reserved),
            (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
             r'window)\b', Name.Builtin),
            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
class JsonLexer(RegexLexer):
    """
    For JSON data structures.

    *New in Pygments 1.5.*
    """

    name = 'JSON'
    aliases = ['json']
    filenames = ['*.json']
    mimetypes = [ 'application/json', ]

    # Number-literal fragments, composed below via %-interpolation on vars().
    # integer part of a number
    int_part = r'-?(0|[1-9]\d*)'

    # fractional part of a number
    frac_part = r'\.\d+'

    # exponential part of a number
    exp_part = r'[eE](\+|-)?\d+'

    flags = re.DOTALL
    tokens = {
        'whitespace': [
            (r'\s+', Text),
        ],

        # represents a simple terminal value
        'simplevalue': [
            (r'(true|false|null)\b', Keyword.Constant),
            (('%(int_part)s(%(frac_part)s%(exp_part)s|'
              '%(exp_part)s|%(frac_part)s)') % vars(),
             Number.Float),
            (int_part, Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
        ],

        # the right hand side of an object, after the attribute name
        'objectattribute': [
            include('value'),
            (r':', Punctuation),
            # comma terminates the attribute but expects more
            (r',', Punctuation, '#pop'),
            # a closing bracket terminates the entire object, so pop twice
            (r'}', Punctuation, ('#pop', '#pop')),
        ],

        # a json object - { attr, attr, ... }
        'objectvalue': [
            include('whitespace'),
            (r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'),
            (r'}', Punctuation, '#pop'),
        ],

        # json array - [ value, value, ... ]
        'arrayvalue': [
            include('whitespace'),
            include('value'),
            (r',', Punctuation),
            (r']', Punctuation, '#pop'),
        ],

        # a json value - either a simple value or a complex value (object or array)
        'value': [
            include('whitespace'),
            include('simplevalue'),
            (r'{', Punctuation, 'objectvalue'),
            (r'\[', Punctuation, 'arrayvalue'),
        ],

        # the root of a json document should be a value
        'root': [
            include('value'),
        ],
    }

JSONLexer = JsonLexer  # for backwards compatibility with Pygments 1.5
class ActionScriptLexer(RegexLexer):
    """
    For ActionScript source code.

    *New in Pygments 0.9.*
    """

    name = 'ActionScript'
    aliases = ['as', 'actionscript']
    filenames = ['*.as']
    mimetypes = ['application/x-actionscript3', 'text/x-actionscript3',
                 'text/actionscript3']

    flags = re.DOTALL
    # Single flat state: keywords, the large Flash-API builtin list,
    # global functions, then identifiers/numbers/strings.
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex),
            (r'[~\^\*!%&<>\|+=:;,/?\\-]+', Operator),
            (r'[{}\[\]();.]+', Punctuation),
            (r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
             r'throw|try|catch|var|with|new|typeof|arguments|instanceof|this|'
             r'switch)\b', Keyword),
            (r'(class|public|final|internal|native|override|private|protected|'
             r'static|import|extends|implements|interface|intrinsic|return|super|'
             r'dynamic|function|const|get|namespace|package|set)\b',
             Keyword.Declaration),
            (r'(true|false|null|NaN|Infinity|-Infinity|undefined|Void)\b',
             Keyword.Constant),
            # Flash/ActionScript standard-library class names.
            (r'(Accessibility|AccessibilityProperties|ActionScriptVersion|'
             r'ActivityEvent|AntiAliasType|ApplicationDomain|AsBroadcaster|Array|'
             r'AsyncErrorEvent|AVM1Movie|BevelFilter|Bitmap|BitmapData|'
             r'BitmapDataChannel|BitmapFilter|BitmapFilterQuality|BitmapFilterType|'
             r'BlendMode|BlurFilter|Boolean|ByteArray|Camera|Capabilities|CapsStyle|'
             r'Class|Color|ColorMatrixFilter|ColorTransform|ContextMenu|'
             r'ContextMenuBuiltInItems|ContextMenuEvent|ContextMenuItem|'
             r'ConvultionFilter|CSMSettings|DataEvent|Date|DefinitionError|'
             r'DeleteObjectSample|Dictionary|DisplacmentMapFilter|DisplayObject|'
             r'DisplacmentMapFilterMode|DisplayObjectContainer|DropShadowFilter|'
             r'Endian|EOFError|Error|ErrorEvent|EvalError|Event|EventDispatcher|'
             r'EventPhase|ExternalInterface|FileFilter|FileReference|'
             r'FileReferenceList|FocusDirection|FocusEvent|Font|FontStyle|FontType|'
             r'FrameLabel|FullScreenEvent|Function|GlowFilter|GradientBevelFilter|'
             r'GradientGlowFilter|GradientType|Graphics|GridFitType|HTTPStatusEvent|'
             r'IBitmapDrawable|ID3Info|IDataInput|IDataOutput|IDynamicPropertyOutput'
             r'IDynamicPropertyWriter|IEventDispatcher|IExternalizable|'
             r'IllegalOperationError|IME|IMEConversionMode|IMEEvent|int|'
             r'InteractiveObject|InterpolationMethod|InvalidSWFError|InvokeEvent|'
             r'IOError|IOErrorEvent|JointStyle|Key|Keyboard|KeyboardEvent|KeyLocation|'
             r'LineScaleMode|Loader|LoaderContext|LoaderInfo|LoadVars|LocalConnection|'
             r'Locale|Math|Matrix|MemoryError|Microphone|MorphShape|Mouse|MouseEvent|'
             r'MovieClip|MovieClipLoader|Namespace|NetConnection|NetStatusEvent|'
             r'NetStream|NewObjectSample|Number|Object|ObjectEncoding|PixelSnapping|'
             r'Point|PrintJob|PrintJobOptions|PrintJobOrientation|ProgressEvent|Proxy|'
             r'QName|RangeError|Rectangle|ReferenceError|RegExp|Responder|Sample|Scene|'
             r'ScriptTimeoutError|Security|SecurityDomain|SecurityError|'
             r'SecurityErrorEvent|SecurityPanel|Selection|Shape|SharedObject|'
             r'SharedObjectFlushStatus|SimpleButton|Socket|Sound|SoundChannel|'
             r'SoundLoaderContext|SoundMixer|SoundTransform|SpreadMethod|Sprite|'
             r'StackFrame|StackOverflowError|Stage|StageAlign|StageDisplayState|'
             r'StageQuality|StageScaleMode|StaticText|StatusEvent|String|StyleSheet|'
             r'SWFVersion|SyncEvent|SyntaxError|System|TextColorType|TextField|'
             r'TextFieldAutoSize|TextFieldType|TextFormat|TextFormatAlign|'
             r'TextLineMetrics|TextRenderer|TextSnapshot|Timer|TimerEvent|Transform|'
             r'TypeError|uint|URIError|URLLoader|URLLoaderDataFormat|URLRequest|'
             r'URLRequestHeader|URLRequestMethod|URLStream|URLVariabeles|VerifyError|'
             r'Video|XML|XMLDocument|XMLList|XMLNode|XMLNodeType|XMLSocket|XMLUI)\b',
             Name.Builtin),
            # Global built-in functions.
            (r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
             r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
             r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
             r'unescape)\b',Name.Function),
            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
class ActionScript3Lexer(RegexLexer):
    """
    For ActionScript 3 source code.

    *New in Pygments 0.11.*
    """

    name = 'ActionScript 3'
    aliases = ['as3', 'actionscript3']
    filenames = ['*.as']
    mimetypes = ['application/x-actionscript', 'text/x-actionscript',
                 'text/actionscript']

    # Identifier fragments reused to build the declaration rules below.
    identifier = r'[$a-zA-Z_][a-zA-Z0-9_]*'
    typeidentifier = identifier + '(?:\.<\w+>)?'

    flags = re.DOTALL | re.MULTILINE
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(function\s+)(' + identifier + r')(\s*)(\()',
             bygroups(Keyword.Declaration, Name.Function, Text, Operator),
             'funcparams'),
            (r'(var|const)(\s+)(' + identifier + r')(\s*)(:)(\s*)(' +
             typeidentifier + r')',
             bygroups(Keyword.Declaration, Text, Name, Text, Punctuation, Text,
                      Keyword.Type)),
            (r'(import|package)(\s+)((?:' + identifier + r'|\.)+)(\s*)',
             bygroups(Keyword, Text, Name.Namespace, Text)),
            (r'(new)(\s+)(' + typeidentifier + r')(\s*)(\()',
             bygroups(Keyword, Text, Keyword.Type, Text, Operator)),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'/(\\\\|\\/|[^\n])*/[gisx]*', String.Regex),
            (r'(\.)(' + identifier + r')', bygroups(Operator, Name.Attribute)),
            (r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
             r'throw|try|catch|with|new|typeof|arguments|instanceof|this|'
             r'switch|import|include|as|is)\b',
             Keyword),
            (r'(class|public|final|internal|native|override|private|protected|'
             r'static|import|extends|implements|interface|intrinsic|return|super|'
             r'dynamic|function|const|get|namespace|package|set)\b',
             Keyword.Declaration),
            (r'(true|false|null|NaN|Infinity|-Infinity|undefined|void)\b',
             Keyword.Constant),
            (r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
             r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
             r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
             r'unescape)\b', Name.Function),
            (identifier, Name),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[~\^\*!%&<>\|+=:;,/?\\{}\[\]().-]+', Operator),
        ],
        # Parameter list of a function declaration; 'defval' handles defaults.
        'funcparams': [
            (r'\s+', Text),
            (r'(\s*)(\.\.\.)?(' + identifier + r')(\s*)(:)(\s*)(' +
             typeidentifier + r'|\*)(\s*)',
             bygroups(Text, Punctuation, Name, Text, Operator, Text,
                      Keyword.Type, Text), 'defval'),
            (r'\)', Operator, 'type')
        ],
        # Optional return-type annotation after the closing paren.
        'type': [
            (r'(\s*)(:)(\s*)(' + typeidentifier + r'|\*)',
             bygroups(Text, Operator, Text, Keyword.Type), '#pop:2'),
            (r'\s*', Text, '#pop:2')
        ],
        # Optional default value for a parameter.
        'defval': [
            (r'(=)(\s*)([^(),]+)(\s*)(,?)',
             bygroups(Operator, Text, using(this), Text, Operator), '#pop'),
            (r',?', Operator, '#pop')
        ]
    }

    def analyse_text(text):
        # A leading "name : Type" pattern is weak evidence of AS3.
        if re.match(r'\w+\s*:\s*\w', text):
            return 0.3
        return 0
class CssLexer(RegexLexer):
    """
    For CSS (Cascading Style Sheets).
    """

    name = 'CSS'
    aliases = ['css']
    filenames = ['*.css']
    mimetypes = ['text/css']

    tokens = {
        'root': [
            include('basics'),
        ],
        # Selector-level syntax: classes, ids, pseudo-classes, at-rules.
        'basics': [
            (r'\s+', Text),
            (r'/\*(?:.|\n)*?\*/', Comment),
            (r'{', Punctuation, 'content'),
            (r'\:[a-zA-Z0-9_-]+', Name.Decorator),
            (r'\.[a-zA-Z0-9_-]+', Name.Class),
            (r'\#[a-zA-Z0-9_-]+', Name.Function),
            (r'@[a-zA-Z0-9_-]+', Keyword, 'atrule'),
            (r'[a-zA-Z0-9_-]+', Name.Tag),
            (r'[~\^\*!%&\[\]\(\)<>\|+=@:;,./?-]', Operator),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single)
        ],
        # Inside an @-rule prelude, up to its ';' or '{'.
        'atrule': [
            (r'{', Punctuation, 'atcontent'),
            (r';', Punctuation, '#pop'),
            include('basics'),
        ],
        'atcontent': [
            include('basics'),
            (r'}', Punctuation, '#pop:2'),
        ],
        # Inside a declaration block: property names/values and colors.
        'content': [
            (r'\s+', Text),
            (r'}', Punctuation, '#pop'),
            (r'url\(.*?\)', String.Other),
            (r'^@.*?$', Comment.Preproc),
            (r'(azimuth|background-attachment|background-color|'
             r'background-image|background-position|background-repeat|'
             r'background|border-bottom-color|border-bottom-style|'
             r'border-bottom-width|border-left-color|border-left-style|'
             r'border-left-width|border-right|border-right-color|'
             r'border-right-style|border-right-width|border-top-color|'
             r'border-top-style|border-top-width|border-bottom|'
             r'border-collapse|border-left|border-width|border-color|'
             r'border-spacing|border-style|border-top|border|caption-side|'
             r'clear|clip|color|content|counter-increment|counter-reset|'
             r'cue-after|cue-before|cue|cursor|direction|display|'
             r'elevation|empty-cells|float|font-family|font-size|'
             r'font-size-adjust|font-stretch|font-style|font-variant|'
             r'font-weight|font|height|letter-spacing|line-height|'
             r'list-style-type|list-style-image|list-style-position|'
             r'list-style|margin-bottom|margin-left|margin-right|'
             r'margin-top|margin|marker-offset|marks|max-height|max-width|'
             r'min-height|min-width|opacity|orphans|outline|outline-color|'
             r'outline-style|outline-width|overflow(?:-x|-y)?|padding-bottom|'
             r'padding-left|padding-right|padding-top|padding|page|'
             r'page-break-after|page-break-before|page-break-inside|'
             r'pause-after|pause-before|pause|pitch|pitch-range|'
             r'play-during|position|quotes|richness|right|size|'
             r'speak-header|speak-numeral|speak-punctuation|speak|'
             r'speech-rate|stress|table-layout|text-align|text-decoration|'
             r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
             r'vertical-align|visibility|voice-family|volume|white-space|'
             r'widows|width|word-spacing|z-index|bottom|left|'
             r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
             r'behind|below|bidi-override|blink|block|bold|bolder|both|'
             r'capitalize|center-left|center-right|center|circle|'
             r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
             r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
             r'decimal|default|digits|disc|dotted|double|e-resize|embed|'
             r'extra-condensed|extra-expanded|expanded|fantasy|far-left|'
             r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|'
             r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
             r'inherit|inline-table|inline|inset|inside|invert|italic|'
             r'justify|katakana-iroha|katakana|landscape|larger|large|'
             r'left-side|leftwards|level|lighter|line-through|list-item|'
             r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
             r'lower|low|medium|message-box|middle|mix|monospace|'
             r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
             r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|'
             r'open-quote|outset|outside|overline|pointer|portrait|px|'
             r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
             r'rightwards|s-resize|sans-serif|scroll|se-resize|'
             r'semi-condensed|semi-expanded|separate|serif|show|silent|'
             r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
             r'spell-out|square|static|status-bar|super|sw-resize|'
             r'table-caption|table-cell|table-column|table-column-group|'
             r'table-footer-group|table-header-group|table-row|'
             r'table-row-group|text|text-bottom|text-top|thick|thin|'
             r'transparent|ultra-condensed|ultra-expanded|underline|'
             r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
             r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
             r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Keyword),
            # CSS named colors.
            (r'(indigo|gold|firebrick|indianred|yellow|darkolivegreen|'
             r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|'
             r'mediumslateblue|black|springgreen|crimson|lightsalmon|brown|'
             r'turquoise|olivedrab|cyan|silver|skyblue|gray|darkturquoise|'
             r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|teal|'
             r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|'
             r'violet|navy|orchid|blue|ghostwhite|honeydew|cornflowerblue|'
             r'darkblue|darkkhaki|mediumpurple|cornsilk|red|bisque|slategray|'
             r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|'
             r'gainsboro|mediumturquoise|floralwhite|coral|purple|lightgrey|'
             r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|'
             r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|'
             r'lightcoral|orangered|navajowhite|lime|palegreen|burlywood|'
             r'seashell|mediumspringgreen|fuchsia|papayawhip|blanchedalmond|'
             r'peru|aquamarine|white|darkslategray|ivory|dodgerblue|'
             r'lemonchiffon|chocolate|orange|forestgreen|slateblue|olive|'
             r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|'
             r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|'
             r'plum|aqua|darkgoldenrod|maroon|sandybrown|magenta|tan|'
             r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|'
             r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|'
             r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|'
             r'lightslategray|lawngreen|lightgreen|tomato|hotpink|'
             r'lightyellow|lavenderblush|linen|mediumaquamarine|green|'
             r'blueviolet|peachpuff)\b', Name.Builtin),
            (r'\!important', Comment.Preproc),
            (r'/\*(?:.|\n)*?\*/', Comment),
            (r'\#[a-zA-Z0-9]{1,6}', Number),
            (r'[\.-]?[0-9]*[\.]?[0-9]+(em|px|\%|pt|pc|in|mm|cm|ex|s)\b', Number),
            (r'-?[0-9]+', Number),
            (r'[~\^\*!%&<>\|+=@:,./?-]+', Operator),
            (r'[\[\]();]+', Punctuation),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name)
        ]
    }
class ObjectiveJLexer(RegexLexer):
    """
    For Objective-J source code with preprocessor directives.

    *New in Pygments 1.3.*
    """

    name = 'Objective-J'
    aliases = ['objective-j', 'objectivej', 'obj-j', 'objj']
    filenames = ['*.j']
    mimetypes = ['text/x-objective-j']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)*'

    flags = re.DOTALL | re.MULTILINE
    tokens = {
        'root': [
            include('whitespace'),

            # function definition
            (r'^(' + _ws + r'[\+-]' + _ws + r')([\(a-zA-Z_].*?[^\(])(' + _ws + '{)',
             bygroups(using(this), using(this, state='function_signature'),
                      using(this))),

            # class definition
            (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
             'classname'),
            (r'(@class|@protocol)(\s*)', bygroups(Keyword, Text),
             'forward_classname'),
            (r'(\s*)(@end)(\s*)', bygroups(Text, Keyword, Text)),

            include('statements'),
            ('[{\(\)}]', Punctuation),
            (';', Punctuation),
        ],
        # Whitespace, comments and @import / #include preprocessor lines.
        'whitespace': [
            (r'(@import)(\s+)("(?:\\\\|\\"|[^"])*")',
             bygroups(Comment.Preproc, Text, String.Double)),
            (r'(@import)(\s+)(<(?:\\\\|\\>|[^>])*>)',
             bygroups(Comment.Preproc, Text, String.Double)),
            (r'(#(?:include|import))(\s+)("(?:\\\\|\\"|[^"])*")',
             bygroups(Comment.Preproc, Text, String.Double)),
            (r'(#(?:include|import))(\s+)(<(?:\\\\|\\>|[^>])*>)',
             bygroups(Comment.Preproc, Text, String.Double)),

            (r'#if\s+0', Comment.Preproc, 'if0'),
            (r'#', Comment.Preproc, 'macro'),

            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'<!--', Comment),
        ],
        # Entered where a '/' may start a regex literal (JS-style).
        'slashstartsregex': [
            include('whitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            (r'', Text, '#pop'),
        ],
        'badregex': [
            (r'\n', Text, '#pop'),
        ],
        # Statement-level tokens: literals, keywords, builtins, identifiers.
        'statements': [
            (r'(L|@)?"', String, 'string'),
            (r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
             String.Char),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
            (r'0[0-7]+[Ll]?', Number.Oct),
            (r'\d+[Ll]?', Number.Integer),

            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),

            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?',
             Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),

            (r'(for|in|while|do|break|return|continue|switch|case|default|if|'
             r'else|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'prototype|__proto__)\b', Keyword, 'slashstartsregex'),

            (r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'),

            (r'(@selector|@private|@protected|@public|@encode|'
             r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
             r'@synthesize|@dynamic|@for|@accessors|new)\b', Keyword),

            (r'(int|long|float|short|double|char|unsigned|signed|void|'
             r'id|BOOL|bool|boolean|IBOutlet|IBAction|SEL|@outlet|@action)\b',
             Keyword.Type),

            (r'(self|super)\b', Name.Builtin),

            (r'(TRUE|YES|FALSE|NO|Nil|nil|NULL)\b', Keyword.Constant),
            (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
            (r'(ABS|ASIN|ACOS|ATAN|ATAN2|SIN|COS|TAN|EXP|POW|CEIL|FLOOR|ROUND|'
             r'MIN|MAX|RAND|SQRT|E|LN2|LN10|LOG2E|LOG10E|PI|PI2|PI_2|SQRT1_2|'
             r'SQRT2)\b', Keyword.Constant),

            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
             r'window)\b', Name.Builtin),

            (r'([$a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r')(?=\()',
             bygroups(Name.Function, using(this))),

            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        # Class name after @interface / @implementation.
        'classname' : [
            # interface definition that inherits
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r':' + _ws +
             r')([a-zA-Z_][a-zA-Z0-9_]*)?',
             bygroups(Name.Class, using(this), Name.Class), '#pop'),
            # interface definition for a category
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r'\()([a-zA-Z_][a-zA-Z0-9_]*)(\))',
             bygroups(Name.Class, using(this), Name.Label, Text), '#pop'),
            # simple interface / implementation
            (r'([a-zA-Z_][a-zA-Z0-9_]*)', Name.Class, '#pop'),
        ],
        # Comma-separated list after @class / @protocol.
        'forward_classname' : [
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*,\s*)',
             bygroups(Name.Class, Text), '#push'),
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*;?)',
             bygroups(Name.Class, Text), '#pop'),
        ],
        # Return type and selector name of a method definition.
        'function_signature': [
            include('whitespace'),

            # start of a selector w/ parameters
            (r'(\(' + _ws + r')'                # open paren
             r'([a-zA-Z_][a-zA-Z0-9_]+)'        # return type
             r'(' + _ws + r'\)' + _ws + r')'    # close paren
             r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)',  # function name
             bygroups(using(this), Keyword.Type, using(this),
                      Name.Function), 'function_parameters'),

            # no-param function
            (r'(\(' + _ws + r')'                # open paren
             r'([a-zA-Z_][a-zA-Z0-9_]+)'        # return type
             r'(' + _ws + r'\)' + _ws + r')'    # close paren
             r'([$a-zA-Z_][a-zA-Z0-9_]+)',      # function name
             bygroups(using(this), Keyword.Type, using(this),
                      Name.Function), "#pop"),

            # no return type given, start of a selector w/ parameters
            (r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)',  # function name
             bygroups (Name.Function), 'function_parameters'),

            # no return type given, no-param function
            (r'([$a-zA-Z_][a-zA-Z0-9_]+)',      # function name
             bygroups(Name.Function), "#pop"),

            ('', Text, '#pop'),
        ],
        # Typed parameter list of a selector.
        'function_parameters': [
            include('whitespace'),

            # parameters
            (r'(\(' + _ws + ')'                 # open paren
             r'([^\)]+)'                        # type
             r'(' + _ws + r'\)' + _ws + r')'    # close paren
             r'([$a-zA-Z_][a-zA-Z0-9_]+)',      # param name
             bygroups(using(this), Keyword.Type, using(this), Text)),

            # one piece of a selector name
            (r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)',  # function name
             Name.Function),

            # smallest possible selector piece
            (r'(:)', Name.Function),

            # var args
            (r'(,' + _ws + r'\.\.\.)', using(this)),

            # param name
            (r'([$a-zA-Z_][a-zA-Z0-9_]+)', Text),
        ],
        'expression' : [
            (r'([$a-zA-Z_][a-zA-Z0-9_]*)(\()', bygroups(Name.Function,
                                                        Punctuation)),
            (r'(\))', Punctuation, "#pop"),
        ],
        # Inside a double-quoted string literal.
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        # Body of a '#' preprocessor directive.
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        # Region disabled by '#if 0'; nests via '#push'.
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }

    def analyse_text(text):
        if re.search('^\s*@import\s+[<"]', text, re.MULTILINE):
            # special directive found in most Objective-J files
            return True
        return False
class HtmlLexer(RegexLexer):
    """
    For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted
    by the appropriate lexer.
    """

    name = 'HTML'
    aliases = ['html']
    filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
    mimetypes = ['text/html', 'application/xhtml+xml']

    flags = re.IGNORECASE | re.DOTALL
    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            # <script> and <style> bodies are delegated to the JS/CSS lexers.
            (r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
            (r'<\s*style\s*', Name.Tag, ('style-content', 'tag')),
            # note: this allows tag names not used in HTML like <x:with-dash>,
            # this is to support yet-unknown template engines and the like
            (r'<\s*[\w:.-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        # Inside a start tag: attributes and the closing '>'.
        'tag': [
            (r'\s+', Text),
            (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'),
            (r'[a-zA-Z0-9_:-]+', Name.Attribute),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'script-content': [
            (r'<\s*/\s*script\s*>', Name.Tag, '#pop'),
            (r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
        ],
        'style-content': [
            (r'<\s*/\s*style\s*>', Name.Tag, '#pop'),
            (r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
        ],
        # Attribute value: quoted or bare.
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }

    def analyse_text(text):
        # A recognized doctype is moderate evidence; otherwise returns None.
        if html_doctype_matches(text):
            return 0.5
class PhpLexer(RegexLexer):
    """
    For `PHP <http://www.php.net/>`_ source code.
    For PHP embedded in HTML, use the `HtmlPhpLexer`.

    Additional options accepted:

    `startinline`
        If given and ``True`` the lexer starts highlighting with
        php code (i.e.: no starting ``<?php`` required).  The default
        is ``False``.
    `funcnamehighlighting`
        If given and ``True``, highlight builtin function names
        (default: ``True``).
    `disabledmodules`
        If given, must be a list of module names whose function names
        should not be highlighted. By default all modules are highlighted
        except the special ``'unknown'`` module that includes functions
        that are known to php but are undocumented.

        To get a list of allowed modules have a look into the
        `_phpbuiltins` module:

        .. sourcecode:: pycon

            >>> from pygments.lexers._phpbuiltins import MODULES
            >>> MODULES.keys()
            ['PHP Options/Info', 'Zip', 'dba', ...]

        In fact the names of those modules match the module names from
        the php documentation.
    """

    name = 'PHP'
    aliases = ['php', 'php3', 'php4', 'php5']
    filenames = ['*.php', '*.php[345]', '*.inc']
    mimetypes = ['text/x-php']

    flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
    tokens = {
        'root': [
            (r'<\?(php)?', Comment.Preproc, 'php'),
            (r'[^<]+', Other),
            (r'<', Other)
        ],
        # Inside a <?php ... ?> region.
        'php': [
            (r'\?>', Comment.Preproc, '#pop'),
            (r'<<<(\'?)([a-zA-Z_][a-zA-Z0-9_]*)\1\n.*?\n\2\;?\n', String),
            (r'\s+', Text),
            (r'#.*?\n', Comment.Single),
            (r'//.*?\n', Comment.Single),
            # put the empty comment here, it is otherwise seen as
            # the start of a docstring
            (r'/\*\*/', Comment.Multiline),
            (r'/\*\*.*?\*/', String.Doc),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'(->|::)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Operator, Text, Name.Attribute)),
            (r'[~!%^&*+=|:.<>/?@-]+', Operator),
            (r'[\[\]{}();,]+', Punctuation),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
            (r'(function)(\s+)(&?)(\s*)',
             bygroups(Keyword, Text, Operator, Text), 'functionname'),
            (r'(const)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Name.Constant)),
            (r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
             r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
             r'FALSE|print|for|require|continue|foreach|require_once|'
             r'declare|return|default|static|do|switch|die|stdClass|'
             r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
             r'virtual|endfor|include_once|while|endforeach|global|__FILE__|'
             r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
             r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
             r'implements|public|private|protected|abstract|clone|try|'
             r'catch|throw|this|use|namespace|trait)\b', Keyword),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'\$\{\$+[a-zA-Z_][a-zA-Z0-9_]*\}', Name.Variable),
            (r'\$+[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable),
            (r'[\\a-zA-Z_][\\a-zA-Z0-9_]*', Name.Other),
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+', Number.Float),
            (r'0[0-7]+', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+', Number.Integer),
            (r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
            (r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
            (r'"', String.Double, 'string'),
        ],
        'classname': [
            (r'[a-zA-Z_][\\a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        'functionname': [
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
        ],
        # Double-quoted string with variable interpolation.
        'string': [
            (r'"', String.Double, '#pop'),
            (r'[^{$"\\]+', String.Double),
            (r'\\([nrt\"$\\]|[0-7]{1,3}|x[0-9A-Fa-f]{1,2})', String.Escape),
            (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\[\S+\]|->[a-zA-Z_][a-zA-Z0-9_]*)?',
             String.Interpol),
            (r'(\{\$\{)(.*?)(\}\})',
             bygroups(String.Interpol, using(this, _startinline=True),
                      String.Interpol)),
            (r'(\{)(\$.*?)(\})',
             bygroups(String.Interpol, using(this, _startinline=True),
                      String.Interpol)),
            (r'(\$\{)(\S+)(\})',
             bygroups(String.Interpol, Name.Variable, String.Interpol)),
            (r'[${\\]+', String.Double)
        ],
    }

    def __init__(self, **options):
        """Read the lexer options documented in the class docstring and
        collect the set of builtin function names to highlight."""
        self.funcnamehighlighting = get_bool_opt(
            options, 'funcnamehighlighting', True)
        self.disabledmodules = get_list_opt(
            options, 'disabledmodules', ['unknown'])
        self.startinline = get_bool_opt(options, 'startinline', False)

        # private option argument for the lexer itself
        if '_startinline' in options:
            self.startinline = options.pop('_startinline')

        # collect activated functions in a set
        self._functions = set()
        if self.funcnamehighlighting:
            from pygments.lexers._phpbuiltins import MODULES
            # Use items() instead of the Python-2-only iteritems() so this
            # code runs unchanged on both Python 2 and Python 3.
            for key, value in MODULES.items():
                if key not in self.disabledmodules:
                    self._functions.update(value)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """Yield tokens, upgrading Name.Other to Name.Builtin for names that
        appear in the collected PHP builtin-function set."""
        stack = ['root']
        if self.startinline:
            stack.append('php')
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text, stack):
            if token is Name.Other:
                if value in self._functions:
                    yield index, Name.Builtin, value
                    continue
            yield index, token, value

    def analyse_text(text):
        # Score PHP open/close tags; '<?xml' must not count as PHP.
        rv = 0.0
        if re.search(r'<\?(?!xml)', text):
            rv += 0.3
        if '?>' in text:
            rv += 0.1
        return rv
class DtdLexer(RegexLexer):
    """
    A lexer for DTDs (Document Type Definitions).

    *New in Pygments 1.5.*
    """
    # DOTALL lets comment/declaration rules span multiple lines.
    flags = re.MULTILINE | re.DOTALL
    name = 'DTD'
    aliases = ['dtd']
    filenames = ['*.dtd']
    mimetypes = ['application/xml-dtd']
    tokens = {
        'root': [
            include('common'),
            # Each <!... declaration pushes a dedicated state for its body.
            (r'(<!ELEMENT)(\s+)(\S+)',
                bygroups(Keyword, Text, Name.Tag), 'element'),
            (r'(<!ATTLIST)(\s+)(\S+)',
                bygroups(Keyword, Text, Name.Tag), 'attlist'),
            (r'(<!ENTITY)(\s+)(\S+)',
                bygroups(Keyword, Text, Name.Entity), 'entity'),
            (r'(<!NOTATION)(\s+)(\S+)',
                bygroups(Keyword, Text, Name.Tag), 'notation'),
            (r'(<!\[)([^\[\s]+)(\s*)(\[)', # conditional sections
                bygroups(Keyword, Name.Entity, Text, Keyword)),
            (r'(<!DOCTYPE)(\s+)([^>\s]+)',
                bygroups(Keyword, Text, Name.Tag)),
            (r'PUBLIC|SYSTEM', Keyword.Constant),
            (r'[\[\]>]', Keyword),
        ],
        # rules shared by every declaration body state
        'common': [
            (r'\s+', Text),
            (r'(%|&)[^;]*;', Name.Entity),
            ('<!--', Comment, 'comment'),
            (r'[(|)*,?+]', Operator),
            (r'"[^"]*"', String.Double),
            (r'\'[^\']*\'', String.Single),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'element': [
            include('common'),
            (r'EMPTY|ANY|#PCDATA', Keyword.Constant),
            (r'[^>\s\|()?+*,]+', Name.Tag),
            (r'>', Keyword, '#pop'),
        ],
        'attlist': [
            include('common'),
            (r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION',
             Keyword.Constant),
            (r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
            (r'xml:space|xml:lang', Keyword.Reserved),
            (r'[^>\s\|()?+*,]+', Name.Attribute),
            (r'>', Keyword, '#pop'),
        ],
        'entity': [
            include('common'),
            (r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
            (r'[^>\s\|()?+*,]+', Name.Entity),
            (r'>', Keyword, '#pop'),
        ],
        'notation': [
            include('common'),
            (r'SYSTEM|PUBLIC', Keyword.Constant),
            (r'[^>\s\|()?+*,]+', Name.Attribute),
            (r'>', Keyword, '#pop'),
        ],
    }
    def analyse_text(text):
        """Score non-XML text containing DTD declaration keywords.

        Implicitly returns ``None`` (i.e. no confidence) otherwise.
        """
        if not looks_like_xml(text) and \
            ('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
            return 0.8
class XmlLexer(RegexLexer):
    """
    Generic lexer for XML (eXtensible Markup Language).
    """
    flags = re.MULTILINE | re.DOTALL | re.UNICODE
    name = 'XML'
    aliases = ['xml']
    filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd',
                 '*.wsdl', '*.wsf']
    mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
                 'application/rss+xml', 'application/atom+xml']
    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),
            # doctype and other <!...> declarations
            ('<![^>]*>', Comment.Preproc),
            # opening tag pushes 'tag' for its attributes; closing tag
            # is consumed as a whole
            (r'<\s*[\w:.-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'tag': [
            (r'\s+', Text),
            (r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('\s+', Text),
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            # unquoted attribute value
            (r'[^\s>]+', String, '#pop'),
        ],
    }
    def analyse_text(text):
        """Score text whose start looks like an XML document."""
        if looks_like_xml(text):
            return 0.5
class XsltLexer(XmlLexer):
    """
    A lexer for XSLT.

    *New in Pygments 0.10.*
    """
    name = 'XSLT'
    aliases = ['xslt']
    filenames = ['*.xsl', '*.xslt', '*.xpl']  # xpl is XProc
    mimetypes = ['application/xsl+xml', 'application/xslt+xml']
    # local names of XSLT instruction elements (the part after "xsl:")
    EXTRA_KEYWORDS = set([
        'apply-imports', 'apply-templates', 'attribute',
        'attribute-set', 'call-template', 'choose', 'comment',
        'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
        'for-each', 'if', 'import', 'include', 'key', 'message',
        'namespace-alias', 'number', 'otherwise', 'output', 'param',
        'preserve-space', 'processing-instruction', 'sort',
        'strip-space', 'stylesheet', 'template', 'text', 'transform',
        'value-of', 'variable', 'when', 'with-param'
    ])
    def get_tokens_unprocessed(self, text):
        """Lex as XML, retagging known XSLT instruction tags as ``Keyword``.

        Delegates to :class:`XmlLexer` and upgrades ``Name.Tag`` tokens
        whose local name after the ``xsl:`` prefix is in EXTRA_KEYWORDS.
        """
        for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
            # Only tag tokens can be XSLT instructions: test the (cheap)
            # token identity first so the regex is not run for every
            # text/comment/entity token in the stream.
            if token is Name.Tag:
                m = re.match('</?xsl:([^>]*)/?>?', value)
                if m and m.group(1) in self.EXTRA_KEYWORDS:
                    yield index, Keyword, value
                    continue
            yield index, token, value
    def analyse_text(text):
        """Score XML-looking text that mentions an ``<xsl`` tag."""
        if looks_like_xml(text) and '<xsl' in text:
            return 0.8
class MxmlLexer(RegexLexer):
    """
    For MXML markup.
    Nested AS3 in <script> tags is highlighted by the appropriate lexer.

    *New in Pygments 1.1.*
    """
    flags = re.MULTILINE | re.DOTALL
    name = 'MXML'
    aliases = ['mxml']
    filenames = ['*.mxml']
    # Fixed typo: this attribute was misspelled "mimetimes", so the
    # lexer could never be selected by MIME type.
    mimetypes = ['text/xml', 'application/xml']
    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            # CDATA bodies contain ActionScript 3; delegate to its lexer
            (r'(\<\!\[CDATA\[)(.*?)(\]\]\>)',
             bygroups(String, using(ActionScript3Lexer), String)),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            (r'<\s*[a-zA-Z0-9:._-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[a-zA-Z0-9:._-]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'tag': [
            (r'\s+', Text),
            (r'[a-zA-Z0-9_.:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('\s+', Text),
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
class HaxeLexer(ExtendedRegexLexer):
    """
    For Haxe source code (http://haxe.org/).

    *New in Pygments 1.3.*
    """
    name = 'Haxe'
    aliases = ['hx', 'Haxe', 'haxe', 'haXe', 'hxsl']
    filenames = ['*.hx', '*.hxsl']
    mimetypes = ['text/haxe', 'text/x-haxe', 'text/x-hx']
    # keywords extracted from lexer.mll in the haxe compiler source
    keyword = (r'(?:function|class|static|var|if|else|while|do|for|'
               r'break|return|continue|extends|implements|import|'
               r'switch|case|default|public|private|try|untyped|'
               r'catch|new|this|throw|extern|enum|in|interface|'
               r'cast|override|dynamic|typedef|package|'
               r'inline|using|null|true|false|abstract)\b')
    # idtype in lexer.mll
    typeid = r'_*[A-Z][_a-zA-Z0-9]*'
    # combined ident and dollar and idtype
    ident = r'(?:_*[a-z][_a-zA-Z0-9]*|_+[0-9][_a-zA-Z0-9]*|' + typeid + \
        '|_+|\$[_a-zA-Z0-9]+)'
    binop = (r'(?:%=|&=|\|=|\^=|\+=|\-=|\*=|/=|<<=|>\s*>\s*=|>\s*>\s*>\s*=|==|'
             r'!=|<=|>\s*=|&&|\|\||<<|>>>|>\s*>|\.\.\.|<|>|%|&|\||\^|\+|\*|'
             r'/|\-|=>|=)')
    # ident except keywords
    ident_no_keyword = r'(?!' + keyword + ')' + ident
    flags = re.DOTALL | re.MULTILINE
    # NOTE(review): mutable class-level attribute, shared by all instances
    # of this lexer -- concurrent or interleaved use could leak
    # preprocessor state between runs; confirm whether this should be
    # per-instance state.
    preproc_stack = []
    def preproc_callback(self, match, ctx):
        """Handle ``#if/#elseif/#else/#end/#error``: save/restore the
        lexer state stack around conditional-compilation branches."""
        proc = match.group(2)
        if proc == 'if':
            # store the current stack
            self.preproc_stack.append(ctx.stack[:])
        elif proc in ['else', 'elseif']:
            # restore the stack back to right before #if
            if self.preproc_stack: ctx.stack = self.preproc_stack[-1][:]
        elif proc == 'end':
            # remove the saved stack of previous #if
            if self.preproc_stack: self.preproc_stack.pop()
        # #if and #elseif should follow by an expr
        if proc in ['if', 'elseif']:
            ctx.stack.append('preproc-expr')
        # #error can be optionally follow by the error msg
        if proc in ['error']:
            ctx.stack.append('preproc-error')
        yield match.start(), Comment.Preproc, '#' + proc
        ctx.pos = match.end()
    tokens = {
        'root': [
            include('spaces'),
            include('meta'),
            (r'(?:package)\b', Keyword.Namespace, ('semicolon', 'package')),
            (r'(?:import)\b', Keyword.Namespace, ('semicolon', 'import')),
            (r'(?:using)\b', Keyword.Namespace, ('semicolon', 'using')),
            (r'(?:extern|private)\b', Keyword.Declaration),
            (r'(?:abstract)\b', Keyword.Declaration, 'abstract'),
            (r'(?:class|interface)\b', Keyword.Declaration, 'class'),
            (r'(?:enum)\b', Keyword.Declaration, 'enum'),
            (r'(?:typedef)\b', Keyword.Declaration, 'typedef'),
            # top-level expression
            # although it is not supported in haxe, but it is common to write
            # expression in web pages the positive lookahead here is to prevent
            # an infinite loop at the EOF
            (r'(?=.)', Text, 'expr-statement'),
        ],
        # space/tab/comment/preproc
        'spaces': [
            (r'\s+', Text),
            (r'//[^\n\r]*', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'(#)(if|elseif|else|end|error)\b', preproc_callback),
        ],
        'string-single-interpol': [
            (r'\$\{', String.Interpol, ('string-interpol-close', 'expr')),
            (r'\$\$', String.Escape),
            (r'\$(?=' + ident + ')', String.Interpol, 'ident'),
            include('string-single'),
        ],
        'string-single': [
            (r"'", String.Single, '#pop'),
            (r'\\.', String.Escape),
            (r'.', String.Single),
        ],
        'string-double': [
            (r'"', String.Double, '#pop'),
            (r'\\.', String.Escape),
            (r'.', String.Double),
        ],
        'string-interpol-close': [
            (r'\$'+ident, String.Interpol),
            (r'\}', String.Interpol, '#pop'),
        ],
        'package': [
            include('spaces'),
            (ident, Name.Namespace),
            (r'\.', Punctuation, 'import-ident'),
            (r'', Text, '#pop'),
        ],
        'import': [
            include('spaces'),
            (ident, Name.Namespace),
            (r'\*', Keyword), # wildcard import
            (r'\.', Punctuation, 'import-ident'),
            (r'in', Keyword.Namespace, 'ident'),
            (r'', Text, '#pop'),
        ],
        'import-ident': [
            include('spaces'),
            (r'\*', Keyword, '#pop'), # wildcard import
            (ident, Name.Namespace, '#pop'),
        ],
        'using': [
            include('spaces'),
            (ident, Name.Namespace),
            (r'\.', Punctuation, 'import-ident'),
            (r'', Text, '#pop'),
        ],
        'preproc-error': [
            (r'\s+', Comment.Preproc),
            (r"'", String.Single, ('#pop', 'string-single')),
            (r'"', String.Double, ('#pop', 'string-double')),
            (r'', Text, '#pop'),
        ],
        'preproc-expr': [
            (r'\s+', Comment.Preproc),
            (r'\!', Comment.Preproc),
            (r'\(', Comment.Preproc, ('#pop', 'preproc-parenthesis')),
            (ident, Comment.Preproc, '#pop'),
            (r"'", String.Single, ('#pop', 'string-single')),
            (r'"', String.Double, ('#pop', 'string-double')),
        ],
        'preproc-parenthesis': [
            (r'\s+', Comment.Preproc),
            (r'\)', Comment.Preproc, '#pop'),
            ('', Text, 'preproc-expr-in-parenthesis'),
        ],
        'preproc-expr-chain': [
            (r'\s+', Comment.Preproc),
            (binop, Comment.Preproc, ('#pop', 'preproc-expr-in-parenthesis')),
            (r'', Text, '#pop'),
        ],
        # same as 'preproc-expr' but able to chain 'preproc-expr-chain'
        'preproc-expr-in-parenthesis': [
            (r'\s+', Comment.Preproc),
            (r'\!', Comment.Preproc),
            (r'\(', Comment.Preproc,
             ('#pop', 'preproc-expr-chain', 'preproc-parenthesis')),
            (ident, Comment.Preproc, ('#pop', 'preproc-expr-chain')),
            (r"'", String.Single,
             ('#pop', 'preproc-expr-chain', 'string-single')),
            (r'"', String.Double,
             ('#pop', 'preproc-expr-chain', 'string-double')),
        ],
        'abstract' : [
            include('spaces'),
            (r'', Text, ('#pop', 'abstract-body', 'abstract-relation',
                         'abstract-opaque', 'type-param-constraint', 'type-name')),
        ],
        'abstract-body' : [
            include('spaces'),
            (r'\{', Punctuation, ('#pop', 'class-body')),
        ],
        'abstract-opaque' : [
            include('spaces'),
            (r'\(', Punctuation, ('#pop', 'parenthesis-close', 'type')),
            (r'', Text, '#pop'),
        ],
        'abstract-relation': [
            include('spaces'),
            (r'(?:to|from)', Keyword.Declaration, 'type'),
            (r',', Punctuation),
            (r'', Text, '#pop'),
        ],
        'meta': [
            include('spaces'),
            (r'@', Name.Decorator, ('meta-body', 'meta-ident', 'meta-colon')),
        ],
        # optional colon
        'meta-colon': [
            include('spaces'),
            (r':', Name.Decorator, '#pop'),
            (r'', Text, '#pop'),
        ],
        # same as 'ident' but set token as Name.Decorator instead of Name
        'meta-ident': [
            include('spaces'),
            (ident, Name.Decorator, '#pop'),
        ],
        'meta-body': [
            include('spaces'),
            (r'\(', Name.Decorator, ('#pop', 'meta-call')),
            (r'', Text, '#pop'),
        ],
        'meta-call': [
            include('spaces'),
            (r'\)', Name.Decorator, '#pop'),
            (r'', Text, ('#pop', 'meta-call-sep', 'expr')),
        ],
        'meta-call-sep': [
            include('spaces'),
            (r'\)', Name.Decorator, '#pop'),
            (r',', Punctuation, ('#pop', 'meta-call')),
        ],
        'typedef': [
            include('spaces'),
            (r'', Text, ('#pop', 'typedef-body', 'type-param-constraint',
                         'type-name')),
        ],
        'typedef-body': [
            include('spaces'),
            (r'=', Operator, ('#pop', 'optional-semicolon', 'type')),
        ],
        'enum': [
            include('spaces'),
            (r'', Text, ('#pop', 'enum-body', 'bracket-open',
                         'type-param-constraint', 'type-name')),
        ],
        'enum-body': [
            include('spaces'),
            include('meta'),
            (r'\}', Punctuation, '#pop'),
            (ident_no_keyword, Name, ('enum-member', 'type-param-constraint')),
        ],
        'enum-member': [
            include('spaces'),
            (r'\(', Punctuation,
             ('#pop', 'semicolon', 'flag', 'function-param')),
            (r'', Punctuation, ('#pop', 'semicolon', 'flag')),
        ],
        'class': [
            include('spaces'),
            (r'', Text, ('#pop', 'class-body', 'bracket-open', 'extends',
                         'type-param-constraint', 'type-name')),
        ],
        'extends': [
            include('spaces'),
            (r'(?:extends|implements)\b', Keyword.Declaration, 'type'),
            (r',', Punctuation), # the comma is made optional here, since haxe2
                                 # requires the comma but haxe3 does not allow it
            (r'', Text, '#pop'),
        ],
        'bracket-open': [
            include('spaces'),
            (r'\{', Punctuation, '#pop'),
        ],
        'bracket-close': [
            include('spaces'),
            (r'\}', Punctuation, '#pop'),
        ],
        'class-body': [
            include('spaces'),
            include('meta'),
            (r'\}', Punctuation, '#pop'),
            (r'(?:static|public|private|override|dynamic|inline|macro)\b',
             Keyword.Declaration),
            (r'', Text, 'class-member'),
        ],
        'class-member': [
            include('spaces'),
            (r'(var)\b', Keyword.Declaration,
             ('#pop', 'optional-semicolon', 'prop')),
            (r'(function)\b', Keyword.Declaration,
             ('#pop', 'optional-semicolon', 'class-method')),
        ],
        # local function, anonymous or not
        'function-local': [
            include('spaces'),
            (r'(' + ident_no_keyword + ')?', Name.Function,
             ('#pop', 'expr', 'flag', 'function-param',
              'parenthesis-open', 'type-param-constraint')),
        ],
        'optional-expr': [
            include('spaces'),
            include('expr'),
            (r'', Text, '#pop'),
        ],
        'class-method': [
            include('spaces'),
            (ident, Name.Function, ('#pop', 'optional-expr', 'flag',
                                    'function-param', 'parenthesis-open',
                                    'type-param-constraint')),
        ],
        # function arguments
        'function-param': [
            include('spaces'),
            (r'\)', Punctuation, '#pop'),
            (r'\?', Punctuation),
            (ident_no_keyword, Name,
             ('#pop', 'function-param-sep', 'assign', 'flag')),
        ],
        'function-param-sep': [
            include('spaces'),
            (r'\)', Punctuation, '#pop'),
            (r',', Punctuation, ('#pop', 'function-param')),
        ],
        # class property
        # eg. var prop(default, null):String;
        'prop': [
            include('spaces'),
            (ident_no_keyword, Name, ('#pop', 'assign', 'flag', 'prop-get-set')),
        ],
        'prop-get-set': [
            include('spaces'),
            (r'\(', Punctuation, ('#pop', 'parenthesis-close',
                                  'prop-get-set-opt', 'comma', 'prop-get-set-opt')),
            (r'', Text, '#pop'),
        ],
        'prop-get-set-opt': [
            include('spaces'),
            (r'(?:default|null|never|dynamic|get|set)\b', Keyword, '#pop'),
            (ident_no_keyword, Text, '#pop'), #custom getter/setter
        ],
        'expr-statement': [
            include('spaces'),
            # makes semicolon optional here, just to avoid checking the last
            # one is bracket or not.
            (r'', Text, ('#pop', 'optional-semicolon', 'expr')),
        ],
        'expr': [
            include('spaces'),
            (r'@', Name.Decorator, ('#pop', 'optional-expr', 'meta-body',
                                    'meta-ident', 'meta-colon')),
            (r'(?:\+\+|\-\-|~(?!/)|!|\-)', Operator),
            (r'\(', Punctuation, ('#pop', 'expr-chain', 'parenthesis')),
            (r'(?:inline)\b', Keyword.Declaration),
            (r'(?:function)\b', Keyword.Declaration, ('#pop', 'expr-chain',
                                                      'function-local')),
            (r'\{', Punctuation, ('#pop', 'expr-chain', 'bracket')),
            (r'(?:true|false|null)\b', Keyword.Constant, ('#pop', 'expr-chain')),
            (r'(?:this)\b', Keyword, ('#pop', 'expr-chain')),
            (r'(?:cast)\b', Keyword, ('#pop', 'expr-chain', 'cast')),
            (r'(?:try)\b', Keyword, ('#pop', 'catch', 'expr')),
            (r'(?:var)\b', Keyword.Declaration, ('#pop', 'var')),
            (r'(?:new)\b', Keyword, ('#pop', 'expr-chain', 'new')),
            (r'(?:switch)\b', Keyword, ('#pop', 'switch')),
            (r'(?:if)\b', Keyword, ('#pop', 'if')),
            (r'(?:do)\b', Keyword, ('#pop', 'do')),
            (r'(?:while)\b', Keyword, ('#pop', 'while')),
            (r'(?:for)\b', Keyword, ('#pop', 'for')),
            (r'(?:untyped|throw)\b', Keyword),
            (r'(?:return)\b', Keyword, ('#pop', 'optional-expr')),
            (r'(?:macro)\b', Keyword, ('#pop', 'macro')),
            (r'(?:continue|break)\b', Keyword, '#pop'),
            (r'(?:\$\s*[a-z]\b|\$(?!'+ident+'))', Name, ('#pop', 'dollar')),
            (ident_no_keyword, Name, ('#pop', 'expr-chain')),
            # Float
            (r'\.[0-9]+', Number.Float, ('#pop', 'expr-chain')),
            (r'[0-9]+[eE][\+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')),
            (r'[0-9]+\.[0-9]*[eE][\+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')),
            (r'[0-9]+\.[0-9]+', Number.Float, ('#pop', 'expr-chain')),
            (r'[0-9]+\.(?!' + ident + '|\.\.)', Number.Float, ('#pop', 'expr-chain')),
            # Int
            (r'0x[0-9a-fA-F]+', Number.Hex, ('#pop', 'expr-chain')),
            (r'[0-9]+', Number.Integer, ('#pop', 'expr-chain')),
            # String
            (r"'", String.Single, ('#pop', 'expr-chain', 'string-single-interpol')),
            (r'"', String.Double, ('#pop', 'expr-chain', 'string-double')),
            # EReg
            (r'~/(\\\\|\\/|[^/\n])*/[gimsu]*', String.Regex, ('#pop', 'expr-chain')),
            # Array
            (r'\[', Punctuation, ('#pop', 'expr-chain', 'array-decl')),
        ],
        'expr-chain': [
            include('spaces'),
            (r'(?:\+\+|\-\-)', Operator),
            (binop, Operator, ('#pop', 'expr')),
            (r'(?:in)\b', Keyword, ('#pop', 'expr')),
            (r'\?', Operator, ('#pop', 'expr', 'ternary', 'expr')),
            (r'(\.)(' + ident_no_keyword + ')', bygroups(Punctuation, Name)),
            (r'\[', Punctuation, 'array-access'),
            (r'\(', Punctuation, 'call'),
            (r'', Text, '#pop'),
        ],
        # macro reification
        'macro': [
            include('spaces'),
            (r':', Punctuation, ('#pop', 'type')),
            (r'', Text, ('#pop', 'expr')),
        ],
        # cast can be written as "cast expr" or "cast(expr, type)"
        'cast': [
            include('spaces'),
            (r'\(', Punctuation, ('#pop', 'parenthesis-close',
                                  'cast-type', 'expr')),
            (r'', Text, ('#pop', 'expr')),
        ],
        # optionally give a type as the 2nd argument of cast()
        'cast-type': [
            include('spaces'),
            (r',', Punctuation, ('#pop', 'type')),
            (r'', Text, '#pop'),
        ],
        'catch': [
            include('spaces'),
            (r'(?:catch)\b', Keyword, ('expr', 'function-param',
                                       'parenthesis-open')),
            (r'', Text, '#pop'),
        ],
        # do-while loop
        'do': [
            include('spaces'),
            (r'', Punctuation, ('#pop', 'do-while', 'expr')),
        ],
        # the while after do
        'do-while': [
            include('spaces'),
            (r'(?:while)\b', Keyword, ('#pop', 'parenthesis',
                                       'parenthesis-open')),
        ],
        'while': [
            include('spaces'),
            (r'\(', Punctuation, ('#pop', 'expr', 'parenthesis')),
        ],
        'for': [
            include('spaces'),
            (r'\(', Punctuation, ('#pop', 'expr', 'parenthesis')),
        ],
        'if': [
            include('spaces'),
            (r'\(', Punctuation, ('#pop', 'else', 'optional-semicolon', 'expr',
                                  'parenthesis')),
        ],
        'else': [
            include('spaces'),
            (r'(?:else)\b', Keyword, ('#pop', 'expr')),
            (r'', Text, '#pop'),
        ],
        'switch': [
            include('spaces'),
            (r'', Text, ('#pop', 'switch-body', 'bracket-open', 'expr')),
        ],
        'switch-body': [
            include('spaces'),
            (r'(?:case|default)\b', Keyword, ('case-block', 'case')),
            (r'\}', Punctuation, '#pop'),
        ],
        'case': [
            include('spaces'),
            (r':', Punctuation, '#pop'),
            (r'', Text, ('#pop', 'case-sep', 'case-guard', 'expr')),
        ],
        'case-sep': [
            include('spaces'),
            (r':', Punctuation, '#pop'),
            (r',', Punctuation, ('#pop', 'case')),
        ],
        'case-guard': [
            include('spaces'),
            (r'(?:if)\b', Keyword, ('#pop', 'parenthesis', 'parenthesis-open')),
            (r'', Text, '#pop'),
        ],
        # optional multiple expr under a case
        'case-block': [
            include('spaces'),
            (r'(?!(?:case|default)\b|\})', Keyword, 'expr-statement'),
            (r'', Text, '#pop'),
        ],
        'new': [
            include('spaces'),
            (r'', Text, ('#pop', 'call', 'parenthesis-open', 'type')),
        ],
        'array-decl': [
            include('spaces'),
            (r'\]', Punctuation, '#pop'),
            (r'', Text, ('#pop', 'array-decl-sep', 'expr')),
        ],
        'array-decl-sep': [
            include('spaces'),
            (r'\]', Punctuation, '#pop'),
            (r',', Punctuation, ('#pop', 'array-decl')),
        ],
        'array-access': [
            include('spaces'),
            (r'', Text, ('#pop', 'array-access-close', 'expr')),
        ],
        'array-access-close': [
            include('spaces'),
            (r'\]', Punctuation, '#pop'),
        ],
        'comma': [
            include('spaces'),
            (r',', Punctuation, '#pop'),
        ],
        'colon': [
            include('spaces'),
            (r':', Punctuation, '#pop'),
        ],
        'semicolon': [
            include('spaces'),
            (r';', Punctuation, '#pop'),
        ],
        'optional-semicolon': [
            include('spaces'),
            (r';', Punctuation, '#pop'),
            (r'', Text, '#pop'),
        ],
        # identity that CAN be a Haxe keyword
        'ident': [
            include('spaces'),
            (ident, Name, '#pop'),
        ],
        'dollar': [
            include('spaces'),
            (r'\{', Keyword, ('#pop', 'bracket-close', 'expr')),
            (r'', Text, ('#pop', 'expr-chain')),
        ],
        'type-name': [
            include('spaces'),
            (typeid, Name, '#pop'),
        ],
        'type-full-name': [
            include('spaces'),
            (r'\.', Punctuation, 'ident'),
            (r'', Text, '#pop'),
        ],
        'type': [
            include('spaces'),
            (r'\?', Punctuation),
            (ident, Name, ('#pop', 'type-check', 'type-full-name')),
            (r'\{', Punctuation, ('#pop', 'type-check', 'type-struct')),
            (r'\(', Punctuation, ('#pop', 'type-check', 'type-parenthesis')),
        ],
        'type-parenthesis': [
            include('spaces'),
            (r'', Text, ('#pop', 'parenthesis-close', 'type')),
        ],
        'type-check': [
            include('spaces'),
            (r'->', Punctuation, ('#pop', 'type')),
            (r'<(?!=)', Punctuation, 'type-param'),
            (r'', Text, '#pop'),
        ],
        'type-struct': [
            include('spaces'),
            (r'\}', Punctuation, '#pop'),
            (r'\?', Punctuation),
            (r'>', Punctuation, ('comma', 'type')),
            (ident_no_keyword, Name, ('#pop', 'type-struct-sep', 'type', 'colon')),
            include('class-body'),
        ],
        'type-struct-sep': [
            include('spaces'),
            (r'\}', Punctuation, '#pop'),
            (r',', Punctuation, ('#pop', 'type-struct')),
        ],
        # type-param can be a normal type or a constant literal...
        'type-param-type': [
            # Float
            (r'\.[0-9]+', Number.Float, '#pop'),
            (r'[0-9]+[eE][\+\-]?[0-9]+', Number.Float, '#pop'),
            (r'[0-9]+\.[0-9]*[eE][\+\-]?[0-9]+', Number.Float, '#pop'),
            (r'[0-9]+\.[0-9]+', Number.Float, '#pop'),
            (r'[0-9]+\.(?!' + ident + '|\.\.)', Number.Float, '#pop'),
            # Int
            (r'0x[0-9a-fA-F]+', Number.Hex, '#pop'),
            (r'[0-9]+', Number.Integer, '#pop'),
            # String
            (r"'", String.Single, ('#pop', 'string-single')),
            (r'"', String.Double, ('#pop', 'string-double')),
            # EReg
            (r'~/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex, '#pop'),
            # Array
            (r'\[', Operator, ('#pop', 'array-decl')),
            include('type'),
        ],
        # type-param part of a type
        # ie. the <A,B> path in Map<A,B>
        'type-param': [
            include('spaces'),
            (r'', Text, ('#pop', 'type-param-sep', 'type-param-type')),
        ],
        'type-param-sep': [
            include('spaces'),
            (r'>', Punctuation, '#pop'),
            (r',', Punctuation, ('#pop', 'type-param')),
        ],
        # optional type-param that may include constraint
        # ie. <T:Constraint, T2:(ConstraintA,ConstraintB)>
        'type-param-constraint': [
            include('spaces'),
            (r'<(?!=)', Punctuation, ('#pop', 'type-param-constraint-sep',
                                      'type-param-constraint-flag', 'type-name')),
            (r'', Text, '#pop'),
        ],
        'type-param-constraint-sep': [
            include('spaces'),
            (r'>', Punctuation, '#pop'),
            (r',', Punctuation, ('#pop', 'type-param-constraint-sep',
                                 'type-param-constraint-flag', 'type-name')),
        ],
        # the optional constraint inside type-param
        'type-param-constraint-flag': [
            include('spaces'),
            (r':', Punctuation, ('#pop', 'type-param-constraint-flag-type')),
            (r'', Text, '#pop'),
        ],
        'type-param-constraint-flag-type': [
            include('spaces'),
            (r'\(', Punctuation, ('#pop', 'type-param-constraint-flag-type-sep',
                                  'type')),
            (r'', Text, ('#pop', 'type')),
        ],
        'type-param-constraint-flag-type-sep': [
            include('spaces'),
            (r'\)', Punctuation, '#pop'),
            (r',', Punctuation, 'type'),
        ],
        # a parenthesis expr that contain exactly one expr
        'parenthesis': [
            include('spaces'),
            (r'', Text, ('#pop', 'parenthesis-close', 'expr')),
        ],
        'parenthesis-open': [
            include('spaces'),
            (r'\(', Punctuation, '#pop'),
        ],
        'parenthesis-close': [
            include('spaces'),
            (r'\)', Punctuation, '#pop'),
        ],
        'var': [
            include('spaces'),
            (ident_no_keyword, Text, ('#pop', 'var-sep', 'assign', 'flag')),
        ],
        # optional more var decl.
        'var-sep': [
            include('spaces'),
            (r',', Punctuation, ('#pop', 'var')),
            (r'', Text, '#pop'),
        ],
        # optional assignment
        'assign': [
            include('spaces'),
            (r'=', Operator, ('#pop', 'expr')),
            (r'', Text, '#pop'),
        ],
        # optional type flag
        'flag': [
            include('spaces'),
            (r':', Punctuation, ('#pop', 'type')),
            (r'', Text, '#pop'),
        ],
        # colon as part of a ternary operator (?:)
        'ternary': [
            include('spaces'),
            (r':', Operator, '#pop'),
        ],
        # function call
        'call': [
            include('spaces'),
            (r'\)', Punctuation, '#pop'),
            (r'', Text, ('#pop', 'call-sep', 'expr')),
        ],
        # after a call param
        'call-sep': [
            include('spaces'),
            (r'\)', Punctuation, '#pop'),
            (r',', Punctuation, ('#pop', 'call')),
        ],
        # bracket can be block or object
        'bracket': [
            include('spaces'),
            (r'(?!(?:\$\s*[a-z]\b|\$(?!'+ident+')))' + ident_no_keyword, Name,
             ('#pop', 'bracket-check')),
            (r"'", String.Single, ('#pop', 'bracket-check', 'string-single')),
            (r'"', String.Double, ('#pop', 'bracket-check', 'string-double')),
            (r'', Text, ('#pop', 'block')),
        ],
        'bracket-check': [
            include('spaces'),
            (r':', Punctuation, ('#pop', 'object-sep', 'expr')), #is object
            (r'', Text, ('#pop', 'block', 'optional-semicolon', 'expr-chain')), #is block
        ],
        # code block
        'block': [
            include('spaces'),
            (r'\}', Punctuation, '#pop'),
            (r'', Text, 'expr-statement'),
        ],
        # object in key-value pairs
        'object': [
            include('spaces'),
            (r'\}', Punctuation, '#pop'),
            (r'', Text, ('#pop', 'object-sep', 'expr', 'colon', 'ident-or-string'))
        ],
        # a key of an object
        'ident-or-string': [
            include('spaces'),
            (ident_no_keyword, Name, '#pop'),
            (r"'", String.Single, ('#pop', 'string-single')),
            (r'"', String.Double, ('#pop', 'string-double')),
        ],
        # after a key-value pair in object
        'object-sep': [
            include('spaces'),
            (r'\}', Punctuation, '#pop'),
            (r',', Punctuation, ('#pop', 'object')),
        ],
    }
    def analyse_text(text):
        """Weak heuristic: an ``ident : Type`` pattern hints at Haxe."""
        if re.match(r'\w+\s*:\s*\w', text): return 0.3
def _indentation(lexer, match, ctx):
    """Lexer callback for a line's leading whitespace.

    Emits the whitespace as ``Text``, remembers it on the context for
    ``_starts_block``, and either continues an open indented block (when
    the new line is indented deeper than the block's base indentation)
    or closes it and lexes the line in the ``content`` state.
    """
    ws = match.group(0)
    yield match.start(), Text, ws
    ctx.last_indentation = ws
    ctx.pos = match.end()
    block_state = getattr(ctx, 'block_state', None)
    if (block_state and ws.startswith(ctx.block_indentation)
            and ws != ctx.block_indentation):
        # still inside the indented block: keep lexing in its state
        ctx.stack.append(block_state)
    else:
        # block (if any) has ended; reset and lex normal content
        ctx.block_state = None
        ctx.block_indentation = None
        ctx.stack.append('content')
def _starts_block(token, state):
def callback(lexer, match, ctx):
yield match.start(), token, match.group(0)
if hasattr(ctx, 'last_indentation'):
ctx.block_indentation = ctx.last_indentation
else:
ctx.block_indentation = ''
ctx.block_state = state
ctx.pos = match.end()
return callback
class HamlLexer(ExtendedRegexLexer):
    """
    For Haml markup.

    *New in Pygments 1.3.*
    """
    name = 'Haml'
    aliases = ['haml', 'HAML']
    filenames = ['*.haml']
    mimetypes = ['text/x-haml']
    flags = re.IGNORECASE
    # Haml can include " |\n" anywhere,
    # which is ignored and used to wrap long lines.
    # To accomodate this, use this custom faux dot instead.
    _dot = r'(?: \|\n(?=.* \|)|.)'
    # In certain places, a comma at the end of the line
    # allows line wrapping as well.
    _comma_dot = r'(?:,\s*\n|' + _dot + ')'
    tokens = {
        'root': [
            (r'[ \t]*\n', Text),
            # _indentation dispatches into 'content' (or an open block state)
            (r'[ \t]*', _indentation),
        ],
        # implicit-div shorthand: .class and #id
        'css': [
            (r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
            (r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
        ],
        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            # = / ~ (optionally & or ! prefixed): rest of line is Ruby code
            (r'([&!]?[=~])(' + _comma_dot + r'*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             'root'),
            (r'', Text, 'plain'),
        ],
        'content': [
            include('css'),
            (r'%[a-z0-9_:-]+', Name.Tag, 'tag'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            # "/" opens an HTML comment block covering deeper-indented lines
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'haml-comment-block'), '#pop'),
            (r'(-)(' + _comma_dot + r'*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             '#pop'),
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            include('eval-or-plain'),
        ],
        'tag': [
            include('css'),
            # {...} and [...] attribute hashes are Ruby code
            (r'\{(,\n|' + _dot + ')*?\}', using(RubyLexer)),
            (r'\[' + _dot + '*?\]', using(RubyLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],
        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            # #{...} Ruby interpolation inside plain text
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
        'html-attributes': [
            (r'\s+', Text),
            (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[a-z0-9_:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],
        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'[a-z0-9_]+', Name.Variable, '#pop'),
            (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
            (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
            (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
        ],
        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],
        'haml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],
        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
# Token states shared by the Sass and SCSS lexers (each lexer takes a
# shallow copy of these lists and appends its own rules).
common_sass_tokens = {
    'value': [
        (r'[ \t]+', Text),
        (r'[!$][\w-]+', Name.Variable),
        (r'url\(', String.Other, 'string-url'),
        (r'[a-z_-][\w-]*(?=\()', Name.Function),
        # CSS property names and value keywords
        (r'(azimuth|background-attachment|background-color|'
         r'background-image|background-position|background-repeat|'
         r'background|border-bottom-color|border-bottom-style|'
         r'border-bottom-width|border-left-color|border-left-style|'
         r'border-left-width|border-right|border-right-color|'
         r'border-right-style|border-right-width|border-top-color|'
         r'border-top-style|border-top-width|border-bottom|'
         r'border-collapse|border-left|border-width|border-color|'
         r'border-spacing|border-style|border-top|border|caption-side|'
         r'clear|clip|color|content|counter-increment|counter-reset|'
         r'cue-after|cue-before|cue|cursor|direction|display|'
         r'elevation|empty-cells|float|font-family|font-size|'
         r'font-size-adjust|font-stretch|font-style|font-variant|'
         r'font-weight|font|height|letter-spacing|line-height|'
         r'list-style-type|list-style-image|list-style-position|'
         r'list-style|margin-bottom|margin-left|margin-right|'
         r'margin-top|margin|marker-offset|marks|max-height|max-width|'
         r'min-height|min-width|opacity|orphans|outline|outline-color|'
         r'outline-style|outline-width|overflow|padding-bottom|'
         r'padding-left|padding-right|padding-top|padding|page|'
         r'page-break-after|page-break-before|page-break-inside|'
         r'pause-after|pause-before|pause|pitch|pitch-range|'
         r'play-during|position|quotes|richness|right|size|'
         r'speak-header|speak-numeral|speak-punctuation|speak|'
         r'speech-rate|stress|table-layout|text-align|text-decoration|'
         r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
         r'vertical-align|visibility|voice-family|volume|white-space|'
         r'widows|width|word-spacing|z-index|bottom|left|'
         r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
         r'behind|below|bidi-override|blink|block|bold|bolder|both|'
         r'capitalize|center-left|center-right|center|circle|'
         r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
         r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
         r'decimal|default|digits|disc|dotted|double|e-resize|embed|'
         r'extra-condensed|extra-expanded|expanded|fantasy|far-left|'
         r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|'
         r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
         r'inherit|inline-table|inline|inset|inside|invert|italic|'
         r'justify|katakana-iroha|katakana|landscape|larger|large|'
         r'left-side|leftwards|level|lighter|line-through|list-item|'
         r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
         r'lower|low|medium|message-box|middle|mix|monospace|'
         r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
         r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|'
         r'open-quote|outset|outside|overline|pointer|portrait|px|'
         r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
         r'rightwards|s-resize|sans-serif|scroll|se-resize|'
         r'semi-condensed|semi-expanded|separate|serif|show|silent|'
         r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
         r'spell-out|square|static|status-bar|super|sw-resize|'
         r'table-caption|table-cell|table-column|table-column-group|'
         r'table-footer-group|table-header-group|table-row|'
         r'table-row-group|text|text-bottom|text-top|thick|thin|'
         r'transparent|ultra-condensed|ultra-expanded|underline|'
         r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
         r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
         r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Name.Constant),
        # extended CSS color keywords
        (r'(indigo|gold|firebrick|indianred|darkolivegreen|'
         r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|'
         r'mediumslateblue|springgreen|crimson|lightsalmon|brown|'
         r'turquoise|olivedrab|cyan|skyblue|darkturquoise|'
         r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|'
         r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|'
         r'violet|orchid|ghostwhite|honeydew|cornflowerblue|'
         r'darkblue|darkkhaki|mediumpurple|cornsilk|bisque|slategray|'
         r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|'
         r'gainsboro|mediumturquoise|floralwhite|coral|lightgrey|'
         r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|'
         r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|'
         r'lightcoral|orangered|navajowhite|palegreen|burlywood|'
         r'seashell|mediumspringgreen|papayawhip|blanchedalmond|'
         r'peru|aquamarine|darkslategray|ivory|dodgerblue|'
         r'lemonchiffon|chocolate|orange|forestgreen|slateblue|'
         r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|'
         r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|'
         r'plum|darkgoldenrod|sandybrown|magenta|tan|'
         r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|'
         r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|'
         r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|'
         r'lightslategray|lawngreen|lightgreen|tomato|hotpink|'
         r'lightyellow|lavenderblush|linen|mediumaquamarine|'
         r'blueviolet|peachpuff)\b', Name.Entity),
        # the 16 basic CSS color keywords
        (r'(black|silver|gray|white|maroon|red|purple|fuchsia|green|'
         r'lime|olive|yellow|navy|blue|teal|aqua)\b', Name.Builtin),
        (r'\!(important|default)', Name.Exception),
        (r'(true|false)', Name.Pseudo),
        (r'(and|or|not)', Operator.Word),
        (r'/\*', Comment.Multiline, 'inline-comment'),
        (r'//[^\n]*', Comment.Single),
        (r'\#[a-z0-9]{1,6}', Number.Hex),
        # numbers with an optional unit or percent sign
        (r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
        (r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
        (r'#{', String.Interpol, 'interpolation'),
        (r'[~\^\*!&%<>\|+=@:,./?-]+', Operator),
        (r'[\[\]()]+', Punctuation),
        (r'"', String.Double, 'string-double'),
        (r"'", String.Single, 'string-single'),
        (r'[a-z_-][\w-]*', Name),
    ],
    'interpolation': [
        (r'\}', String.Interpol, '#pop'),
        include('value'),
    ],
    'selector': [
        (r'[ \t]+', Text),
        (r'\:', Name.Decorator, 'pseudo-class'),
        (r'\.', Name.Class, 'class'),
        (r'\#', Name.Namespace, 'id'),
        (r'[a-zA-Z0-9_-]+', Name.Tag),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'&', Keyword),
        (r'[~\^\*!&\[\]\(\)<>\|+=@:;,./?-]', Operator),
        (r'"', String.Double, 'string-double'),
        (r"'", String.Single, 'string-single'),
    ],
    'string-double': [
        (r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'"', String.Double, '#pop'),
    ],
    # NOTE(review): this state emits String.Double tokens for
    # single-quoted string content/terminator -- looks unintended,
    # but preserved as-is.
    'string-single': [
        (r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Double),
        (r'#\{', String.Interpol, 'interpolation'),
        (r"'", String.Double, '#pop'),
    ],
    'string-url': [
        (r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'\)', String.Other, '#pop'),
    ],
    'pseudo-class': [
        (r'[\w-]+', Name.Decorator),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'', Text, '#pop'),
    ],
    'class': [
        (r'[\w-]+', Name.Class),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'', Text, '#pop'),
    ],
    'id': [
        (r'[\w-]+', Name.Namespace),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'', Text, '#pop'),
    ],
    # @for loop bounds
    'for': [
        (r'(from|to|through)', Operator.Word),
        include('value'),
    ],
}
class SassLexer(ExtendedRegexLexer):
    """
    For Sass stylesheets (the indentation-based syntax).

    Uses ExtendedRegexLexer because nesting is driven by indentation:
    the ``_indentation`` callback in 'root' decides which state to
    enter for each line.

    *New in Pygments 1.3.*
    """

    name = 'Sass'
    aliases = ['sass', 'SASS']
    filenames = ['*.sass']
    mimetypes = ['text/x-sass']

    flags = re.IGNORECASE
    tokens = {
        'root': [
            # Blank (whitespace-only) lines are plain text.
            (r'[ \t]*\n', Text),
            # Leading whitespace is handed to the indentation callback,
            # which selects the state for the rest of the line.
            (r'[ \t]*', _indentation),
        ],

        'content': [
            # Line/block comments open a dedicated comment-block state
            # and return to 'root' for the next line.
            (r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'),
             'root'),
            (r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'),
             'root'),
            (r'@import', Keyword, 'import'),
            (r'@for', Keyword, 'for'),
            (r'@(debug|warn|if|while)', Keyword, 'value'),
            (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
            (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
            (r'@extend', Keyword, 'selector'),
            # Any other @-rule: treat the rest of the line as a selector.
            (r'@[a-z0-9_-]+', Keyword, 'selector'),
            # Old-style Sass mixin definition (=name) and inclusion (+name).
            (r'=[\w-]+', Name.Function, 'value'),
            (r'\+[\w-]+', Name.Decorator, 'value'),
            # Variable assignment: "$var: ..." / "!var = ..." (with
            # optional "||=" for default assignment).
            (r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))',
             bygroups(Name.Variable, Operator), 'value'),
            # Old-style attribute syntax:  :attr value
            (r':', Name.Attribute, 'old-style-attr'),
            # New-style attribute:  attr: value  — detected by lookahead
            # for a '=' or ':' not followed by a letter (i.e. not a
            # pseudo-class selector such as "a:hover").
            (r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'),
            # Fallback: anything else on the line is a selector.
            (r'', Text, 'selector'),
        ],

        'single-comment': [
            (r'.+', Comment.Single),
            (r'\n', Text, 'root'),
        ],

        'multi-comment': [
            (r'.+', Comment.Multiline),
            (r'\n', Text, 'root'),
        ],

        'import': [
            (r'[ \t]+', Text),
            (r'\S+', String),
            (r'\n', Text, 'root'),
        ],

        'old-style-attr': [
            (r'[^\s:="\[]+', Name.Attribute),
            (r'#{', String.Interpol, 'interpolation'),
            (r'[ \t]*=', Operator, 'value'),
            (r'', Text, 'value'),
        ],

        'new-style-attr': [
            (r'[^\s:="\[]+', Name.Attribute),
            (r'#{', String.Interpol, 'interpolation'),
            (r'[ \t]*[=:]', Operator, 'value'),
        ],

        'inline-comment': [
            # Consume runs that cannot close the comment or start an
            # interpolation; '#' and '*' are only eaten when safe.
            (r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline),
            (r'#\{', String.Interpol, 'interpolation'),
            (r"\*/", Comment, '#pop'),
        ],
    }
    # Pull in the token groups shared with SCSS, then add the
    # Sass-specific newline handling (a newline ends the construct and
    # returns to 'root' in the indented syntax).
    for group, common in common_sass_tokens.iteritems():
        tokens[group] = copy.copy(common)
    tokens['value'].append((r'\n', Text, 'root'))
    tokens['selector'].append((r'\n', Text, 'root'))
class ScssLexer(RegexLexer):
    """
    For SCSS stylesheets (the brace-delimited Sass syntax).

    Unlike `SassLexer`, nesting is expressed with ``{``/``}`` and ``;``,
    so a plain RegexLexer suffices — no indentation tracking needed.
    """

    name = 'SCSS'
    aliases = ['scss']
    filenames = ['*.scss']
    mimetypes = ['text/x-scss']

    flags = re.IGNORECASE | re.DOTALL
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'@import', Keyword, 'value'),
            (r'@for', Keyword, 'for'),
            (r'@(debug|warn|if|while)', Keyword, 'value'),
            (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
            (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
            (r'@extend', Keyword, 'selector'),
            # Any other @-rule.
            (r'@[a-z0-9_-]+', Keyword, 'selector'),
            # Variable assignment: "$var: value".
            (r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'),
            # Lookaheads deciding attribute vs. selector: a short token
            # ending in ';'/'}' or "name: ..." where the colon is not a
            # pseudo-class (not followed by a letter).
            (r'(?=[^;{}][;}])', Name.Attribute, 'attr'),
            (r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'),
            # Fallback: anything else is a selector.
            (r'', Text, 'selector'),
        ],

        'attr': [
            (r'[^\s:="\[]+', Name.Attribute),
            (r'#{', String.Interpol, 'interpolation'),
            (r'[ \t]*:', Operator, 'value'),
        ],

        'inline-comment': [
            # Consume runs that cannot close the comment or start an
            # interpolation; '#' and '*' are only eaten when safe.
            (r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline),
            (r'#\{', String.Interpol, 'interpolation'),
            (r"\*/", Comment, '#pop'),
        ],
    }
    # Pull in the token groups shared with Sass, then add the
    # SCSS-specific terminators: newlines are insignificant, while
    # ';', '{' and '}' end the construct and return to 'root'.
    for group, common in common_sass_tokens.iteritems():
        tokens[group] = copy.copy(common)
    tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')])
    tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')])
class CoffeeScriptLexer(RegexLexer):
    """
    For `CoffeeScript`_ source code.

    .. _CoffeeScript: http://coffeescript.org

    *New in Pygments 1.3.*
    """

    name = 'CoffeeScript'
    aliases = ['coffee-script', 'coffeescript', 'coffee']
    filenames = ['*.coffee']
    mimetypes = ['text/coffeescript']

    flags = re.DOTALL
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            # Block comments are ###...###; a lone '#' starts a line
            # comment (the lookahead keeps the two from colliding).
            (r'###[^#].*?###', Comment.Multiline),
            (r'#(?!##[^#]).*?\n', Comment.Single),
        ],
        'multilineregex': [
            # Heregex: /// ... /// with #{...} interpolation allowed.
            (r'[^/#]+', String.Regex),
            (r'///([gim]+\b|\B)', String.Regex, '#pop'),
            (r'#{', String.Interpol, 'interpoling_string'),
            (r'[/#]', String.Regex),
        ],
        'slashstartsregex': [
            # Entered after tokens that can precede a regex literal, so
            # a '/' here is a regex rather than division.
            include('commentsandwhitespace'),
            (r'///', String.Regex, ('#pop', 'multilineregex')),
            (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'', Text, '#pop'),
        ],
        'root': [
            # this next expr leads to infinite loops root -> slashstartsregex
            #(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            (r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|'
             r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
             r'=(?!>)|-(?!>)|[<>+*`%&\|\^/])=?',
             Operator, 'slashstartsregex'),
            # Function literal: optional (args) then -> or =>.
            (r'(?:\([^()]+\))?\s*[=-]>', Name.Function),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(?<![\.\$])(for|own|in|of|while|until|'
             r'loop|break|return|continue|'
             r'switch|when|then|if|unless|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
             r'extends|this|class|by)\b', Keyword, 'slashstartsregex'),
            (r'(?<![\.\$])(true|false|yes|no|on|off|null|'
             r'NaN|Infinity|undefined)\b',
             Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b',
             Name.Builtin),
            # Assignment targets: plain names and @-prefixed (this.)
            # instance names followed by ':' or '='.
            (r'[$a-zA-Z_][a-zA-Z0-9_\.:\$]*\s*[:=]\s', Name.Variable,
             'slashstartsregex'),
            (r'@[$a-zA-Z_][a-zA-Z0-9_\.:\$]*\s*[:=]\s', Name.Variable.Instance,
             'slashstartsregex'),
            (r'@', Name.Other, 'slashstartsregex'),
            (r'@?[$a-zA-Z_][a-zA-Z0-9_\$]*', Name.Other, 'slashstartsregex'),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            ('"""', String, 'tdqs'),
            ("'''", String, 'tsqs'),
            ('"', String, 'dqs'),
            ("'", String, 'sqs'),
        ],
        'strings': [
            (r'[^#\\\'"]+', String),
            # note that all coffee script strings are multi-line.
            # hashmarks, quotes and backslashes must be parsed one at a time
        ],
        'interpoling_string' : [
            (r'}', String.Interpol, "#pop"),
            include('root')
        ],
        'dqs': [
            (r'"', String, '#pop'),
            (r'\\.|\'', String),  # double-quoted strings don't need ' escapes
            (r'#{', String.Interpol, "interpoling_string"),
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            (r'#|\\.|"', String),  # single-quoted strings don't need " escapes
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            (r'\\.|\'|"', String),  # no need to escape quotes in triple-strings
            (r'#{', String.Interpol, "interpoling_string"),
            include('strings'),
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            (r'#|\\.|\'|"', String),  # no need to escape quotes in triple-strings
            include('strings')
        ],
    }
class LiveScriptLexer(RegexLexer):
    """
    For `LiveScript`_ source code.

    Closely mirrors `CoffeeScriptLexer`, extended with LiveScript-only
    syntax (backcalls ``<-``, word literals ``\\word``, ``<[ ... ]>``
    lists, radix integers like ``16~ff``).

    .. _LiveScript: http://gkz.github.com/LiveScript/

    New in Pygments 1.6.
    """

    name = 'LiveScript'
    aliases = ['live-script', 'livescript']
    filenames = ['*.ls']
    mimetypes = ['text/livescript']

    flags = re.DOTALL
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'#.*?\n', Comment.Single),
        ],
        'multilineregex': [
            # Heregex body: // ... // with comments/whitespace allowed.
            include('commentsandwhitespace'),
            (r'//([gim]+\b|\B)', String.Regex, '#pop'),
            (r'/', String.Regex),
            (r'[^/#]+', String.Regex)
        ],
        'slashstartsregex': [
            # Entered after tokens that can precede a regex literal, so
            # a '/' here is a regex rather than division.
            include('commentsandwhitespace'),
            (r'//', String.Regex, ('#pop', 'multilineregex')),
            (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'', Text, '#pop'),
        ],
        'root': [
            # this next expr leads to infinite loops root -> slashstartsregex
            #(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            # Function arrows (->, ~>, -->, ~~>) and backcalls (<-, <~).
            (r'(?:\([^()]+\))?[ ]*[~-]{1,2}>|'
             r'(?:\(?[^()\n]+\)?)?[ ]*<[~-]{1,2}', Name.Function),
            (r'\+\+|&&|(?<![\.\$])\b(?:and|x?or|is|isnt|not)\b|\?|:|=|'
             r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
             r'~(?!\~?>)|-(?!\-?>)|<(?!\[)|(?<!\])>|'
             r'[+*`%&\|\^/])=?',
             Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(?<![\.\$])(for|own|in|of|while|until|loop|break|'
             r'return|continue|switch|when|then|if|unless|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
             r'extends|this|class|by|const|var|to|til)\b', Keyword,
             'slashstartsregex'),
            (r'(?<![\.\$])(true|false|yes|no|on|off|'
             r'null|NaN|Infinity|undefined|void)\b',
             Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b',
             Name.Builtin),
            # Assignment targets: plain names and @-prefixed (this.)
            # instance names followed by ':' or '='.
            (r'[$a-zA-Z_][a-zA-Z0-9_\.\-:\$]*\s*[:=]\s', Name.Variable,
             'slashstartsregex'),
            (r'@[$a-zA-Z_][a-zA-Z0-9_\.\-:\$]*\s*[:=]\s', Name.Variable.Instance,
             'slashstartsregex'),
            (r'@', Name.Other, 'slashstartsregex'),
            (r'@?[$a-zA-Z_][a-zA-Z0-9_\-]*', Name.Other, 'slashstartsregex'),
            # Numbers may carry a radix prefix (16~ff) or a unit suffix
            # (10px); both are folded into the number token.
            (r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?(?:[a-zA-Z_]+)?', Number.Float),
            (r'[0-9]+(~[0-9a-z]+)?(?:[a-zA-Z_]+)?', Number.Integer),
            ('"""', String, 'tdqs'),
            ("'''", String, 'tsqs'),
            ('"', String, 'dqs'),
            ("'", String, 'sqs'),
            (r'\\[\w$-]+', String),   # word literal: \word
            (r'<\[.*\]>', String),    # word list: <[ a b c ]>
        ],
        'strings': [
            (r'[^#\\\'"]+', String),
            # note that all LiveScript strings are multi-line.
            # hashmarks, quotes and backslashes must be parsed one at a time
        ],
        'interpoling_string' : [
            (r'}', String.Interpol, "#pop"),
            include('root')
        ],
        'dqs': [
            (r'"', String, '#pop'),
            (r'\\.|\'', String),  # double-quoted strings don't need ' escapes
            (r'#{', String.Interpol, "interpoling_string"),
            (r'#', String),
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            (r'#|\\.|"', String),  # single-quoted strings don't need " escapes
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            (r'\\.|\'|"', String),  # no need to escape quotes in triple-strings
            (r'#{', String.Interpol, "interpoling_string"),
            (r'#', String),
            include('strings'),
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            (r'#|\\.|\'|"', String),  # no need to escape quotes in triple-strings
            include('strings')
        ],
    }
class DuelLexer(RegexLexer):
    """
    Lexer for Duel Views Engine (formerly JBST) markup with JavaScript
    code blocks.

    Delegates HTML to `HtmlLexer` and the embedded code blocks to
    `JavascriptLexer` via ``using()``.

    See http://duelengine.org/.
    See http://jsonml.org/jbst/.

    *New in Pygments 1.4.*
    """

    name = 'Duel'
    aliases = ['duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST']
    filenames = ['*.duel','*.jbst']
    mimetypes = ['text/x-duel','text/x-jbst']

    flags = re.DOTALL
    tokens = {
        'root': [
            # Code blocks: <% ... %> (with @, =, #, !, : variants).
            (r'(<%[@=#!:]?)(.*?)(%>)',
             bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)),
            # Resource expressions: <%$ name : key %>.
            (r'(<%\$)(.*?)(:)(.*?)(%>)',
             bygroups(Name.Tag, Name.Function, Punctuation, String, Name.Tag)),
            # Server-side comments: <%-- ... --%>.
            (r'(<%--)(.*?)(--%>)',
             bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
            (r'(<script.*?>)(.*?)(</script>)',
             bygroups(using(HtmlLexer),
                      using(JavascriptLexer), using(HtmlLexer))),
            # Everything else is HTML; stop before the next '<' so the
            # rules above get a chance to match embedded blocks.
            (r'(.+?)(?=<)', using(HtmlLexer)),
            (r'.+', using(HtmlLexer)),
        ],
    }
class ScamlLexer(ExtendedRegexLexer):
    """
    For `Scaml markup <http://scalate.fusesource.org/>`_.  Scaml is Haml
    for Scala.

    Uses ExtendedRegexLexer because nesting is indentation-driven (see
    the ``_indentation`` callback in 'root').  Embedded Scala code is
    delegated to `ScalaLexer` via ``using()``.

    *New in Pygments 1.4.*
    """

    name = 'Scaml'
    aliases = ['scaml', 'SCAML']
    filenames = ['*.scaml']
    mimetypes = ['text/x-scaml']

    flags = re.IGNORECASE
    # Scaml does not yet support the " |\n" notation to
    # wrap long lines.  Once it does, use the custom faux
    # dot instead.
    # _dot = r'(?: \|\n(?=.* \|)|.)'
    _dot = r'.'
    tokens = {
        'root': [
            # Blank lines are plain text; otherwise indentation selects
            # the state for the rest of the line.
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],

        'css': [
            # Implicit div shortcuts: .class and #id open a tag.
            (r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
            (r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
        ],

        'eval-or-plain': [
            # ==, &==, !== force plain text; =, ~, &=, != evaluate Scala.
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             'root'),
            (r'', Text, 'plain'),
        ],

        'content': [
            include('css'),
            (r'%[a-z0-9_:-]+', Name.Tag, 'tag'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            # Conditional HTML comment: /[if ...] content.
            (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            # -# starts a Scaml (non-rendered) comment block.
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'scaml-comment-block'), '#pop'),
            # -@ attribute/import declarations; - runs Scala silently.
            (r'(-@\s*)(import)?(' + _dot + r'*\n)',
             bygroups(Punctuation, Keyword, using(ScalaLexer)),
             '#pop'),
            (r'(-)(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             '#pop'),
            # :name starts a filter block (e.g. :javascript).
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            include('eval-or-plain'),
        ],

        'tag': [
            include('css'),
            # {...} and [...] attribute hashes are Scala expressions.
            (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
            (r'\[' + _dot + '*?\]', using(ScalaLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            # Whitespace-control markers: <, >, <>, ><.
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],

        'plain': [
            # Plain text up to an interpolation; \#{ is an escaped one.
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],

        'html-attributes': [
            (r'\s+', Text),
            (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[a-z0-9_:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],

        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'[a-z0-9_]+', Name.Variable, '#pop'),
            (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
            (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
            (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
        ],

        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],

        'scaml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],

        'filter-block': [
            # Filter body; interpolations still apply inside filters.
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
class JadeLexer(ExtendedRegexLexer):
    """
    For Jade markup.

    Jade is a variant of Scaml; this lexer mirrors `ScamlLexer` with
    Jade's differences: tags are bare words (no ``%`` prefix) and ``|``
    introduces plain text.  See:
    http://scalate.fusesource.org/documentation/scaml-reference.html

    *New in Pygments 1.4.*
    """

    name = 'Jade'
    aliases = ['jade', 'JADE']
    filenames = ['*.jade']
    mimetypes = ['text/x-jade']

    flags = re.IGNORECASE
    _dot = r'.'
    tokens = {
        'root': [
            # Blank lines are plain text; otherwise indentation selects
            # the state for the rest of the line.
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],

        'css': [
            # Implicit div shortcuts: .class and #id open a tag.
            (r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
            (r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
        ],

        'eval-or-plain': [
            # ==, &==, !== force plain text; =, ~, &=, != evaluate Scala.
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)), 'root'),
            (r'', Text, 'plain'),
        ],

        'content': [
            include('css'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            # Conditional HTML comment: /[if ...] content.
            (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            # -# starts a non-rendered comment block.
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'scaml-comment-block'), '#pop'),
            # -@ attribute/import declarations; - runs Scala silently.
            (r'(-@\s*)(import)?(' + _dot + r'*\n)',
             bygroups(Punctuation, Keyword, using(ScalaLexer)),
             '#pop'),
            (r'(-)(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             '#pop'),
            # :name starts a filter block (e.g. :javascript).
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            # Jade-specific: bare words are tags, '|' introduces text.
            (r'[a-z0-9_:-]+', Name.Tag, 'tag'),
            (r'\|', Text, 'eval-or-plain'),
        ],

        'tag': [
            include('css'),
            # {...} and [...] attribute hashes are Scala expressions.
            (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
            (r'\[' + _dot + '*?\]', using(ScalaLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            # Whitespace-control markers: <, >, <>, ><.
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],

        'plain': [
            # Plain text up to an interpolation; \#{ is an escaped one.
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],

        'html-attributes': [
            (r'\s+', Text),
            (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[a-z0-9_:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],

        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'[a-z0-9_]+', Name.Variable, '#pop'),
            (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
            (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
            (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
        ],

        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],

        'scaml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],

        'filter-block': [
            # Filter body; interpolations still apply inside filters.
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
class XQueryLexer(ExtendedRegexLexer):
"""
An XQuery lexer, parsing a stream and outputting the tokens needed to
highlight xquery code.
*New in Pygments 1.4.*
"""
name = 'XQuery'
aliases = ['xquery', 'xqy', 'xq', 'xql', 'xqm']
filenames = ['*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm']
mimetypes = ['text/xquery', 'application/xquery']
xquery_parse_state = []
# FIX UNICODE LATER
#ncnamestartchar = (
# ur"[A-Z]|_|[a-z]|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]|"
# ur"[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]|[\u2070-\u218F]|"
# ur"[\u2C00-\u2FEF]|[\u3001-\uD7FF]|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]|"
# ur"[\u10000-\uEFFFF]"
#)
ncnamestartchar = r"(?:[A-Z]|_|[a-z])"
# FIX UNICODE LATER
#ncnamechar = ncnamestartchar + (ur"|-|\.|[0-9]|\u00B7|[\u0300-\u036F]|"
# ur"[\u203F-\u2040]")
ncnamechar = r"(?:" + ncnamestartchar + r"|-|\.|[0-9])"
ncname = "(?:%s+%s*)" % (ncnamestartchar, ncnamechar)
pitarget_namestartchar = r"(?:[A-KN-WY-Z]|_|:|[a-kn-wy-z])"
pitarget_namechar = r"(?:" + pitarget_namestartchar + r"|-|\.|[0-9])"
pitarget = "%s+%s*" % (pitarget_namestartchar, pitarget_namechar)
prefixedname = "%s:%s" % (ncname, ncname)
unprefixedname = ncname
qname = "(?:%s|%s)" % (prefixedname, unprefixedname)
entityref = r'(?:&(?:lt|gt|amp|quot|apos|nbsp);)'
charref = r'(?:&#[0-9]+;|&#x[0-9a-fA-F]+;)'
stringdouble = r'(?:"(?:' + entityref + r'|' + charref + r'|""|[^&"])*")'
stringsingle = r"(?:'(?:" + entityref + r"|" + charref + r"|''|[^&'])*')"
# FIX UNICODE LATER
#elementcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
# ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
elementcontentchar = r'[A-Za-z]|\s|\d|[!"#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_\'`\|~]'
#quotattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0021]|[\u0023-\u0025]|'
# ur'[\u0027-\u003b]|[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
quotattrcontentchar = r'[A-Za-z]|\s|\d|[!#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_\'`\|~]'
#aposattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
# ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
aposattrcontentchar = r'[A-Za-z]|\s|\d|[!"#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_`\|~]'
# CHAR elements - fix the above elementcontentchar, quotattrcontentchar,
# aposattrcontentchar
#x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
flags = re.DOTALL | re.MULTILINE | re.UNICODE
def punctuation_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
# transition to root always - don't pop off stack
ctx.stack = ['root']
ctx.pos = match.end()
def operator_root_callback(lexer, match, ctx):
yield match.start(), Operator, match.group(1)
# transition to root always - don't pop off stack
ctx.stack = ['root']
ctx.pos = match.end()
def popstate_tag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
ctx.stack.append(lexer.xquery_parse_state.pop())
ctx.pos = match.end()
def popstate_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append(lexer.xquery_parse_state.pop())
ctx.pos = match.end()
def popstate_kindtest_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
next_state = lexer.xquery_parse_state.pop()
if next_state == 'occurrenceindicator':
if re.match("[?*+]+", match.group(2)):
yield match.start(), Punctuation, match.group(2)
ctx.stack.append('operator')
ctx.pos = match.end()
else:
ctx.stack.append('operator')
ctx.pos = match.end(1)
else:
ctx.stack.append(next_state)
ctx.pos = match.end(1)
def popstate_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
# if we have run out of our state stack, pop whatever is on the pygments
# state stack
if len(lexer.xquery_parse_state) == 0:
ctx.stack.pop()
elif len(ctx.stack) > 1:
ctx.stack.append(lexer.xquery_parse_state.pop())
else:
# i don't know if i'll need this, but in case, default back to root
ctx.stack = ['root']
ctx.pos = match.end()
def pushstate_element_content_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append('element_content')
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append(ctx.state.pop)
ctx.pos = match.end()
def pushstate_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append(ctx.state.pop)
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_operator_order_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_root_validate(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_root_validate_withmode(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Keyword, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_processing_instruction_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('processing_instruction')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_element_content_processing_instruction_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('processing_instruction')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_element_content_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_operator_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_element_content_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('xml_comment')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_operator_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('xml_comment')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_kindtest_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('kindtest')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_operator_kindtestforpi_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('kindtestforpi')
ctx.pos = match.end()
def pushstate_operator_kindtest_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_occurrenceindicator_kindtest_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('occurrenceindicator')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_operator_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_operator_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
lexer.xquery_parse_state.append('operator')
ctx.stack = ['root']#.append('root')
ctx.pos = match.end()
def pushstate_operator_root_construct_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack = ['root']
ctx.pos = match.end()
def pushstate_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
cur_state = ctx.stack.pop()
lexer.xquery_parse_state.append(cur_state)
ctx.stack = ['root']#.append('root')
ctx.pos = match.end()
def pushstate_operator_attribute_callback(lexer, match, ctx):
yield match.start(), Name.Attribute, match.group(1)
ctx.stack.append('operator')
ctx.pos = match.end()
def pushstate_operator_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
tokens = {
'comment': [
# xquery comments
(r'(:\))', Comment, '#pop'),
(r'(\(:)', Comment, '#push'),
(r'[^:)]', Comment),
(r'([^:)]|:|\))', Comment),
],
'whitespace': [
(r'\s+', Text),
],
'operator': [
include('whitespace'),
(r'(\})', popstate_callback),
(r'\(:', Comment, 'comment'),
(r'(\{)', pushstate_root_callback),
(r'then|else|external|at|div|except', Keyword, 'root'),
(r'order by', Keyword, 'root'),
(r'is|mod|order\s+by|stable\s+order\s+by', Keyword, 'root'),
(r'and|or', Operator.Word, 'root'),
(r'(eq|ge|gt|le|lt|ne|idiv|intersect|in)(?=\b)',
Operator.Word, 'root'),
(r'return|satisfies|to|union|where|preserve\s+strip',
Keyword, 'root'),
(r'(>=|>>|>|<=|<<|<|-|\*|!=|\+|\||:=|=)',
operator_root_callback),
(r'(::|;|\[|//|/|,)',
punctuation_root_callback),
(r'(castable|cast)(\s+)(as)\b',
bygroups(Keyword, Text, Keyword), 'singletype'),
(r'(instance)(\s+)(of)\b',
bygroups(Keyword, Text, Keyword), 'itemtype'),
(r'(treat)(\s+)(as)\b',
bygroups(Keyword, Text, Keyword), 'itemtype'),
(r'(case|as)\b', Keyword, 'itemtype'),
(r'(\))(\s*)(as)',
bygroups(Punctuation, Text, Keyword), 'itemtype'),
(r'\$', Name.Variable, 'varname'),
(r'(for|let)(\s+)(\$)',
bygroups(Keyword, Text, Name.Variable), 'varname'),
#(r'\)|\?|\]', Punctuation, '#push'),
(r'\)|\?|\]', Punctuation),
(r'(empty)(\s+)(greatest|least)', bygroups(Keyword, Text, Keyword)),
(r'ascending|descending|default', Keyword, '#push'),
(r'external', Keyword),
(r'collation', Keyword, 'uritooperator'),
# finally catch all string literals and stay in operator state
(stringdouble, String.Double),
(stringsingle, String.Single),
(r'(catch)(\s*)', bygroups(Keyword, Text), 'root'),
],
'uritooperator': [
(stringdouble, String.Double, '#pop'),
(stringsingle, String.Single, '#pop'),
],
'namespacedecl': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'(at)(\s+)('+stringdouble+')', bygroups(Keyword, Text, String.Double)),
(r"(at)(\s+)("+stringsingle+')', bygroups(Keyword, Text, String.Single)),
(stringdouble, String.Double),
(stringsingle, String.Single),
(r',', Punctuation),
(r'=', Operator),
(r';', Punctuation, 'root'),
(ncname, Name.Namespace),
],
'namespacekeyword': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(stringdouble, String.Double, 'namespacedecl'),
(stringsingle, String.Single, 'namespacedecl'),
(r'inherit|no-inherit', Keyword, 'root'),
(r'namespace', Keyword, 'namespacedecl'),
(r'(default)(\s+)(element)', bygroups(Keyword, Text, Keyword)),
(r'preserve|no-preserve', Keyword),
(r',', Punctuation),
],
'varname': [
(r'\(:', Comment, 'comment'),
(qname, Name.Variable, 'operator'),
],
'singletype': [
(r'\(:', Comment, 'comment'),
(ncname + r'(:\*)', Name.Variable, 'operator'),
(qname, Name.Variable, 'operator'),
],
'itemtype': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'\$', Punctuation, 'varname'),
(r'(void)(\s*)(\()(\s*)(\))',
bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'operator'),
(r'(element|attribute|schema-element|schema-attribute|comment|text|'
r'node|binary|document-node|empty-sequence)(\s*)(\()',
pushstate_occurrenceindicator_kindtest_callback),
# Marklogic specific type?
(r'(processing-instruction)(\s*)(\()',
bygroups(Keyword, Text, Punctuation),
('occurrenceindicator', 'kindtestforpi')),
(r'(item)(\s*)(\()(\s*)(\))(?=[*+?])',
bygroups(Keyword, Text, Punctuation, Text, Punctuation),
'occurrenceindicator'),
(r'\(\#', Punctuation, 'pragma'),
(r';', Punctuation, '#pop'),
(r'then|else', Keyword, '#pop'),
(r'(at)(\s+)(' + stringdouble + ')',
bygroups(Keyword, Text, String.Double), 'namespacedecl'),
(r'(at)(\s+)(' + stringsingle + ')',
bygroups(Keyword, Text, String.Single), 'namespacedecl'),
(r'except|intersect|in|is|return|satisfies|to|union|where',
Keyword, 'root'),
(r'and|div|eq|ge|gt|le|lt|ne|idiv|mod|or', Operator.Word, 'root'),
(r':=|=|,|>=|>>|>|\[|\(|<=|<<|<|-|!=|\|', Operator, 'root'),
(r'external|at', Keyword, 'root'),
(r'(stable)(\s+)(order)(\s+)(by)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'root'),
(r'(castable|cast)(\s+)(as)',
bygroups(Keyword, Text, Keyword), 'singletype'),
(r'(treat)(\s+)(as)', bygroups(Keyword, Text, Keyword)),
(r'(instance)(\s+)(of)', bygroups(Keyword, Text, Keyword)),
(r'case|as', Keyword, 'itemtype'),
(r'(\))(\s*)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
(ncname + r':\*', Keyword.Type, 'operator'),
(qname, Keyword.Type, 'occurrenceindicator'),
],
'kindtest': [
(r'\(:', Comment, 'comment'),
(r'{', Punctuation, 'root'),
(r'(\))([*+?]?)', popstate_kindtest_callback),
(r'\*', Name, 'closekindtest'),
(qname, Name, 'closekindtest'),
(r'(element|schema-element)(\s*)(\()', pushstate_kindtest_callback),
],
'kindtestforpi': [
(r'\(:', Comment, 'comment'),
(r'\)', Punctuation, '#pop'),
(ncname, Name.Variable),
(stringdouble, String.Double),
(stringsingle, String.Single),
],
'closekindtest': [
(r'\(:', Comment, 'comment'),
(r'(\))', popstate_callback),
(r',', Punctuation),
(r'(\{)', pushstate_operator_root_callback),
(r'\?', Punctuation),
],
'xml_comment': [
(r'(-->)', popstate_xmlcomment_callback),
(r'[^-]{1,2}', Literal),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'processing_instruction': [
(r'\s+', Text, 'processing_instruction_content'),
(r'\?>', String.Doc, '#pop'),
(pitarget, Name),
],
'processing_instruction_content': [
(r'\?>', String.Doc, '#pop'),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'cdata_section': [
(r']]>', String.Doc, '#pop'),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'start_tag': [
include('whitespace'),
(r'(/>)', popstate_tag_callback),
(r'>', Name.Tag, 'element_content'),
(r'"', Punctuation, 'quot_attribute_content'),
(r"'", Punctuation, 'apos_attribute_content'),
(r'=', Operator),
(qname, Name.Tag),
],
'quot_attribute_content': [
(r'"', Punctuation, 'start_tag'),
(r'(\{)', pushstate_root_callback),
(r'""', Name.Attribute),
(quotattrcontentchar, Name.Attribute),
(entityref, Name.Attribute),
(charref, Name.Attribute),
(r'\{\{|\}\}', Name.Attribute),
],
'apos_attribute_content': [
(r"'", Punctuation, 'start_tag'),
(r'\{', Punctuation, 'root'),
(r"''", Name.Attribute),
(aposattrcontentchar, Name.Attribute),
(entityref, Name.Attribute),
(charref, Name.Attribute),
(r'\{\{|\}\}', Name.Attribute),
],
'element_content': [
(r'</', Name.Tag, 'end_tag'),
(r'(\{)', pushstate_root_callback),
(r'(<!--)', pushstate_element_content_xmlcomment_callback),
(r'(<\?)', pushstate_element_content_processing_instruction_callback),
(r'(<!\[CDATA\[)', pushstate_element_content_cdata_section_callback),
(r'(<)', pushstate_element_content_starttag_callback),
(elementcontentchar, Literal),
(entityref, Literal),
(charref, Literal),
(r'\{\{|\}\}', Literal),
],
'end_tag': [
include('whitespace'),
(r'(>)', popstate_tag_callback),
(qname, Name.Tag),
],
'xmlspace_decl': [
(r'\(:', Comment, 'comment'),
(r'preserve|strip', Keyword, '#pop'),
],
'declareordering': [
(r'\(:', Comment, 'comment'),
include('whitespace'),
(r'ordered|unordered', Keyword, '#pop'),
],
'xqueryversion': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(stringdouble, String.Double),
(stringsingle, String.Single),
(r'encoding', Keyword),
(r';', Punctuation, '#pop'),
],
'pragma': [
(qname, Name.Variable, 'pragmacontents'),
],
'pragmacontents': [
(r'#\)', Punctuation, 'operator'),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
(r'(\s+)', Text),
],
'occurrenceindicator': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'\*|\?|\+', Operator, 'operator'),
(r':=', Operator, 'root'),
(r'', Text, 'operator'),
],
'option': [
include('whitespace'),
(qname, Name.Variable, '#pop'),
],
'qname_braren': [
include('whitespace'),
(r'(\{)', pushstate_operator_root_callback),
(r'(\()', Punctuation, 'root'),
],
'element_qname': [
(qname, Name.Variable, 'root'),
],
'attribute_qname': [
(qname, Name.Variable, 'root'),
],
'root': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
# handle operator state
# order on numbers matters - handle most complex first
(r'\d+(\.\d*)?[eE][\+\-]?\d+', Number.Double, 'operator'),
(r'(\.\d+)[eE][\+\-]?\d+', Number.Double, 'operator'),
(r'(\.\d+|\d+\.\d*)', Number, 'operator'),
(r'(\d+)', Number.Integer, 'operator'),
(r'(\.\.|\.|\))', Punctuation, 'operator'),
(r'(declare)(\s+)(construction)',
bygroups(Keyword, Text, Keyword), 'operator'),
(r'(declare)(\s+)(default)(\s+)(order)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'operator'),
(ncname + ':\*', Name, 'operator'),
('\*:'+ncname, Name.Tag, 'operator'),
('\*', Name.Tag, 'operator'),
(stringdouble, String.Double, 'operator'),
(stringsingle, String.Single, 'operator'),
(r'(\})', popstate_callback),
#NAMESPACE DECL
(r'(declare)(\s+)(default)(\s+)(collation)',
bygroups(Keyword, Text, Keyword, Text, Keyword)),
(r'(module|declare)(\s+)(namespace)',
bygroups(Keyword, Text, Keyword), 'namespacedecl'),
(r'(declare)(\s+)(base-uri)',
bygroups(Keyword, Text, Keyword), 'namespacedecl'),
#NAMESPACE KEYWORD
(r'(declare)(\s+)(default)(\s+)(element|function)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'namespacekeyword'),
(r'(import)(\s+)(schema|module)',
bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'namespacekeyword'),
(r'(declare)(\s+)(copy-namespaces)',
bygroups(Keyword, Text, Keyword), 'namespacekeyword'),
#VARNAMEs
(r'(for|let|some|every)(\s+)(\$)',
bygroups(Keyword, Text, Name.Variable), 'varname'),
(r'\$', Name.Variable, 'varname'),
(r'(declare)(\s+)(variable)(\s+)(\$)',
bygroups(Keyword, Text, Keyword, Text, Name.Variable), 'varname'),
#ITEMTYPE
(r'(\))(\s+)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
(r'(element|attribute|schema-element|schema-attribute|comment|'
r'text|node|document-node|empty-sequence)(\s+)(\()',
pushstate_operator_kindtest_callback),
(r'(processing-instruction)(\s+)(\()',
pushstate_operator_kindtestforpi_callback),
(r'(<!--)', pushstate_operator_xmlcomment_callback),
(r'(<\?)', pushstate_operator_processing_instruction_callback),
(r'(<!\[CDATA\[)', pushstate_operator_cdata_section_callback),
# (r'</', Name.Tag, 'end_tag'),
(r'(<)', pushstate_operator_starttag_callback),
(r'(declare)(\s+)(boundary-space)',
bygroups(Keyword, Text, Keyword), 'xmlspace_decl'),
(r'(validate)(\s+)(lax|strict)',
pushstate_operator_root_validate_withmode),
(r'(validate)(\s*)(\{)', pushstate_operator_root_validate),
(r'(typeswitch)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
(r'(element|attribute)(\s*)(\{)',
pushstate_operator_root_construct_callback),
(r'(document|text|processing-instruction|comment)(\s*)(\{)',
pushstate_operator_root_construct_callback),
#ATTRIBUTE
(r'(attribute)(\s+)(?=' + qname + r')',
bygroups(Keyword, Text), 'attribute_qname'),
#ELEMENT
(r'(element)(\s+)(?=' +qname+ r')',
bygroups(Keyword, Text), 'element_qname'),
#PROCESSING_INSTRUCTION
(r'(processing-instruction)(\s+)(' + ncname + r')(\s*)(\{)',
bygroups(Keyword, Text, Name.Variable, Text, Punctuation),
'operator'),
(r'(declare|define)(\s+)(function)',
bygroups(Keyword, Text, Keyword)),
(r'(\{)', pushstate_operator_root_callback),
(r'(unordered|ordered)(\s*)(\{)',
pushstate_operator_order_callback),
(r'(declare)(\s+)(ordering)',
bygroups(Keyword, Text, Keyword), 'declareordering'),
(r'(xquery)(\s+)(version)',
bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'xqueryversion'),
(r'(\(#)', Punctuation, 'pragma'),
# sometimes return can occur in root state
(r'return', Keyword),
(r'(declare)(\s+)(option)', bygroups(Keyword, Text, Keyword),
'option'),
#URI LITERALS - single and double quoted
(r'(at)(\s+)('+stringdouble+')', String.Double, 'namespacedecl'),
(r'(at)(\s+)('+stringsingle+')', String.Single, 'namespacedecl'),
(r'(ancestor-or-self|ancestor|attribute|child|descendant-or-self)(::)',
bygroups(Keyword, Punctuation)),
(r'(descendant|following-sibling|following|parent|preceding-sibling'
r'|preceding|self)(::)', bygroups(Keyword, Punctuation)),
(r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
(r'then|else', Keyword),
# ML specific
(r'(try)(\s*)', bygroups(Keyword, Text), 'root'),
(r'(catch)(\s*)(\()(\$)',
bygroups(Keyword, Text, Punctuation, Name.Variable), 'varname'),
(r'(@'+qname+')', Name.Attribute),
(r'(@'+ncname+')', Name.Attribute),
(r'@\*:'+ncname, Name.Attribute),
(r'(@)', Name.Attribute),
(r'//|/|\+|-|;|,|\(|\)', Punctuation),
# STANDALONE QNAMES
(qname + r'(?=\s*{)', Name.Tag, 'qname_braren'),
(qname + r'(?=\s*\([^:])', Name.Function, 'qname_braren'),
(qname, Name.Tag, 'operator'),
]
}
class DartLexer(RegexLexer):
    """
    For `Dart <http://dartlang.org/>`_ source code.

    *New in Pygments 1.5.*
    """

    name = 'Dart'
    aliases = ['dart']
    filenames = ['*.dart']
    mimetypes = ['text/x-dart']

    # DOTALL lets multi-line string rules span newlines; rule ORDER within
    # each state is significant (first match wins).
    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            include('string_literal'),
            (r'#!(.*?)$', Comment.Preproc),
            (r'\b(import|export)\b', Keyword, 'import_decl'),
            (r'\b(library|source|part of|part)\b', Keyword),
            (r'[^\S\n]+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'\b(class)\b(\s+)',
             bygroups(Keyword.Declaration, Text), 'class'),
            (r'\b(assert|break|case|catch|continue|default|do|else|finally|for|'
             r'if|in|is|new|return|super|switch|this|throw|try|while)\b',
             Keyword),
            (r'\b(abstract|const|extends|factory|final|get|implements|'
             r'native|operator|set|static|typedef|var)\b', Keyword.Declaration),
            (r'\b(bool|double|Dynamic|int|num|Object|String|void)\b', Keyword.Type),
            (r'\b(false|null|true)\b', Keyword.Constant),
            (r'[~!%^&*+=|?:<>/-]|as', Operator),
            # Labels must come before the plain-identifier rule below.
            (r'[a-zA-Z_$][a-zA-Z0-9_]*:', Name.Label),
            (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name),
            (r'[(){}\[\],.;]', Punctuation),
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            # DIGIT+ (‘.’ DIGIT*)? EXPONENT?
            (r'\d+(\.\d*)?([eE][+-]?\d+)?', Number),
            (r'\.\d+([eE][+-]?\d+)?', Number),  # ‘.’ DIGIT+ EXPONENT?
            (r'\n', Text)
            # pseudo-keyword negate intentionally left out
        ],
        'class': [
            # Consume the class name, then return to 'root'.
            (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        'import_decl': [
            include('string_literal'),
            (r'\s+', Text),
            (r'\b(as|show|hide)\b', Keyword),
            (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name),
            (r'\,', Punctuation),
            (r'\;', Punctuation, '#pop')
        ],
        # Dispatch state for every string form; raw strings are matched whole,
        # normal strings push into a dedicated per-quote state.
        'string_literal': [
            # Raw strings.
            (r'r"""([\s|\S]*?)"""', String.Double),
            (r"r'''([\s|\S]*?)'''", String.Single),
            (r'r"(.*?)"', String.Double),
            (r"r'(.*?)'", String.Single),
            # Normal Strings.
            (r'"""', String.Double, 'string_double_multiline'),
            (r"'''", String.Single, 'string_single_multiline'),
            (r'"', String.Double, 'string_double'),
            (r"'", String.Single, 'string_single')
        ],
        # Escapes and $-interpolation shared by all non-raw string states.
        'string_common': [
            (r"\\(x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|u\{[0-9A-Fa-f]*\}|[a-z\'\"$\\])",
             String.Escape),
            (r'(\$)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(String.Interpol, Name)),
            (r'(\$\{)(.*?)(\})',
             bygroups(String.Interpol, using(this), String.Interpol))
        ],
        'string_double': [
            (r'"', String.Double, '#pop'),
            (r'[^\"$\\\n]+', String.Double),
            include('string_common'),
            # Lone '$' not consumed by string_common (e.g. trailing '$').
            (r'\$+', String.Double)
        ],
        'string_double_multiline': [
            (r'"""', String.Double, '#pop'),
            (r'[^\"$\\]+', String.Double),
            include('string_common'),
            (r'(\$|\")+', String.Double)
        ],
        'string_single': [
            (r"'", String.Single, '#pop'),
            (r"[^\'$\\\n]+", String.Single),
            include('string_common'),
            (r'\$+', String.Single)
        ],
        'string_single_multiline': [
            (r"'''", String.Single, '#pop'),
            (r'[^\'$\\]+', String.Single),
            include('string_common'),
            (r'(\$|\')+', String.Single)
        ]
    }
class TypeScriptLexer(RegexLexer):
    """
    For `TypeScript <http://www.typescriptlang.org/>`_ source code.

    *New in Pygments 1.6.*
    """

    name = 'TypeScript'
    aliases = ['ts']
    filenames = ['*.ts']
    mimetypes = ['text/x-typescript']

    flags = re.DOTALL
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'<!--', Comment),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline)
        ],
        # Entered wherever a following '/' would start a regex literal rather
        # than a division operator.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            (r'', Text, '#pop')
        ],
        'badregex': [
            # Skip to end of line after a '/' that could not be lexed as a regex.
            (r'\n', Text, '#pop')
        ],
        'root': [
            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'this)\b', Keyword, 'slashstartsregex'),
            (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
            (r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
             r'extends|final|float|goto|implements|import|int|interface|long|native|'
             r'package|private|protected|public|short|static|super|synchronized|throws|'
             r'transient|volatile)\b', Keyword.Reserved),
            (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
             r'window)\b', Name.Builtin),
            # Match stuff like: module name {...}
            (r'\b(module)(\s*)(\s*[a-zA-Z0-9_?.$][\w?.$]*)(\s*)',
             bygroups(Keyword.Reserved, Text, Name.Other, Text), 'slashstartsregex'),
            # Match variable type keywords
            (r'\b(string|bool|number)\b', Keyword.Type),
            # Match stuff like: constructor
            (r'\b(constructor|declare|interface|as|AS)\b', Keyword.Reserved),
            # Match stuff like: super(argument, list)
            (r'(super)(\s*)(\([a-zA-Z0-9,_?.$\s]+\s*\))',
             bygroups(Keyword.Reserved, Text), 'slashstartsregex'),
            # Match stuff like: function() {...}
            (r'([a-zA-Z_?.$][\w?.$]*)\(\) \{', Name.Other, 'slashstartsregex'),
            # Match stuff like: (function: return type)
            (r'([a-zA-Z0-9_?.$][\w?.$]*)(\s*:\s*)([a-zA-Z0-9_?.$][\w?.$]*)',
             bygroups(Name.Other, Text, Keyword.Type)),
            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
class LassoLexer(RegexLexer):
    """
    For `Lasso <http://www.lassosoft.com/>`_ source code, covering both Lasso 9
    syntax and LassoScript for Lasso 8.6 and earlier. For Lasso embedded in
    HTML, use the `LassoHtmlLexer`.

    Additional options accepted:

    `builtinshighlighting`
        If given and ``True``, highlight builtin types, traits, methods, and
        members (default: ``True``).
    `requiredelimiters`
        If given and ``True``, only highlight code between delimiters as Lasso
        (default: ``False``).

    *New in Pygments 1.6.*
    """

    name = 'Lasso'
    aliases = ['lasso', 'lassoscript']
    filenames = ['*.lasso', '*.lasso[89]']
    alias_filenames = ['*.incl', '*.inc', '*.las']
    mimetypes = ['text/x-lasso']
    flags = re.IGNORECASE | re.DOTALL | re.MULTILINE

    tokens = {
        # 'root' decides whether the input is a whole Lasso 9 file (shebang)
        # or HTML with embedded Lasso in [] / <? ?> delimiters.
        'root': [
            (r'^#!.+lasso9\b', Comment.Preproc, 'lasso'),
            (r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'),
            (r'\[noprocess\]', Comment.Preproc, ('delimiters', 'noprocess')),
            (r'\[', Comment.Preproc, ('delimiters', 'squarebrackets')),
            (r'<\?(LassoScript|lasso|=)', Comment.Preproc,
                ('delimiters', 'anglebrackets')),
            (r'<', Other, 'delimiters'),
            (r'\s+', Other),
            # Fallback: treat the whole input as a Lasso file.
            (r'', Other, ('delimiters', 'lassofile')),
        ],
        'delimiters': [
            (r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'),
            (r'\[noprocess\]', Comment.Preproc, 'noprocess'),
            (r'\[', Comment.Preproc, 'squarebrackets'),
            (r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'),
            (r'<', Other),
            (r'[^[<]+', Other),
        ],
        'nosquarebrackets': [
            # After [no_square_brackets], only <? ?> delimiters open Lasso.
            (r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'),
            (r'<', Other),
            (r'[^<]+', Other),
        ],
        'noprocess': [
            (r'\[/noprocess\]', Comment.Preproc, '#pop'),
            (r'\[', Other),
            (r'[^[]', Other),
        ],
        'squarebrackets': [
            (r'\]', Comment.Preproc, '#pop'),
            include('lasso'),
        ],
        'anglebrackets': [
            (r'\?>', Comment.Preproc, '#pop'),
            include('lasso'),
        ],
        'lassofile': [
            (r'\]', Comment.Preproc, '#pop'),
            (r'\?>', Comment.Preproc, '#pop'),
            include('lasso'),
        ],
        'whitespacecomments': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*\*!.*?\*/', String.Doc),
            (r'/\*.*?\*/', Comment.Multiline),
        ],
        # The core language; rule ORDER is significant (first match wins).
        'lasso': [
            # whitespace/comments
            include('whitespacecomments'),

            # literals
            (r'\d*\.\d+(e[+-]?\d+)?', Number.Float),
            (r'0x[\da-f]+', Number.Hex),
            (r'\d+', Number.Integer),
            (r'([+-]?)(infinity|NaN)\b', bygroups(Operator, Number)),
            (r"'", String.Single, 'singlestring'),
            (r'"', String.Double, 'doublestring'),
            (r'`[^`]*`', String.Backtick),

            # names
            (r'\$[a-z_][\w.]*', Name.Variable),
            (r'#([a-z_][\w.]*|\d+)', Name.Variable.Instance),
            (r"(\.)('[a-z_][\w.]*')",
             bygroups(Name.Builtin.Pseudo, Name.Variable.Class)),
            (r"(self)(\s*->\s*)('[a-z_][\w.]*')",
             bygroups(Name.Builtin.Pseudo, Operator, Name.Variable.Class)),
            (r'(\.\.?)([a-z_][\w.]*)',
             bygroups(Name.Builtin.Pseudo, Name.Other.Member)),
            (r'(->\\?\s*|&\s*)([a-z_][\w.]*)',
             bygroups(Operator, Name.Other.Member)),
            (r'(self|inherited|global|void)\b', Name.Builtin.Pseudo),
            (r'-[a-z_][\w.]*', Name.Attribute),
            (r'(::\s*)([a-z_][\w.]*)', bygroups(Punctuation, Name.Label)),
            (r'(error_(code|msg)_\w+|Error_AddError|Error_ColumnRestriction|'
             r'Error_DatabaseConnectionUnavailable|Error_DatabaseTimeout|'
             r'Error_DeleteError|Error_FieldRestriction|Error_FileNotFound|'
             r'Error_InvalidDatabase|Error_InvalidPassword|'
             r'Error_InvalidUsername|Error_ModuleNotFound|'
             r'Error_NoError|Error_NoPermission|Error_OutOfMemory|'
             r'Error_ReqColumnMissing|Error_ReqFieldMissing|'
             r'Error_RequiredColumnMissing|Error_RequiredFieldMissing|'
             r'Error_UpdateError)\b', Name.Exception),

            # definitions
            (r'(define)(\s+)([a-z_][\w.]*)(\s*=>\s*)(type|trait|thread)\b',
             bygroups(Keyword.Declaration, Text, Name.Class, Operator, Keyword)),
            (r'(define)(\s+)([a-z_][\w.]*)(\s*->\s*)([a-z_][\w.]*=?|[-+*/%<>]|==)',
             bygroups(Keyword.Declaration, Text, Name.Class, Operator,
                      Name.Function), 'signature'),
            (r'(define)(\s+)([a-z_][\w.]*)',
             bygroups(Keyword.Declaration, Text, Name.Function), 'signature'),
            (r'(public|protected|private|provide)(\s+)(([a-z_][\w.]*=?|'
             r'[-+*/%<>]|==)(?=\s*\())', bygroups(Keyword, Text, Name.Function),
             'signature'),
            (r'(public|protected|private)(\s+)([a-z_][\w.]*)',
             bygroups(Keyword, Text, Name.Function)),

            # keywords
            (r'(true|false|none|minimal|full|all)\b', Keyword.Constant),
            (r'(local|var|variable|data(?=\s))\b', Keyword.Declaration),
            (r'(array|date|decimal|duration|integer|map|pair|string|tag|xml|'
             r'null|list|queue|set|stack|staticarray)\b', Keyword.Type),
            (r'([a-z_][\w.]*)(\s+)(in)\b', bygroups(Name, Text, Keyword)),
            (r'(let|into)(\s+)([a-z_][\w.]*)', bygroups(Keyword, Text, Name)),
            (r'require\b', Keyword, 'requiresection'),
            (r'(/?)(Namespace_Using)\b', bygroups(Punctuation, Keyword.Namespace)),
            (r'(/?)(Cache|Database_Names|Database_SchemaNames|'
             r'Database_TableNames|Define_Tag|Define_Type|Email_Batch|'
             r'Encode_Set|HTML_Comment|Handle|Handle_Error|Header|If|Inline|'
             r'Iterate|LJAX_Target|Link|Link_CurrentAction|Link_CurrentGroup|'
             r'Link_CurrentRecord|Link_Detail|Link_FirstGroup|'
             r'Link_FirstRecord|Link_LastGroup|Link_LastRecord|Link_NextGroup|'
             r'Link_NextRecord|Link_PrevGroup|Link_PrevRecord|Log|Loop|'
             r'NoProcess|Output_None|Portal|Private|Protect|Records|Referer|'
             r'Referrer|Repeating|ResultSet|Rows|Search_Args|Search_Arguments|'
             r'Select|Sort_Args|Sort_Arguments|Thread_Atomic|Value_List|While|'
             r'Abort|Case|Else|If_Empty|If_False|If_Null|If_True|Loop_Abort|'
             r'Loop_Continue|Loop_Count|Params|Params_Up|Return|Return_Value|'
             r'Run_Children|SOAP_DefineTag|SOAP_LastRequest|SOAP_LastResponse|'
             r'Tag_Name|ascending|average|by|define|descending|do|equals|'
             r'frozen|group|handle_failure|import|in|into|join|let|match|max|'
             r'min|on|order|parent|protected|provide|public|require|skip|'
             r'split_thread|sum|take|thread|to|trait|type|where|with|yield)\b',
             bygroups(Punctuation, Keyword)),

            # other
            (r',', Punctuation, 'commamember'),
            (r'(and|or|not)\b', Operator.Word),
            (r'([a-z_][\w.]*)(\s*::\s*)?([a-z_][\w.]*)?(\s*=(?!=))',
             bygroups(Name, Punctuation, Name.Label, Operator)),
            (r'(/?)([\w.]+)', bygroups(Punctuation, Name.Other)),
            (r'(=)(bw|ew|cn|lte?|gte?|n?eq|ft|n?rx)\b',
             bygroups(Operator, Operator.Word)),
            (r':=|[-+*/%=<>&|!?\\]+', Operator),
            (r'[{}():;,@^]', Punctuation),
        ],
        'singlestring': [
            (r"'", String.Single, '#pop'),
            (r"[^'\\]+", String.Single),
            include('escape'),
            (r"\\+", String.Single),
        ],
        'doublestring': [
            (r'"', String.Double, '#pop'),
            (r'[^"\\]+', String.Double),
            include('escape'),
            (r'\\+', String.Double),
        ],
        'escape': [
            (r'\\(U[\da-f]{8}|u[\da-f]{4}|x[\da-f]{1,2}|[0-7]{1,3}|:[^:]+:|'
             r'[abefnrtv?\"\'\\]|$)', String.Escape),
        ],
        # Method signature after a define/public/... rule above.
        'signature': [
            (r'=>', Operator, '#pop'),
            (r'\)', Punctuation, '#pop'),
            (r'[(,]', Punctuation, 'parameter'),
            include('lasso'),
        ],
        'parameter': [
            (r'\)', Punctuation, '#pop'),
            (r'-?[a-z_][\w.]*', Name.Attribute, '#pop'),
            (r'\.\.\.', Name.Builtin.Pseudo),
            include('lasso'),
        ],
        'requiresection': [
            (r'(([a-z_][\w.]*=?|[-+*/%<>]|==)(?=\s*\())', Name, 'requiresignature'),
            (r'(([a-z_][\w.]*=?|[-+*/%<>]|==)(?=(\s*::\s*[\w.]+)?\s*,))', Name),
            (r'[a-z_][\w.]*=?|[-+*/%<>]|==', Name, '#pop'),
            (r'(::\s*)([a-z_][\w.]*)', bygroups(Punctuation, Name.Label)),
            (r',', Punctuation),
            include('whitespacecomments'),
        ],
        'requiresignature': [
            (r'(\)(?=(\s*::\s*[\w.]+)?\s*,))', Punctuation, '#pop'),
            (r'\)', Punctuation, '#pop:2'),
            (r'-?[a-z_][\w.]*', Name.Attribute),
            (r'(::\s*)([a-z_][\w.]*)', bygroups(Punctuation, Name.Label)),
            (r'\.\.\.', Name.Builtin.Pseudo),
            (r'[(,]', Punctuation),
            include('whitespacecomments'),
        ],
        'commamember': [
            (r'(([a-z_][\w.]*=?|[-+*/%<>]|==)'
             r'(?=\s*(\(([^()]*\([^()]*\))*[^)]*\)\s*)?(::[\w.\s]+)?=>))',
             Name.Function, 'signature'),
            include('whitespacecomments'),
            (r'', Text, '#pop'),
        ],
    }

    def __init__(self, **options):
        # Per-instance option handling; builtin highlighting needs the large
        # name tables from _lassobuiltins, hence the deferred import.
        self.builtinshighlighting = get_bool_opt(
            options, 'builtinshighlighting', True)
        self.requiredelimiters = get_bool_opt(
            options, 'requiredelimiters', False)

        self._builtins = set()
        self._members = set()
        if self.builtinshighlighting:
            from pygments.lexers._lassobuiltins import BUILTINS, MEMBERS
            # NOTE: iteritems() is Python 2 only — consistent with the rest
            # of this file (it also uses ur'' literals).
            for key, value in BUILTINS.iteritems():
                self._builtins.update(value)
            for key, value in MEMBERS.iteritems():
                self._members.update(value)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # Post-process the regular token stream, promoting known builtin
        # names/members to Name.Builtin (case-insensitive lookup).
        stack = ['root']
        if self.requiredelimiters:
            stack.append('delimiters')
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text, stack):
            if (token is Name.Other and value.lower() in self._builtins or
                token is Name.Other.Member and value.lower() in self._members):
                yield index, Name.Builtin, value
                continue
            yield index, token, value

    # NOTE(review): no `self` parameter — presumably relies on Pygments
    # treating analyse_text as an unbound heuristic; confirm against the
    # Pygments lexer API before changing.
    def analyse_text(text):
        # Heuristic score in [0, 1]-ish for "this looks like Lasso".
        rv = 0.0
        if 'bin/lasso9' in text:
            rv += 0.8
        if re.search(r'<\?(=|lasso)|\A\[', text, re.I):
            rv += 0.4
        if re.search(r'local\(', text, re.I):
            rv += 0.4
        if '?>' in text:
            rv += 0.1
        return rv
class QmlLexer(RegexLexer):
    """
    For QML files. See http://doc.qt.digia.com/4.7/qdeclarativeintroduction.html.

    *New in Pygments 1.6.*
    """

    # QML is based on javascript, so much of this is taken from the
    # JavascriptLexer above.

    name = 'QML'
    aliases = ['qml', 'Qt Meta Language', 'Qt modeling Language']
    filenames = ['*.qml',]
    mimetypes = [ 'application/x-qml',]

    # pasted from JavascriptLexer, with some additions
    flags = re.DOTALL
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'<!--', Comment),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline)
        ],
        # Entered wherever a following '/' would start a regex literal
        # rather than a division operator.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            (r'', Text, '#pop')
        ],
        'badregex': [
            # Skip the rest of the line when the regex rule failed to match.
            (r'\n', Text, '#pop')
        ],
        'root' : [
            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),

            # QML insertions
            (r'\bid\s*:\s*[A-Za-z][_A-Za-z.0-9]*',Keyword.Declaration,
             'slashstartsregex'),
            (r'\b[A-Za-z][_A-Za-z.0-9]*\s*:',Keyword, 'slashstartsregex'),

            # the rest from JavascriptLexer
            (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'this)\b', Keyword, 'slashstartsregex'),
            (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
            (r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
             r'extends|final|float|goto|implements|import|int|interface|long|native|'
             r'package|private|protected|public|short|static|super|synchronized|throws|'
             r'transient|volatile)\b', Keyword.Reserved),
            (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
             r'window)\b', Name.Builtin),
            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
|
mark-ignacio/py-fakename | refs/heads/master | fakename/wrapper.py | 1 | from datetime import datetime
import requests
# noinspection PyUnresolvedReferences
from fakename.six.moves.html_parser import HTMLParser
# noinspection PyUnresolvedReferences
from fakename.six.moves.urllib.parse import urljoin
from fakename.six import PY3, PY2
from fakename import __version__
# Base URL of the fake-identity service this wrapper scrapes.
DOMAIN = 'https://fakena.me/'

# construct a whole bunch of other urls
RANDOM_URL = urljoin(DOMAIN, 'random/')

# Shared HTTP session for all requests; identifies this client with a
# versioned User-Agent string.
session = requests.Session()
session.headers['User-Agent'] = 'py-fakename-' + __version__
# noinspection PyAttributeOutsideInit
class PageParser(HTMLParser):
    # Streaming parser for a fakena.me profile page.  The identity data
    # comes from table rows of the form <tr><td>key</td><td>value</td></tr>;
    # the permalink comes from the anchor whose text is
    # "Permalink for this profile".  Results accumulate in ``self.identity``.

    # whole lotta' state

    # noinspection PyCompatibility,PyArgumentList
    def reset(self):
        # (Re-)initialize all parsing state; HTMLParser.reset is invoked
        # differently on Python 2 vs 3.
        if PY3:
            super().reset()
        elif PY2:
            # old style invocation for an old style class
            HTMLParser.reset(self)

        self.in_tr = False       # currently inside a <tr>
        self.in_td = False       # currently inside a <td>
        self.tr_key = None       # text of the first cell in the current row
        self.tr_value = None     # text of the second cell in the current row
        self.in_anchor = False   # currently inside an <a>
        self.anchor_href = None  # href of the currently open anchor
        self.identity = {}       # accumulated key -> value results

    # essentially, we're waiting until we reach the table body to scrape data
    def handle_starttag(self, tag, attrs):
        if tag == 'tr':
            self.in_tr = True
        elif tag == 'td':
            self.in_td = True
        elif tag == 'a':
            self.in_anchor = True
            self.anchor_href = dict(attrs)['href']

    def handle_data(self, data):
        # handle_data will only be called once per textNode since we receive the entire page beforehand
        if self.in_td:
            # First populated cell of a row is the key, the second its value.
            if self.tr_key is None:
                self.tr_key = data.strip()
            elif self.tr_key and self.tr_value is None:
                self.tr_value = data.strip()
        elif self.in_anchor and data.strip() == 'Permalink for this profile':
            self.identity['permalink'] = urljoin(DOMAIN, self.anchor_href)

    def handle_endtag(self, tag):
        if tag == 'td':
            self.in_td = False
        elif tag == 'tr':
            self.in_tr = False
            # Row finished: commit the pair and reset for the next row.
            if self.tr_key and self.tr_value:
                self.identity[self.tr_key] = self.tr_value
            self.tr_key = None
            self.tr_value = None
        elif tag == 'a':
            self.in_anchor = False
            self.anchor_href = None
# Module-level singleton; gen_identity() calls PARSER.reset() before every
# feed, so sequential reuse is safe (shared mutable state — presumably not
# safe under concurrent use; confirm before threading).
PARSER = PageParser()
def gen_identity(process=True):
    """
    Gets an identity generated from https://fakena.me

    The `process` argument decides whether to do any post-processing on parsed out results at all; this is intended for
    debugging and developmental use only.

    :param process: whether to process results at all
    :return: dictionary containing an identity
    """
    page = session.get(RANDOM_URL, verify=True).text

    # python 3.2 workaround
    page = page.replace('<a href=\\"https://twitter.com/fakena_me\\">', '<a href="https://twitter.com/fakena_me">')
    page = page.replace('<a href=\\"bitcoin:17LfTRzWG6xF9nmErRLG3gtQk1vH3u3jBq\\">', '<a href="bitcoin:17LfTRzWG6xF9nmErRLG3gtQk1vH3u3jBq">')

    # PARSER is a shared instance, so clear its state before feeding.
    PARSER.reset()
    PARSER.feed(page)
    ugly = PARSER.identity

    # no processing whatsoever - even includes the colons!
    if not process:
        return ugly

    # make it pythonic and parse it out a bit for normal wrapper output
    # namely, work on the DOB + city/state/zip
    # sample: "Malone, KY 41451"
    city, rest = ugly['City, State, ZIP:'].rsplit(',', 1)
    # The ZIP is always the last whitespace-separated token, so split from
    # the right with maxsplit=1.  (The previous split(' ', 2) could produce
    # three pieces — e.g. a doubled space or a multi-word state — and raise
    # ValueError on the two-name unpack.)
    state, zip_code = rest.strip().rsplit(' ', 1)
    city = city.rstrip(',')
    dob = datetime.strptime(ugly['Date of Birth:'], '%Y-%m-%d').date()

    identity = {
        'name': ugly['Name:'],
        'dob': dob,
        'address': ugly['Street Address:'],
        'city': city,
        'state': state,
        'zip': zip_code,
        'phone': ugly['Phone Number:'],
        'username': ugly['Username:'],
        'password': ugly['Password:'],
        'temp_email': ugly['Temporary Email Address:'],
        'permalink': ugly['permalink']
    }
    return identity
|
efiring/scipy | refs/heads/master | scipy/special/orthogonal.py | 5 | """
A collection of functions to find the weights and abscissas for
Gaussian Quadrature.
These calculations are done by finding the eigenvalues of a
tridiagonal matrix whose entries are dependent on the coefficients
in the recursion formula for the orthogonal polynomials with the
corresponding weighting function over the interval.
Many recursion relations for orthogonal polynomials are given:
.. math::
a1n f_{n+1} (x) = (a2n + a3n x ) f_n (x) - a4n f_{n-1} (x)
The recursion relation of interest is
.. math::
P_{n+1} (x) = (x - A_n) P_n (x) - B_n P_{n-1} (x)
where :math:`P` has a different normalization than :math:`f`.
The coefficients can be found as:
.. math::
A_n = -a2n / a3n
\\qquad
B_n = ( a4n / a3n \\sqrt{h_n-1 / h_n})^2
where
.. math::
h_n = \\int_a^b w(x) f_n(x)^2
assume:
.. math::
P_0 (x) = 1
\\qquad
P_{-1} (x) == 0
For the mathematical background, see [golub.welsch-1969-mathcomp]_ and
[abramowitz.stegun-1965]_.
Functions::
gen_roots_and_weights -- Generic roots and weights.
j_roots -- Jacobi
js_roots -- Shifted Jacobi
la_roots -- Generalized Laguerre
h_roots -- Hermite
he_roots -- Hermite (unit-variance)
cg_roots -- Ultraspherical (Gegenbauer)
t_roots -- Chebyshev of the first kind
u_roots -- Chebyshev of the second kind
c_roots -- Chebyshev of the first kind ([-2,2] interval)
s_roots -- Chebyshev of the second kind ([-2,2] interval)
ts_roots -- Shifted Chebyshev of the first kind.
us_roots -- Shifted Chebyshev of the second kind.
p_roots -- Legendre
ps_roots -- Shifted Legendre
l_roots -- Laguerre
.. [golub.welsch-1969-mathcomp]
Golub, Gene H, and John H Welsch. 1969. Calculation of Gauss
Quadrature Rules. *Mathematics of Computation* 23, 221-230+s1--s10.
.. [abramowitz.stegun-1965]
Abramowitz, Milton, and Irene A Stegun. (1965) *Handbook of
Mathematical Functions: with Formulas, Graphs, and Mathematical
Tables*. Gaithersburg, MD: National Bureau of Standards.
http://www.math.sfu.ca/~cbm/aands/
"""
#
# Author: Travis Oliphant 2000
# Updated Sep. 2003 (fixed bugs --- tested to be accurate)
from __future__ import division, print_function, absolute_import
# Scipy imports.
import numpy as np
from numpy import all, any, exp, inf, pi, sqrt
from scipy import linalg
# Local imports.
from . import _ufuncs as cephes
# Shorthand for the gamma function, used throughout this module for
# normalization constants of the orthogonal polynomials.
_gam = cephes.gamma

# Public API: polynomial constructors, Gauss root/weight helpers, and the
# eval_* routines re-exported from the cephes ufuncs.
__all__ = ['legendre', 'chebyt', 'chebyu', 'chebyc', 'chebys',
           'jacobi', 'laguerre', 'genlaguerre', 'hermite', 'hermitenorm',
           'gegenbauer', 'sh_legendre', 'sh_chebyt', 'sh_chebyu', 'sh_jacobi',
           'p_roots', 'ps_roots', 'j_roots', 'js_roots', 'l_roots', 'la_roots',
           'he_roots', 'ts_roots', 'us_roots', 's_roots', 't_roots', 'u_roots',
           'c_roots', 'cg_roots', 'h_roots',
           'eval_legendre', 'eval_chebyt', 'eval_chebyu', 'eval_chebyc',
           'eval_chebys', 'eval_jacobi', 'eval_laguerre', 'eval_genlaguerre',
           'eval_hermite', 'eval_hermitenorm', 'eval_gegenbauer',
           'eval_sh_legendre', 'eval_sh_chebyt', 'eval_sh_chebyu',
           'eval_sh_jacobi', 'poch', 'binom']

# For backward compatibility
poch = cephes.poch
class orthopoly1d(np.poly1d):
    # A poly1d that additionally carries the quadrature data of an orthogonal
    # polynomial: Gauss nodes and weights, the weight function, the
    # orthogonality interval, and normalization constants.

    def __init__(self, roots, weights=None, hn=1.0, kn=1.0, wfunc=None,
                 limits=None, monic=False, eval_func=None):
        # roots    : Gauss quadrature nodes (also the polynomial's roots)
        # weights  : quadrature weights
        # hn       : squared norm of the polynomial under the weight
        # kn       : leading coefficient
        # wfunc    : weight function w(x)
        # limits   : orthogonality interval (a, b)
        # monic    : rescale so the leading coefficient becomes 1
        # eval_func: fast evaluation routine used instead of the coefficients
        # NOTE(review): `weights` and `wfunc` default to None but are used
        # unconditionally below; callers must either pass both or pass an
        # empty `roots` list (which makes the comprehension empty).
        np.poly1d.__init__(self, roots, r=1)
        equiv_weights = [weights[k] / wfunc(roots[k]) for
                         k in range(len(roots))]
        # Attributes are written via __dict__ — presumably to bypass
        # np.poly1d's custom attribute handling; TODO confirm against the
        # numpy version in use.
        self.__dict__['weights'] = np.array(list(zip(roots,
                                                     weights, equiv_weights)))
        self.__dict__['weight_func'] = wfunc
        self.__dict__['limits'] = limits
        mu = sqrt(hn)
        if monic:
            evf = eval_func
            if evf:
                # Rescale the fast evaluator to match the monic coefficients.
                eval_func = lambda x: evf(x) / kn
            mu = mu / abs(kn)
            kn = 1.0
        self.__dict__['normcoef'] = mu
        self.__dict__['coeffs'] *= kn

        # Note: eval_func will be discarded on arithmetic
        self.__dict__['_eval_func'] = eval_func

    def __call__(self, v):
        # Use the fast evaluation routine for numeric arguments; polynomial
        # arguments (composition) must go through poly1d's machinery.
        if self._eval_func and not isinstance(v, np.poly1d):
            return self._eval_func(v)
        else:
            return np.poly1d.__call__(self, v)

    def _scale(self, p):
        # Multiply the polynomial — coefficients, evaluator and normalization
        # coefficient — by the scalar p; p == 1 is a no-op.
        if p == 1.0:
            return
        self.__dict__['coeffs'] *= p
        evf = self.__dict__['_eval_func']
        if evf:
            self.__dict__['_eval_func'] = lambda x: evf(x) * p
        self.__dict__['normcoef'] *= p
def _gen_roots_and_weights(n, mu0, an_func, bn_func, f, df, symmetrize, mu):
    """[x,w] = gen_roots_and_weights(n,an_func,sqrt_bn_func,mu)

    Returns the roots (x) of an nth order orthogonal polynomial,
    and weights (w) to use in appropriate Gaussian quadrature with that
    orthogonal polynomial.

    The polynomials have the recurrence relation
          P_n+1(x) = (x - A_n) P_n(x) - B_n P_n-1(x)

    an_func(n)          should return A_n
    sqrt_bn_func(n)     should return sqrt(B_n)
    mu ( = h_0 )        is the integral of the weight over the orthogonal
                        interval

    f(n, x) evaluates the n-th orthonormal polynomial; df(n, x) its
    derivative.  If `symmetrize` is true, roots/weights are symmetrized
    about zero (for even weight functions).  If `mu` is true, mu0 is also
    returned.
    """
    k = np.arange(n, dtype='d')
    c = np.zeros((2, n))
    # Golub-Welsch: the roots are the eigenvalues of the symmetric
    # tridiagonal Jacobi matrix (diagonal A_k, off-diagonal sqrt(B_k)),
    # stored here in banded form for eigvals_banded.
    c[0,1:] = bn_func(k[1:])
    c[1,:] = an_func(k)
    x = linalg.eigvals_banded(c, overwrite_a_band=True)

    # improve roots by one application of Newton's method
    y = f(n, x)
    dy = df(n, x)
    x -= y/dy

    # Weights are proportional to 1 / (P_{n-1}(x_k) * P_n'(x_k)); both
    # factors are rescaled by their max for numerical safety — only the
    # ratio matters because of the renormalization below.
    fm = f(n-1, x)
    fm /= np.abs(fm).max()
    dy /= np.abs(dy).max()
    w = 1.0 / (fm * dy)

    if symmetrize:
        # Enforce exact symmetry about the origin for even weights.
        w = (w + w[::-1]) / 2
        x = (x - x[::-1]) / 2

    # Normalize so the weights sum to mu0 = integral of the weight.
    w *= mu0 / w.sum()

    if mu:
        return x, w, mu0
    else:
        return x, w
# Jacobi Polynomials 1 P^(alpha,beta)_n(x)
def j_roots(n, alpha, beta, mu=False):
    """Gauss-Jacobi quadrature

    Computes the sample points and weights for Gauss-Jacobi quadrature.
    The sample points are the roots of the `n`th degree Jacobi polynomial
    :math:`P^{\\alpha, \\beta}_n(x)`; together with the weights they
    integrate polynomials of degree :math:`2*n - 1` or less exactly over
    the interval :math:`[-1, 1]` with weight function
    :math:`f(x) = (1 - x)^{\\alpha} (1 + x)^{\\beta}`.

    Parameters
    ----------
    n : int
        quadrature order
    alpha : float
        alpha must be > -1
    beta : float
        beta must be > 0
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights (only if `mu` is True)

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    degree = int(n)
    if n < 1 or n != degree:
        raise ValueError("n must be a positive integer.")
    if alpha <= -1 or beta <= -1:
        raise ValueError("alpha and beta must be greater than -1.")

    # Delegate to specialized rules when the weight simplifies.
    if alpha == 0.0 and beta == 0.0:
        return p_roots(degree, mu)           # Gauss-Legendre
    if alpha == beta:
        return cg_roots(degree, alpha + 0.5, mu)  # Gegenbauer

    a, b = alpha, beta
    # Zeroth moment of the Jacobi weight over [-1, 1].
    mu0 = 2.0**(a + b + 1) * cephes.beta(a + 1, b + 1)

    # Three-term recurrence coefficients for the Jacobi matrix.
    def an_func(k):
        if a + b == 0.0:
            return np.where(k == 0, (b - a) / (2 + a + b), 0.0)
        return np.where(k == 0, (b - a) / (2 + a + b),
                        (b*b - a*a) / ((2.0*k + a + b) * (2.0*k + a + b + 2)))

    def bn_func(k):
        return (2.0 / (2.0*k + a + b)
                * np.sqrt((k + a) * (k + b) / (2*k + a + b + 1))
                * np.where(k == 1, 1.0,
                           np.sqrt(k * (k + a + b) / (2.0*k + a + b - 1))))

    def f(n, x):
        return cephes.eval_jacobi(n, a, b, x)

    def df(n, x):
        # d/dx P_n^{(a,b)}(x) in terms of a lower-degree Jacobi polynomial.
        return 0.5 * (n + a + b + 1) * cephes.eval_jacobi(n - 1, a + 1, b + 1, x)

    return _gen_roots_and_weights(degree, mu0, an_func, bn_func, f, df,
                                  False, mu)
def jacobi(n, alpha, beta, monic=False):
    """Return the nth order Jacobi polynomial P^(alpha,beta)_n(x).

    The polynomials are orthogonal over [-1, 1] with respect to the
    weight (1-x)**alpha * (1+x)**beta, alpha, beta > -1.
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")

    def wfunc(x):
        return (1 - x)**alpha * (1 + x)**beta

    # Degree zero is the constant polynomial 1; no quadrature data needed.
    if n == 0:
        return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic,
                           eval_func=np.ones_like)

    nodes, weights, mu = j_roots(n, alpha, beta, mu=True)
    ab1 = alpha + beta + 1.0
    # Squared norm h_n of P_n under the Jacobi weight.
    hn = 2**ab1 / (2 * n + ab1) * _gam(n + alpha + 1)
    hn *= _gam(n + beta + 1.0) / _gam(n + 1) / _gam(n + ab1)
    # Leading coefficient (on the x**n term).
    kn = _gam(2 * n + ab1) / 2.0**n / _gam(n + 1) / _gam(n + ab1)
    return orthopoly1d(nodes, weights, hn, kn, wfunc, (-1, 1), monic,
                       lambda x: eval_jacobi(n, alpha, beta, x))
# Jacobi Polynomials shifted G_n(p,q,x)
def js_roots(n, p1, q1, mu=False):
    """Gauss-Jacobi (shifted) quadrature

    Computes the sample points and weights for Gauss-Jacobi (shifted)
    quadrature.  The sample points are the roots of the `n`th degree
    shifted Jacobi polynomial :math:`G^{p,q}_n(x)`; together with the
    weights they integrate polynomials of degree :math:`2*n - 1` or less
    exactly over :math:`[0, 1]` with weight function
    :math:`f(x) = (1 - x)^{p-q} x^{q-1}`.

    Parameters
    ----------
    n : int
        quadrature order
    p1 : float
        (p1 - q1) must be > -1
    q1 : float
        q1 must be > 0
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights (only if `mu` is True)

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    if (p1 - q1) <= -1 or q1 <= 0:
        raise ValueError("(p - q) must be greater than -1, and q must be greater than 0.")
    # Shifted Jacobi quadrature is plain Jacobi quadrature with parameters
    # (p - q, q - 1); map the nodes from [-1, 1] onto [0, 1] and keep the
    # weights (and mu0, when requested) unchanged.
    result = j_roots(n, p1 - q1, q1 - 1, mu)
    nodes = result[0]
    return ((nodes + 1) / 2,) + result[1:]
def sh_jacobi(n, p, q, monic=False):
    """Return the nth order shifted Jacobi polynomial, G_n(p,q,x).

    Orthogonal over [0, 1] with weighting function
    (1-x)**(p-q) * x**(q-1), where p > q-1 and q > 0.
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")

    def weight(x):
        return (1.0 - x)**(p - q) * (x)**(q - 1.)

    if n == 0:
        # Constant polynomial; the quadrature arrays are unused.
        return orthopoly1d([], [], 1.0, 1.0, weight, (-1, 1), monic,
                           eval_func=np.ones_like)
    nodes, weights, _mu = js_roots(n, p, q, mu=True)
    # Squared norm of G_n (accumulated exactly as before).
    hn = _gam(n + 1) * _gam(n + q) * _gam(n + p) * _gam(n + p - q + 1)
    hn /= (2 * n + p) * (_gam(2 * n + p)**2)
    # kn = 1.0 in standard form so monic is redundant.  Kept for
    # compatibility with the other constructors.
    kn = 1.0
    return orthopoly1d(nodes, weights, hn, kn, wfunc=weight,
                       limits=(0, 1), monic=monic,
                       eval_func=lambda x: eval_sh_jacobi(n, p, q, x))
# Generalized Laguerre L^(alpha)_n(x)
def la_roots(n, alpha, mu=False):
    """Gauss-generalized Laguerre quadrature.

    Compute the sample points and weights for Gauss-generalized
    Laguerre quadrature.  The sample points are the roots of the nth
    degree generalized Laguerre polynomial L^alpha_n(x); together with
    the weights they integrate polynomials of degree 2*n - 1 or less
    exactly over [0, inf] with weight function f(x) = x^alpha * e^(-x).

    Parameters
    ----------
    n : int
        quadrature order
    alpha : float
        alpha must be > -1
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights, only returned when ``mu`` is True.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")
    if alpha < -1:
        raise ValueError("alpha must be greater than -1.")

    mu0 = cephes.gamma(alpha + 1)

    if m == 1:
        # Order one has a closed form: the single node is at alpha + 1
        # and the weight equals the zeroth moment.
        x = np.array([alpha + 1.0], 'd')
        w = np.array([mu0], 'd')
        return (x, w, mu0) if mu else (x, w)

    # Three-term recurrence coefficients plus polynomial/derivative
    # evaluators consumed by the generic root/weight solver.
    def an_func(k):
        return 2 * k + alpha + 1

    def bn_func(k):
        return -np.sqrt(k * (k + alpha))

    def f(n, x):
        return cephes.eval_genlaguerre(n, alpha, x)

    def df(n, x):
        return (n * cephes.eval_genlaguerre(n, alpha, x)
                - (n + alpha) * cephes.eval_genlaguerre(n - 1, alpha, x)) / x

    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu)
def genlaguerre(n, alpha, monic=False):
    """Return the nth order generalized (associated) Laguerre polynomial,
    L^(alpha)_n(x), orthogonal over [0, inf) with weighting function
    exp(-x) * x**alpha, where alpha > -1.

    Parameters
    ----------
    n : int
        Order of the polynomial; must be nonnegative.
    alpha : float
        Parameter of the weight function; must be > -1.
    monic : bool, optional
        If True, the leading coefficient is normalized to 1.

    Returns
    -------
    orthopoly1d
        The generalized Laguerre polynomial object.
    """
    # Fix: the original used ``any(alpha <= -1)``.  For a scalar alpha
    # the builtin any() raises TypeError ('bool' object is not iterable)
    # instead of the intended ValueError; a direct comparison is correct.
    if alpha <= -1:
        raise ValueError("alpha must be > -1")
    if n < 0:
        raise ValueError("n must be nonnegative.")
    if n == 0:
        # Quadrature order must be at least 1; the nodes computed for
        # n1 = 1 are discarded below.
        n1 = n + 1
    else:
        n1 = n
    x, w, mu0 = la_roots(n1, alpha, mu=True)
    wfunc = lambda x: exp(-x) * x**alpha
    if n == 0:
        x, w = [], []
    hn = _gam(n + alpha + 1) / _gam(n + 1)  # squared norm of L^alpha_n
    kn = (-1)**n / _gam(n + 1)              # leading coefficient
    p = orthopoly1d(x, w, hn, kn, wfunc, (0, inf), monic,
                    lambda x: eval_genlaguerre(n, alpha, x))
    return p
# Laguerre L_n(x)
def l_roots(n, mu=False):
    """Gauss-Laguerre quadrature.

    Compute the sample points and weights for Gauss-Laguerre
    quadrature.  The sample points are the roots of the nth degree
    Laguerre polynomial L_n(x); together with the weights they
    integrate polynomials of degree 2*n - 1 or less exactly over
    [0, inf] with weight function f(x) = e^(-x).

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights, only returned when ``mu`` is True.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    numpy.polynomial.laguerre.laggauss
    """
    # The plain Laguerre polynomial is generalized Laguerre with
    # alpha = 0, so delegate.
    return la_roots(n, 0.0, mu=mu)
def laguerre(n, monic=False):
    """Return the nth order Laguerre polynomial, L_n(x), orthogonal
    over [0, inf) with weighting function exp(-x).
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    # The quadrature order must be at least one; for n == 0 the nodes
    # and weights computed below are discarded again.
    n1 = n + 1 if n == 0 else n
    x, w, mu0 = l_roots(n1, mu=True)
    if n == 0:
        x, w = [], []
    hn = 1.0                        # squared norm of L_n
    kn = (-1)**n / _gam(n + 1)      # leading coefficient
    return orthopoly1d(x, w, hn, kn, lambda x: exp(-x), (0, inf), monic,
                       lambda x: eval_laguerre(n, x))
# Hermite 1 H_n(x)
def h_roots(n, mu=False):
    """Gauss-Hermite (physicist's) quadrature.

    Compute the sample points and weights for Gauss-Hermite
    quadrature.  The sample points are the roots of the nth degree
    Hermite polynomial H_n(x); together with the weights they
    integrate polynomials of degree 2*n - 1 or less exactly over
    [-inf, inf] with weight function f(x) = e^(-x^2).

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights, only returned when ``mu`` is True.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    numpy.polynomial.hermite.hermgauss
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")
    mu0 = np.sqrt(np.pi)

    # Recurrence coefficients of the physicists' Hermite polynomials.
    def an_func(k):
        return 0.0 * k

    def bn_func(k):
        return np.sqrt(k / 2.0)

    f = cephes.eval_hermite

    def df(n, x):
        return 2.0 * n * cephes.eval_hermite(n - 1, x)

    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
def hermite(n, monic=False):
    """Return the nth order Hermite polynomial, H_n(x), orthogonal over
    (-inf, inf) with weighting function exp(-x**2).
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    # Quadrature order must be >= 1; the nodes for n == 0 are dropped.
    n1 = n + 1 if n == 0 else n
    x, w, mu0 = h_roots(n1, mu=True)

    def weight(x):
        return exp(-x * x)

    if n == 0:
        x, w = [], []
    hn = 2**n * _gam(n + 1) * sqrt(pi)  # squared norm of H_n
    kn = 2**n                           # leading coefficient
    return orthopoly1d(x, w, hn, kn, weight, (-inf, inf), monic,
                       lambda x: eval_hermite(n, x))
# Hermite 2 He_n(x)
def he_roots(n, mu=False):
    """Gauss-Hermite (statistician's) quadrature.

    Compute the sample points and weights for Gauss-Hermite
    quadrature.  The sample points are the roots of the nth degree
    Hermite polynomial He_n(x); together with the weights they
    integrate polynomials of degree 2*n - 1 or less exactly over
    [-inf, inf] with weight function f(x) = e^(-(x/2)^2).

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights, only returned when ``mu`` is True.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    numpy.polynomial.hermite_e.hermegauss
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")
    mu0 = np.sqrt(np.pi / 2.0)

    # Recurrence coefficients of the probabilists' Hermite polynomials.
    def an_func(k):
        return 0.0 * k

    def bn_func(k):
        return np.sqrt(k)

    f = cephes.eval_hermitenorm

    def df(n, x):
        return n * cephes.eval_hermitenorm(n - 1, x)

    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
def hermitenorm(n, monic=False):
    """Return the nth order normalized Hermite polynomial, He_n(x),
    orthogonal over (-inf, inf) with weighting function exp(-(x/2)**2).
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    # Quadrature order must be >= 1; the nodes for n == 0 are dropped.
    n1 = n + 1 if n == 0 else n
    x, w, mu0 = he_roots(n1, mu=True)

    def weight(x):
        return exp(-x * x / 4.0)

    if n == 0:
        x, w = [], []
    hn = sqrt(2 * pi) * _gam(n + 1)  # squared norm of He_n
    kn = 1.0                         # He_n is monic in this normalization
    return orthopoly1d(x, w, hn, kn, wfunc=weight, limits=(-inf, inf),
                       monic=monic,
                       eval_func=lambda x: eval_hermitenorm(n, x))
# The remainder of the polynomials can be derived from the ones above.
# Ultraspherical (Gegenbauer) C^(alpha)_n(x)
def cg_roots(n, alpha, mu=False):
    """Gauss-Gegenbauer quadrature.

    Compute the sample points and weights for Gauss-Gegenbauer
    quadrature.  The sample points are the roots of the nth degree
    Gegenbauer polynomial C^alpha_n(x); together with the weights they
    integrate polynomials of degree 2*n - 1 or less exactly over
    [-1, 1] with weight function f(x) = (1 - x^2)^(alpha - 1/2).

    Parameters
    ----------
    n : int
        quadrature order
    alpha : float
        alpha must be > -0.5
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights, only returned when ``mu`` is True.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")
    if alpha < -0.5:
        raise ValueError("alpha must be greater than -0.5.")
    elif alpha == 0.0:
        # C(n,0,x) == 0 uniformly, however, as alpha->0,
        # C(n,alpha,x)->T(n,x).  Strictly the roots are not defined
        # here, but this used to return something useful, so keep the
        # Chebyshev fallback for backwards compatibility.
        return t_roots(n, mu)

    mu0 = np.sqrt(np.pi) * cephes.gamma(alpha + 0.5) / cephes.gamma(alpha + 1)

    # Recurrence coefficients and evaluators for the generic solver.
    def an_func(k):
        return 0.0 * k

    def bn_func(k):
        return np.sqrt(k * (k + 2 * alpha - 1)
                       / (4 * (k + alpha) * (k + alpha - 1)))

    def f(n, x):
        return cephes.eval_gegenbauer(n, alpha, x)

    def df(n, x):
        return (-n * x * cephes.eval_gegenbauer(n, alpha, x)
                + (n + 2 * alpha - 1) * cephes.eval_gegenbauer(n - 1, alpha, x)) / (1 - x**2)

    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
def gegenbauer(n, alpha, monic=False):
    """Return the nth order Gegenbauer (ultraspherical) polynomial,
    C^(alpha)_n(x), orthogonal over [-1, 1] with weighting function
    (1 - x**2)**(alpha - 1/2), alpha > -1/2.
    """
    # C^alpha_n is a rescaled Jacobi polynomial P^(a,a)_n, a = alpha - 1/2.
    poly = jacobi(n, alpha - 0.5, alpha - 0.5, monic=monic)
    if monic:
        return poly
    # Abramowitz and Stegun 22.5.20: factor converting the Jacobi
    # normalization to the Gegenbauer one.
    factor = (_gam(2 * alpha + n) * _gam(alpha + 0.5) /
              _gam(2 * alpha) / _gam(alpha + 0.5 + n))
    poly._scale(factor)
    poly.__dict__['_eval_func'] = lambda x: eval_gegenbauer(float(n), alpha, x)
    return poly
# Chebyshev of the first kind: T_n(x) =
# n! sqrt(pi) / _gam(n+1./2)* P^(-1/2,-1/2)_n(x)
# Computed anew.
def t_roots(n, mu=False):
    """Gauss-Chebyshev (first kind) quadrature.

    Compute the sample points and weights for Gauss-Chebyshev
    quadrature.  The sample points are the roots of the nth degree
    Chebyshev polynomial of the first kind, T_n(x); together with the
    weights they integrate polynomials of degree 2*n - 1 or less
    exactly over [-1, 1] with weight function f(x) = 1/sqrt(1 - x^2).

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights, only returned when ``mu`` is True.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    numpy.polynomial.chebyshev.chebgauss
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError('n must be a positive integer.')
    # Closed form: x_k = cos((2k - 1) pi / (2m)); walking k from m down
    # to 1 yields the nodes in increasing order.  All weights are pi/m,
    # and the weight sum is pi.
    nodes = np.cos(np.arange(2 * m - 1, 0, -2) * pi / (2 * m))
    weights = np.full_like(nodes, pi / m)
    return (nodes, weights, pi) if mu else (nodes, weights)
def chebyt(n, monic=False):
    """Return the nth order Chebyshev polynomial of the first kind,
    Tn(x).  Orthogonal over [-1, 1] with weight (1 - x**2)**(-1/2).
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")

    def weight(x):
        return 1.0 / sqrt(1 - x * x)

    if n == 0:
        # Constant polynomial; squared norm over [-1, 1] is pi.
        return orthopoly1d([], [], pi, 1.0, weight, (-1, 1), monic,
                           lambda x: eval_chebyt(n, x))
    x, w, mu = t_roots(n, mu=True)
    hn = pi / 2       # squared norm of T_n for n >= 1
    kn = 2**(n - 1)   # leading coefficient
    return orthopoly1d(x, w, hn, kn, weight, (-1, 1), monic,
                       lambda x: eval_chebyt(n, x))
# Chebyshev of the second kind
# U_n(x) = (n+1)! sqrt(pi) / (2*_gam(n+3./2)) * P^(1/2,1/2)_n(x)
def u_roots(n, mu=False):
    """Gauss-Chebyshev (second kind) quadrature.

    Compute the sample points and weights for Gauss-Chebyshev
    quadrature.  The sample points are the roots of the nth degree
    Chebyshev polynomial of the second kind, U_n(x); together with the
    weights they integrate polynomials of degree 2*n - 1 or less
    exactly over [-1, 1] with weight function f(x) = sqrt(1 - x^2).

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights, only returned when ``mu`` is True.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError('n must be a positive integer.')
    # Closed form: x_k = cos(k pi / (m+1)) with weight
    # pi sin^2(k pi / (m+1)) / (m+1); walking k from m down to 1
    # yields the nodes in increasing order.  The weight sum is pi/2.
    angles = np.arange(m, 0, -1) * pi / (m + 1)
    nodes = np.cos(angles)
    weights = pi * np.sin(angles)**2 / (m + 1)
    if mu:
        return nodes, weights, pi / 2
    return nodes, weights
def chebyu(n, monic=False):
    """Return the nth order Chebyshev polynomial of the second kind,
    Un(x).  Orthogonal over [-1, 1] with weight (1 - x**2)**(1/2).
    """
    # U_n is a rescaled Jacobi polynomial P^(1/2,1/2)_n.
    poly = jacobi(n, 0.5, 0.5, monic=monic)
    if monic:
        return poly
    # Conversion factor from the Jacobi to the Chebyshev normalization.
    factor = sqrt(pi) / 2.0 * _gam(n + 2) / _gam(n + 1.5)
    poly._scale(factor)
    return poly
# Chebyshev of the first kind C_n(x)
def c_roots(n, mu=False):
    """Gauss-Chebyshev (first kind) quadrature on [-2, 2].

    Compute the sample points and weights for Gauss-Chebyshev
    quadrature.  The sample points are the roots of the nth degree
    Chebyshev polynomial of the first kind, C_n(x); together with the
    weights they integrate polynomials of degree 2*n - 1 or less
    exactly over [-2, 2] with weight function f(x) = 1/sqrt(1 - (x/2)^2).

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights, only returned when ``mu`` is True.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    # C_n(x) = T_n(x/2): stretch the standard Chebyshev nodes by two;
    # the weights (and their sum, if requested) are unchanged.
    result = t_roots(n, mu)
    return (2 * result[0],) + result[1:]
def chebyc(n, monic=False):
    """Return the nth order Chebyshev polynomial of the first kind,
    Cn(x).  Orthogonal over [-2, 2] with weight (1 - (x/2)**2)**(-1/2).
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    # Quadrature order must be >= 1; the nodes for n == 0 are dropped.
    n1 = n + 1 if n == 0 else n
    x, w, mu0 = c_roots(n1, mu=True)
    if n == 0:
        x, w = [], []
    hn = 4 * pi * ((n == 0) + 1)  # squared norm: 8*pi for n == 0, else 4*pi
    kn = 1.0
    p = orthopoly1d(x, w, hn, kn,
                    wfunc=lambda x: 1.0 / sqrt(1 - x * x / 4.0),
                    limits=(-2, 2), monic=monic)
    if not monic:
        # Normalize so that C_n(2) == 2, the standard convention.
        p._scale(2.0 / p(2))
        p.__dict__['_eval_func'] = lambda x: eval_chebyc(n, x)
    return p
# Chebyshev of the second kind S_n(x)
def s_roots(n, mu=False):
    """Gauss-Chebyshev (second kind) quadrature on [-2, 2].

    Compute the sample points and weights for Gauss-Chebyshev
    quadrature.  The sample points are the roots of the nth degree
    Chebyshev polynomial of the second kind, S_n(x); together with the
    weights they integrate polynomials of degree 2*n - 1 or less
    exactly over [-2, 2] with weight function f(x) = sqrt(1 - (x/2)^2).

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights, only returned when ``mu`` is True.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    # S_n(x) = U_n(x/2): stretch the standard nodes by two; the weights
    # (and their sum, if requested) are unchanged.
    result = u_roots(n, mu)
    return (2 * result[0],) + result[1:]
def chebys(n, monic=False):
    """Return nth order Chebyshev polynomial of second kind, Sn(x). Orthogonal
    over [-2,2] with weight function (1-(x/2)**2)**(1/2).
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    if n == 0:
        # Quadrature order must be at least 1; nodes discarded below.
        n1 = n + 1
    else:
        n1 = n
    x, w, mu0 = s_roots(n1, mu=True)
    if n == 0:
        x, w = [], []
    hn = pi    # squared norm of S_n
    kn = 1.0   # leading coefficient (monic in this normalization)
    p = orthopoly1d(x, w, hn, kn,
                    wfunc=lambda x: sqrt(1 - x * x / 4.0),
                    limits=(-2, 2), monic=monic)
    if not monic:
        # Normalize so that S_n(2) == n + 1, the standard convention.
        factor = (n + 1.0) / p(2)
        p._scale(factor)
        p.__dict__['_eval_func'] = lambda x: eval_chebys(n, x)
    return p
# Shifted Chebyshev of the first kind T^*_n(x)
def ts_roots(n, mu=False):
    """Gauss-Chebyshev (first kind, shifted) quadrature.

    Compute the sample points and weights for Gauss-Chebyshev
    quadrature.  The sample points are the roots of the nth degree
    shifted Chebyshev polynomial of the first kind, T_n(x); together
    with the weights they integrate polynomials of degree 2*n - 1 or
    less exactly over [0, 1] with weight f(x) = 1/sqrt(x - x^2).

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights, only returned when ``mu`` is True.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    # Map the standard Chebyshev nodes from [-1, 1] onto [0, 1]; the
    # weights (and their sum, if requested) are unchanged.
    result = t_roots(n, mu)
    return ((result[0] + 1) / 2,) + result[1:]
def sh_chebyt(n, monic=False):
    """Return the nth order shifted Chebyshev polynomial of the first
    kind, Tn(x).  Orthogonal over [0, 1] with weight (x - x**2)**(-1/2).
    """
    poly = sh_jacobi(n, 0.0, 0.5, monic=monic)
    if monic:
        return poly
    # Rescale from the shifted-Jacobi normalization; the constant
    # polynomial (n == 0) needs no rescaling.
    factor = 4**n / 2.0 if n > 0 else 1.0
    poly._scale(factor)
    return poly
# Shifted Chebyshev of the second kind U^*_n(x)
def us_roots(n, mu=False):
    """Gauss-Chebyshev (second kind, shifted) quadrature.

    Compute the sample points and weights for Gauss-Chebyshev
    quadrature.  The sample points are the roots of the nth degree
    shifted Chebyshev polynomial of the second kind, U_n(x); together
    with the weights they integrate polynomials of degree 2*n - 1 or
    less exactly over [0, 1] with weight f(x) = sqrt(x - x^2).

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights, only returned when ``mu`` is True.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    # Map the standard nodes from [-1, 1] onto [0, 1]; the weights
    # (and their sum, if requested) are unchanged.
    result = u_roots(n, mu)
    return ((result[0] + 1) / 2,) + result[1:]
def sh_chebyu(n, monic=False):
    """Return the nth order shifted Chebyshev polynomial of the second
    kind, Un(x).  Orthogonal over [0, 1] with weight (x - x**2)**(1/2).
    """
    poly = sh_jacobi(n, 2.0, 1.5, monic=monic)
    if monic:
        return poly
    # Rescale from the shifted-Jacobi normalization.
    poly._scale(4**n)
    return poly
# Legendre
def p_roots(n, mu=False):
    """Gauss-Legendre quadrature.

    Compute the sample points and weights for Gauss-Legendre
    quadrature.  The sample points are the roots of the nth degree
    Legendre polynomial P_n(x); together with the weights they
    integrate polynomials of degree 2*n - 1 or less exactly over
    [-1, 1] with weight function f(x) = 1.0.

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights, only returned when ``mu`` is True.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    numpy.polynomial.legendre.leggauss
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")
    mu0 = 2.0

    # Recurrence coefficients and evaluators for the generic solver.
    def an_func(k):
        return 0.0 * k

    def bn_func(k):
        return k * np.sqrt(1.0 / (4 * k * k - 1))

    f = cephes.eval_legendre

    def df(n, x):
        return (-n * x * cephes.eval_legendre(n, x)
                + n * cephes.eval_legendre(n - 1, x)) / (1 - x**2)

    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
def legendre(n, monic=False):
    """
    Legendre polynomial coefficients

    Return the nth-order Legendre polynomial, P_n(x), orthogonal over
    [-1, 1] with weight function 1.

    Parameters
    ----------
    n
        Order of the polynomial
    monic : bool, optional
        If True, output is a monic polynomial (normalized so the
        leading coefficient is 1).  Default is False.

    Returns
    -------
    P : orthopoly1d
        The Legendre polynomial object

    Examples
    --------
    Generate the 3rd-order Legendre polynomial 1/2*(5x^3 + 0x^2 - 3x + 0):

    >>> legendre(3)
    poly1d([ 2.5,  0. , -1.5, -0. ])
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    # Quadrature order must be >= 1; the nodes for n == 0 are dropped.
    n1 = n + 1 if n == 0 else n
    x, w, mu0 = p_roots(n1, mu=True)
    if n == 0:
        x, w = [], []
    hn = 2.0 / (2 * n + 1)                          # squared norm of P_n
    kn = _gam(2 * n + 1) / _gam(n + 1)**2 / 2.0**n  # leading coefficient
    return orthopoly1d(x, w, hn, kn, wfunc=lambda x: 1.0, limits=(-1, 1),
                       monic=monic, eval_func=lambda x: eval_legendre(n, x))
# Shifted Legendre P^*_n(x)
def ps_roots(n, mu=False):
    """Gauss-Legendre (shifted) quadrature.

    Compute the sample points and weights for Gauss-Legendre
    quadrature.  The sample points are the roots of the nth degree
    shifted Legendre polynomial P*_n(x); together with the weights
    they integrate polynomials of degree 2*n - 1 or less exactly over
    [0, 1] with weight function f(x) = 1.0.

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights, only returned when ``mu`` is True.

    See Also
    --------
    integrate.quadrature
    integrate.fixed_quad
    """
    # Map the standard Gauss-Legendre nodes from [-1, 1] onto [0, 1];
    # the weights (and their sum, if requested) are unchanged.
    result = p_roots(n, mu)
    return ((result[0] + 1) / 2,) + result[1:]
def sh_legendre(n, monic=False):
    """Return the nth order shifted Legendre polynomial, P*_n(x),
    orthogonal over [0, 1] with weighting function 1.
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")

    def weight(x):
        return 0.0 * x + 1.0

    if n == 0:
        # Constant polynomial; no quadrature nodes needed.
        return orthopoly1d([], [], 1.0, 1.0, weight, (0, 1), monic,
                           lambda x: eval_sh_legendre(n, x))
    x, w, mu0 = ps_roots(n, mu=True)
    hn = 1.0 / (2 * n + 1.0)               # squared norm of P*_n
    kn = _gam(2 * n + 1) / _gam(n + 1)**2  # leading coefficient
    return orthopoly1d(x, w, hn, kn, weight, limits=(0, 1), monic=monic,
                       eval_func=lambda x: eval_sh_legendre(n, x))
# -----------------------------------------------------------------------------
# Vectorized functions for evaluation
# -----------------------------------------------------------------------------
from ._ufuncs import (binom, eval_jacobi, eval_sh_jacobi, eval_gegenbauer,
eval_chebyt, eval_chebyu, eval_chebys, eval_chebyc,
eval_sh_chebyt, eval_sh_chebyu, eval_legendre,
eval_sh_legendre, eval_genlaguerre, eval_laguerre,
eval_hermite, eval_hermitenorm)
|
wbond/oscrypto | refs/heads/master | dev/build.py | 7 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import imp
import os
import tarfile
import zipfile
import setuptools.sandbox
from . import package_root, package_name, has_tests_package
def _list_zip(filename):
"""
Prints all of the files in a .zip file
"""
zf = zipfile.ZipFile(filename, 'r')
for name in zf.namelist():
print(' %s' % name)
def _list_tgz(filename):
"""
Prints all of the files in a .tar.gz file
"""
tf = tarfile.open(filename, 'r:gz')
for name in tf.getnames():
print(' %s' % name)
def run():
    """
    Creates a sdist .tar.gz and a bdist_wheel --universal .whl for the
    package, and (when present) for its companion tests package, placing
    all artifacts in the package's dist/ directory.

    :return:
        A bool - if the packaging process was successful
    """
    setup = os.path.join(package_root, 'setup.py')
    tests_root = os.path.join(package_root, 'tests')
    tests_setup = os.path.join(tests_root, 'setup.py')
    # Trying to call setuptools.sandbox.run_setup(setup, ['--version'])
    # resulted in a segfault, so we do this instead
    module_info = imp.find_module('version', [os.path.join(package_root, package_name)])
    version_mod = imp.load_module('%s.version' % package_name, *module_info)
    pkg_name_info = (package_name, version_mod.__version__)
    print('Building %s-%s' % pkg_name_info)
    # Expected artifact names; setuptools derives the same names from
    # setup.py, so these are used only for reporting and renaming.
    sdist = '%s-%s.tar.gz' % pkg_name_info
    whl = '%s-%s-py2.py3-none-any.whl' % pkg_name_info
    setuptools.sandbox.run_setup(setup, ['-q', 'sdist'])
    print(' - created %s' % sdist)
    _list_tgz(os.path.join(package_root, 'dist', sdist))
    setuptools.sandbox.run_setup(setup, ['-q', 'bdist_wheel', '--universal'])
    print(' - created %s' % whl)
    _list_zip(os.path.join(package_root, 'dist', whl))
    setuptools.sandbox.run_setup(setup, ['-q', 'clean'])
    if has_tests_package:
        # Build the companion tests package the same way, then move its
        # artifacts into the main dist/ directory.
        print('Building %s_tests-%s' % (package_name, version_mod.__version__))
        tests_sdist = '%s_tests-%s.tar.gz' % pkg_name_info
        tests_whl = '%s_tests-%s-py2.py3-none-any.whl' % pkg_name_info
        setuptools.sandbox.run_setup(tests_setup, ['-q', 'sdist'])
        print(' - created %s' % tests_sdist)
        _list_tgz(os.path.join(tests_root, 'dist', tests_sdist))
        setuptools.sandbox.run_setup(tests_setup, ['-q', 'bdist_wheel', '--universal'])
        print(' - created %s' % tests_whl)
        _list_zip(os.path.join(tests_root, 'dist', tests_whl))
        setuptools.sandbox.run_setup(tests_setup, ['-q', 'clean'])
        dist_dir = os.path.join(package_root, 'dist')
        tests_dist_dir = os.path.join(tests_root, 'dist')
        os.rename(
            os.path.join(tests_dist_dir, tests_sdist),
            os.path.join(dist_dir, tests_sdist)
        )
        os.rename(
            os.path.join(tests_dist_dir, tests_whl),
            os.path.join(dist_dir, tests_whl)
        )
        # tests/dist is now empty and can be removed.
        os.rmdir(tests_dist_dir)
    return True
|
19po/rtl102.5-playlist | refs/heads/master | Main.py | 1 | #!/usr/bin/env python
from PyQt4 import QtGui
from MainUI import MainUI
import sys
__author__ = 'postrowski'
# -*-coding: utf-8-*-
if __name__ == '__main__':
    # Create the Qt application, show the main window, and enter the
    # event loop; exit the process once the loop returns.
    app = QtGui.QApplication(sys.argv)
    ui = MainUI()
    ui.show()
    app.exec_()
    # NOTE(review): the return code of app.exec_() is discarded here;
    # the conventional form is sys.exit(app.exec_()) -- confirm intent.
    sys.exit()
|
Jeongseob/xen-coboost-sched | refs/heads/master | tools/python/logging/logging-0.4.9.2/test/logconf.py | 42 | #!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""
A simple-minded GUI configurator for the logging module, using Tkinter.
Should work under Python versions >= 1.5.2.
Copyright (C) 2002 Vinay Sajip. All Rights Reserved.
Configuration files are read/written using ConfigParser.
"""
"""
(C) 2002 Vinay Sajip. All rights reserved.
"""
from Tkinter import *
from tkFileDialog import *
from tkMessageBox import *
import os, sys, string, types
import ConfigParser
active = None
__version__ = "0.4.1"
DEFAULT_FILENAME = "logconf.ini"
LOGGING_LEVELS = (
("NOTSET", "NOTSET"),
("DEBUG", "DEBUG"),
("INFO", "INFO"),
("WARNING", "WARNING"),
("ERROR", "ERROR"),
("CRITICAL", "CRITICAL")
)
HANDLER_TYPES = (
("StreamHandlerProxy", "StreamHandler"),
("FileHandlerProxy", "FileHandler"),
("RotatingFileHandlerProxy", "RotatingFileHandler"),
("SocketHandlerProxy", "SocketHandler"),
("DatagramHandlerProxy", "DatagramHandler"),
("SysLogHandlerProxy", "SysLogHandler"),
("NTEventLogHandlerProxy", "NTEventLogHandler"),
("SMTPHandlerProxy", "SMTPHandler"),
("MemoryHandlerProxy", "MemoryHandler"),
("HTTPHandlerProxy", "HTTPHandler"),
# ("SOAPHandlerProxy", "SOAPHandler"),
)
OUTPUT_STREAMS = (
("sys.stdout", "sys.stdout"),
("sys.stderr", "sys.stderr")
)
FILE_MODES = (
("a", "a"),
("w", "w")
)
HTTP_METHODS = (
("GET", "GET"),
("POST", "POST")
)
SYSLOG_FACILITIES = (
("LOG_AUTH", "auth"),
("LOG_AUTHPRIV", "authpriv"),
("LOG_CRON", "cron"),
("LOG_DAEMON", "daemon"),
("LOG_KERN", "kern"),
("LOG_LPR", "lpr"),
("LOG_MAIL", "mail"),
("LOG_NEWS", "news"),
("LOG_AUTH", "security"),
("LOG_SYSLOG", "syslog"),
("LOG_USER", "user"),
("LOG_UUCP", "uucp"),
("LOG_LOCAL0", "local0"),
("LOG_LOCAL1", "local1"),
("LOG_LOCAL2", "local2"),
("LOG_LOCAL3", "local3"),
("LOG_LOCAL4", "local4"),
("LOG_LOCAL5", "local5"),
("LOG_LOCAL6", "local6"),
("LOG_LOCAL7", "local7"),
)
LOG_TYPES = (
("Application", "Application"),
("System", "System"),
("Security", "Security")
)
BOOLEAN_VALUES = (
("0", "False"),
("1", "True")
)
class Property:
    """A named, captioned value with an optional fixed set of choices.

    ``choices``, when given, is a sequence of (value, display_text)
    pairs; ``value`` is either a scalar or a list (for multi-select).
    Python 2 code: uses ``types.ListType``, ``string.join`` and ``map``.
    """
    def __init__(self, name, caption, value=None, choices=None):
        self.name = name          # internal identifier
        self.caption = caption    # label shown in the UI
        self.value = value        # current value (scalar or list)
        self.choices = choices    # (value, text) pairs, or None for free-form
    def getChoices(self):
        """Return the (value, text) choice pairs, or None."""
        return self.choices
    def isvalid(self, s):
        """Validation hook; always reports invalid (0) in this base class."""
        return 0
    def getCaption(self):
        """Return the display caption."""
        return self.caption
    def getValue(self):
        """Return the raw current value."""
        return self.value
    def getChoiceText(self, val):
        """Return the display text paired with *val*, or "" if not found."""
        rv = ""
        choices = self.getChoices()
        if choices:
            for choice in choices:
                if choice[0] == val:
                    rv = choice[1]
                    break
        return rv
    def setValue(self, val):
        """Replace the current value."""
        self.value = val
    def getValueText(self):
        """Return the value as display text; multiple values are
        comma-joined, mapped through the choice texts when choices exist."""
        if type(self.value) in [types.ListType, types.TupleType]:
            v = list(self.value)
        else:
            v = [self.value]
        choices = self.getChoices()
        if choices:
            v = map(self.getChoiceText, v)
        return string.join(v, ',')
class PropertyHolder:
    """Container mapping property names to Property instances, with an
    explicit display ordering in ``propnames``."""
    def __init__(self, dict):
        # NOTE(review): the parameter shadows the builtin ``dict``.
        self.dict = dict                  # name -> Property mapping
        self.propnames = []               # display order of property names
        self.onPropListChanged = None     # optional change-notification callback
    def getPropNames(self):
        """
        Return the property names in the order in which they are to
        be listed.
        """
        return self.propnames
    def getProp(self, name):
        """Return the Property registered under *name*."""
        return self.dict[name]
    def isReadonly(self, name):
        """Read-only hook; this base class treats every property as editable."""
        return 0
    #convenience methods
    def getPropValue(self, name):
        """Return the raw value of the named property."""
        return self.dict[name].value
    def setPropValue(self, name, value):
        """Set the value of the named property."""
        self.dict[name].setValue(value)
LINE_COLOUR = '#999999'
class ScrollingList(Frame):
    """A Listbox with an attached vertical scrollbar; selection changes
    are forwarded to ``self.listener.onListChange``.  Python 2 code
    (uses ``apply`` and ``xrange``)."""
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent)
        self.parent = parent
        self.listener = self.parent   # receiver of onListChange notifications
        self.sb = Scrollbar(self, orient=VERTICAL)
        # Wire the listbox and scrollbar to each other.
        kwargs["yscrollcommand"] = self.sb.set
        self.list = apply(Listbox, (self,) + args, kwargs)
        self.sb.config(command=self.list.yview)
        self.sb.pack(side=RIGHT, fill=Y)
        self.list.pack(side=LEFT, fill=BOTH,expand=1)
        self.list.bind('<ButtonRelease-1>', self.onListChange)
        self.choices = None           # (value, text) pairs currently shown
    def setContents(self, choices, value):
        """Populate the listbox with *choices* and select *value*.
        A list-valued *value* enables EXTENDED (multi) selection."""
        self.choices = choices
        self.value = value
        self.list.delete(0, END)
        if type(value) == types.ListType:
            sm = EXTENDED
        else:
            sm = BROWSE
        self.list.configure(selectmode=sm)
        i = 0
        for choice in choices:
            self.list.insert(END, choice[1])
            if sm == EXTENDED:
                if choice[0] in value:
                    self.list.select_set(i)
            else:
                if choice[0] == value:
                    self.list.select_set(i)
            i = i + 1
    def getValue(self):
        """Return the selected choice value(s): a list in multi-select
        mode, otherwise a single value."""
        if type(self.value) == types.ListType:
            multi = 1
            rv = []
        else:
            multi = 0
        # NOTE(review): in single-select mode with no selection, ``rv``
        # is never bound and this raises UnboundLocalError -- confirm
        # whether an empty selection can occur here.
        for i in xrange(len(self.choices)):
            if self.list.select_includes(i):
                if not multi:
                    rv = self.choices[i][0]
                    break
                else:
                    rv.append(self.choices[i][0])
        return rv
    def onListChange(self, event):
        """Refresh the cached value and notify the listener."""
        self.value = self.getValue()
        self.listener.onListChange(self.value)
class PropertyHeader(Canvas):
    """Canvas drawing the 'Property' / 'Value' column header row for a
    PropertyCanvas; redraws its top rule on resize.  Python 2 code
    (uses ``has_key`` and ``apply``)."""
    def __init__(self, parent, *args, **kwargs):
        # Pop custom options before forwarding kwargs to Canvas.
        self.namewidth = 120    # pixel width of the name column
        if kwargs.has_key("namewidth"):
            self.namewidth = kwargs["namewidth"]
            del kwargs["namewidth"]
        self.rowheight = 16     # pixel height of the header row
        if kwargs.has_key("rowheight"):
            self.rowheight = kwargs["rowheight"]
            del kwargs["rowheight"]
        apply(Canvas.__init__, (self, parent)+args, kwargs)
        self.bind('<Configure>', self.onConfigure)
        x = 5
        y = 0
        wid = int(self.cget('width'))
        self.create_text(x, y, text='Property', anchor='nw')
        self.create_text(x + self.namewidth, y, text='Value', anchor='nw')
        # Column separator and top rule; the top rule is redrawn on resize.
        self.create_line(self.namewidth, 0, self.namewidth, self.rowheight, fill=LINE_COLOUR)
        self.tline = self.create_line(0, 0, wid, 0, fill=LINE_COLOUR)
        #self.create_line(0, 0, 0, self.rowheight, fill=LINE_COLOUR)
        #self.create_line(wid - 1, 0, wid - 1, self.rowheight, fill=LINE_COLOUR)
    def onConfigure(self, event):
        # Stretch the top rule to the new canvas width.
        self.delete(self.tline)
        self.tline = self.create_line(0, 0, event.width, 0, fill=LINE_COLOUR)
_popup = None
class PropertyCanvas(Canvas):
    def __init__(self, parent, *args, **kwargs):
        """Build the property grid canvas plus its shared edit widgets:
        an Entry for free-form values, a '...' button, and a popup
        ScrollingList for choice values."""
        # Pop custom options before forwarding kwargs to Canvas.
        self.namewidth = 120    # pixel width of the name column
        if kwargs.has_key("namewidth"):
            self.namewidth = kwargs["namewidth"]
            del kwargs["namewidth"]
        self.rowheight = 16     # pixel height of each property row
        if kwargs.has_key("rowheight"):
            self.rowheight = kwargs["rowheight"]
            del kwargs["rowheight"]
        apply(Canvas.__init__, (self, parent)+args, kwargs)
        self.namitems = []      # canvas text items for property names
        self.valitems = []      # canvas text items for property values
        self.lines = []         # horizontal grid lines
        self.pnames = []        # property names, in display order
        #Event bindings...
        self.bind('<Enter>', self.onEnter)
        self.bind('<Button-1>', self.onClick)
        self.bind('<Configure>', self.onConfigure)
        # Shared editing widgets, shown on demand for the clicked row.
        self.button = Button(height=self.rowheight, width=self.rowheight, text='...', command=self.onEdit)
        self.btnitem = None     # canvas window item holding the button
        self.editor = Entry()
        self.edititem = None    # canvas window item holding the editor
        # Borderless popup toplevel hosting the choice list.
        self.popup = Toplevel()
        self.popup.withdraw()
        self.popup.overrideredirect(1)
        self.list = ScrollingList(self.popup, background='white', relief=FLAT, borderwidth=0)
        self.list.pack(fill=BOTH, expand=1)
        self.list.listener = self
        self.listvisible = 0    # whether the choice popup is showing
    def clear(self):
        """Delete all name/value text items and grid lines from the canvas."""
        for itm in self.namitems:
            self.delete(itm)
        self.namitems = []
        for itm in self.valitems:
            self.delete(itm)
        self.valitems = []
        for lin in self.lines:
            self.delete(lin)
        self.lines = []
    def setPropertyHolder(self, ph):
        """Repopulate the grid from *ph*: one row per property name, in
        the holder's declared order."""
        self.ph = ph
        self.pnames = ph.getPropNames()
        wid = int(self.cget('width'))
        hei = int(self.cget('height'))
        self.clear()
        x = 5
        y = 0
        i = 0
        self.props = []
        for n in self.pnames:
            prop = self.ph.getProp(n)
            self.props.append(prop)
            # Per-row tags let individual cells be addressed later.
            tn = "n%d" % i
            tv = "v%d" % i
            self.namitems.append(self.create_text(x, y + 2, text=prop.getCaption(), anchor='nw', tags=tn))
            self.valitems.append(self.create_text(x + self.namewidth, y + 2, text=prop.getValueText(), anchor='nw', tags=tv))
            y = y + self.rowheight
            i = i + 1
        self.drawLines(wid, hei)
        #self.config(height=y)
def drawLines(self, wid, hei):
for lin in self.lines:
self.delete(lin)
self.lines = []
y = 0
for i in xrange(len(self.pnames)):
self.lines.append(self.create_line(0, y, wid, y, fill=LINE_COLOUR))
y = y + self.rowheight
self.lines.append(self.create_line(0, y, wid, y, fill=LINE_COLOUR))
self.create_line(self.namewidth, 0, self.namewidth, hei, fill=LINE_COLOUR)
def onEnter(self, event):
if not self.edititem and not self.listvisible:
self.focus_set()
def hideControls(self):
if self.listvisible:
self.popup.withdraw()
global _popup
_popup = None
self.listvisible = 0
if self.edititem:
self.ph.setPropValue(self.editprop.name, self.editor.get())
self.itemconfig(self.valitems[self.editrow], text=self.editprop.getValueText())
self.delete(self.edititem)
self.edititem = None
if self.btnitem:
self.delete(self.btnitem)
self.btnitem = None
def onClick(self, event):
row = event.y / self.rowheight
self.hideControls()
if row < len(self.pnames):
wid = int(self.cget('width'))
hei = self.rowheight
prop = self.props[row]
if not self.ph.isReadonly(self.pnames[row]):
self.editrow = row
self.editprop = prop
choices = prop.getChoices()
if choices != None:
val = prop.getValue()
self.list.setContents(choices, val)
self.listy = row * hei + self.rowheight
self.btnitem = self.create_window(wid - hei, row * hei, width=hei, height=hei, window=self.button, anchor='nw', tags='button')
else:
self.editor.delete(0, END)
self.editor.insert(0, prop.getValueText())
self.editor.select_range(0, END)
self.edititem = self.create_window(self.namewidth + 1, row * hei, width=wid - self.namewidth, height = hei + 1, window=self.editor, anchor='nw', tags='editor')
self.editor.focus_set()
def onConfigure(self, event):
self.hideControls()
self.drawLines(event.width, event.height)
self.configure(width=event.width, height=event.height)
def onEdit(self):
wid = int(self.cget('width'))
#self.listitem = self.create_window(self.namewidth + 1, self.listy, width=wid - self.namewidth - 1, height = self.rowheight * 3, window=self.list, anchor='nw', tags='list')
w = wid - self.namewidth - 1
h = self.rowheight * 5
x = self.winfo_rootx() + self.namewidth + 1
y = self.winfo_rooty() + self.listy
s = "%dx%d+%d+%d" % (w, h, x, y)
self.popup.deiconify()
self.popup.lift()
self.popup.focus_set()
self.listvisible = 1
self.list.focus_set()
#For some reason with 1.5.2 (Windows), making the geometry call
#immediately following the assignment to s doesn't work. So we
#do it here
self.popup.geometry(s)
global _popup
_popup = self.popup
def onListChange(self, val):
self.ph.setPropValue(self.editprop.name, val)
self.itemconfig(self.valitems[self.editrow], text=self.editprop.getValueText())
if type(val) != types.ListType:
self.hideControls()
class PropertyEditor(Frame):
    """Composite widget: a PropertyHeader above a PropertyCanvas body."""
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent)
        self.parent = parent
        # Layout options shared by the header and body.
        nw = kwargs.get("namewidth", 120)
        rh = kwargs.get("rowheight", 16)
        wid = kwargs.get("width", 300)
        hei = kwargs.get("height", 60)
        self.header = PropertyHeader(self, namewidth=nw, rowheight=rh, height=14, highlightthickness=0)
        self.body = PropertyCanvas(self, namewidth=nw, rowheight=rh, width=wid, height=hei, background='white', highlightthickness=0)
        self.header.pack(side=TOP, fill=X)
        self.body.pack(side=BOTTOM, fill=BOTH, expand=1)
    def setPropertyHolder(self, ph):
        """Delegate to the body canvas."""
        self.body.setPropertyHolder(ph)
class ADUPanel(Frame):
    """Add/Delete button bar; both actions delegate to the owning panel."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        self.add = Button(self, text="New", command=parent.onAdd)
        self.add.pack(side=LEFT) #, fill=X, expand=1)
        self.rmv = Button(self, text="Delete", command=parent.onDelete)
        self.rmv.pack(side=LEFT) #, fill=X, expand=1)
        #self.upd = Button(self, text="Update", command=parent.onUpdate)
        #self.upd.pack(side=RIGHT, fill=X, expand=1)
class ScrollList(Frame):
    """Listbox with an attached vertical scrollbar, exposed as self.list."""
    def __init__(self, parent, *args, **kwargs):
        Frame.__init__(self, parent)
        self.parent = parent
        self.sb = Scrollbar(self, orient=VERTICAL)
        # Wire listbox scrolling and the scrollbar to each other.
        kwargs["yscrollcommand"] = self.sb.set
        self.list = apply(Listbox, (self,) + args, kwargs)
        self.sb.config(command=self.list.yview)
        self.sb.pack(side=RIGHT, fill=Y)
        self.list.pack(side=LEFT, fill=BOTH,expand=1)
def sortqn(log1, log2):
    """cmp-style comparator ordering logger proxies by qualified name.

    The "(root)" logger always sorts first; all other loggers compare
    alphabetically by qualified name.
    """
    name1 = log1.getQualifiedName()
    name2 = log2.getQualifiedName()
    # Guard clauses: root wins outright.
    if name1 == "(root)":
        return -1
    if name2 == "(root)":
        return 1
    return cmp(name1, name2)
def sortn(obj1, obj2):
    """cmp-style comparator ordering property holders by their "name" value."""
    name1 = obj1.getPropValue("name")
    name2 = obj2.getPropValue("name")
    return cmp(name1, name2)
class LoggerPanel(Frame):
    """Panel listing all loggers with a property editor for the selection."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        label = Label(self, text="Loggers:")
        label.grid(row=0, column=0, sticky='w')
        self.slist = ScrollList(self, height=15, background='white')
        self.slist.list.bind('<ButtonRelease-1>', self.onListChange)
        self.slist.grid(row=1, column=0, sticky="nsew")
        self.adu = ADUPanel(self)
        self.adu.grid(row=2, column=0, sticky="we")
        label = Label(self, text="Properties of selected logger:")
        label.grid(row=3, column=0, sticky='w')
        self.pe = PropertyEditor(self, height=120, borderwidth=1)
        self.pe.grid(row=4, column=0, sticky='nsew')
        self.columnconfigure(0, weight=1)
        self.rowconfigure(1, weight=3)
        self.rowconfigure(4, weight=1)
    def setConfig(self, config):
        """Bind to a LoggingConfig and repopulate the logger list."""
        self.config = config
        #populate list of loggers
        llist = config.getLoggers()
        llist.sort(sortqn)
        self.slist.list.delete(0, END)
        self.pe.body.clear()
        self.names = []  # logger names, kept parallel to the listbox rows
        for logger in llist:
            self.names.append(logger.getPropValue("name"))
            self.slist.list.insert(END, logger.getQualifiedName())
    def onAdd(self):
        """Create a new logger under the currently selected parent."""
        items = self.slist.list.curselection()
        if not len(items):
            showerror("No Parent Selected", "You haven't selected a parent logger.")
        else:
            idx = int(items[0])
            parent = self.config.getLogger(self.names[idx])
            # getLogger(None) creates a fresh logger proxy.
            log = self.config.getLogger(None)
            log.onChannelChanged = self.onChannelChanged
            log.setPropValue("parent", parent.getPropValue("name"))
            self.names.insert(1 + idx, log.getPropValue("name"))
            self.slist.list.insert(1 + idx, log.getQualifiedName())
            self.slist.list.select_clear(0, END)
            self.slist.list.select_set(1 + idx)
            self.pe.setPropertyHolder(log)
    def onDelete(self):
        """Delete the selected logger; the root logger is protected."""
        items = self.slist.list.curselection()
        if not len(items):
            showerror("No Item Selected", "You haven't selected anything to delete.")
        else:
            idx = int(items[0])
            name = self.slist.list.get(idx)
            if name == "(root)":
                showerror("Root Item Selected", "You cannot delete the root logger.")
            else:
                resp = askyesno("Logger Deletion", "Are you sure you want to delete logger '%s'?" % name)
                if resp:
                    #self.config.removeLogger(self.names[idx])
                    # Soft delete: flag the proxy rather than removing it.
                    log = self.config.getLogger(self.names[idx])
                    log.deleted = 1
                    self.slist.list.delete(idx)
                    del self.names[idx]
                    self.pe.body.clear()
    def onChannelChanged(self, nm, chname):
        """Refresh qualified names from logger nm onwards after a rename."""
        i = self.names.index(nm)
        sel = i
        while i < len(self.names):
            log = self.config.getLogger(self.names[i])
            self.slist.list.delete(i)
            self.slist.list.insert(i, log.getQualifiedName())
            i = i + 1
        self.slist.list.select_clear(0, END)
        self.slist.list.select_set(sel)
    def onListChange(self, event):
        """Show the clicked logger's properties in the editor."""
        self.pe.body.hideControls()
        items = self.slist.list.curselection()
        idx = int(items[0])
        name = self.names[idx]
        log = self.config.getLogger(name)
        self.pe.setPropertyHolder(log)
class HandlerPanel(Frame):
    """Panel listing handlers with a property editor for the selection."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        label = Label(self, text="Handlers:")
        label.grid(row=0, column=0, sticky='w')
        self.slist = ScrollList(self, height=6, background='white')
        self.slist.list.bind('<ButtonRelease-1>', self.onListChange)
        self.slist.grid(row=1, column=0, sticky="nsew")
        self.adu = ADUPanel(self)
        self.adu.grid(row=2, column=0, sticky="we")
        label = Label(self, text="Properties of selected handler:")
        label.grid(row=3, column=0, sticky='w')
        self.pe = PropertyEditor(self, height=90, borderwidth=1)
        self.pe.grid(row=4, column=0, sticky='nsew')
        self.columnconfigure(0, weight=1)
        self.rowconfigure(1, weight=1)
        self.rowconfigure(4, weight=1)
    def setConfig(self, config):
        """Bind to a LoggingConfig and repopulate the handler list."""
        self.config = config
        #populate list of handlers
        hlist = config.getHandlers()
        hlist.sort(sortn)
        self.slist.list.delete(0, END)
        self.pe.body.clear()
        for hand in hlist:
            # Hook so a morphing handler proxy can refresh this panel.
            hand.onPropListChanged = self.onPropListChanged
            self.slist.list.insert(END, hand.getPropValue("name"))
    def onAdd(self):
        """Create a new handler and select it."""
        self.pe.body.hideControls()
        hand = self.config.getHandler(None)
        self.slist.list.insert(END, hand.getProp("name").getValueText())
        self.slist.list.select_clear(0, END)
        self.slist.list.select_set(END)
        hand.onPropListChanged = self.onPropListChanged
        self.pe.setPropertyHolder(hand)
    def onDelete(self):
        """Delete the selected handler unless a logger still uses it."""
        items = self.slist.list.curselection()
        if not len(items):
            showerror("No Item Selected", "You haven't selected anything to delete")
        else:
            name = self.slist.list.get(int(items[0]))
            log = self.config.handlerIsUsed(name)
            if log:
                showerror("Handler in use",
                    "The handler '%s' is being used by logger '%s'"\
                    ", so it cannot be deleted." % (
                    name, log))
            else:
                self.config.removeHandler(name)
                self.slist.list.delete(items)
                self.pe.body.clear()
    def onUpdate(self):
        """Debugging stub; not wired to any button by default."""
        print "handler update"
    def onListChange(self, event):
        """Show the clicked handler's properties."""
        self.pe.body.hideControls()
        items = self.slist.list.curselection()
        name = self.slist.list.get(int(items[0]))
        hand = self.config.getHandler(name)
        self.pe.setPropertyHolder(hand)
    def onPropListChanged(self, newhand):
        """Re-display after a handler proxy morphs to a new subclass."""
        newhand.onPropListChanged = self.onPropListChanged
        self.pe.setPropertyHolder(newhand)
class FormatterPanel(Frame):
    """Panel listing formatters with a property editor for the selection."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        label = Label(self, text="Formatters:")
        label.grid(row=0, column=0, sticky='w')
        self.slist = ScrollList(self, height=4, background='white')
        self.slist.list.bind('<ButtonRelease-1>', self.onListChange)
        self.slist.grid(row=1, column=0, sticky="nsew")
        self.adu = ADUPanel(self)
        self.adu.grid(row=2, column=0, sticky="ew")
        label = Label(self, text="Properties of selected formatter:")
        label.grid(row=3, column=0, sticky='w')
        self.pe = PropertyEditor(self, height=60, borderwidth=1)
        self.pe.grid(row=4, column=0, sticky='nsew')
        self.columnconfigure(0, weight=1)
        self.rowconfigure(1, weight=1)
        self.rowconfigure(4, weight=1)
    def setConfig(self, config):
        """Bind to a LoggingConfig and repopulate the formatter list."""
        self.config = config
        #populate list of formatters
        flist = config.getFormatters()
        flist.sort(sortn)
        self.slist.list.delete(0, END)
        self.pe.body.clear()
        for form in flist:
            self.slist.list.insert(END, form.getPropValue("name"))
    def onAdd(self):
        """Create a new formatter and select it."""
        self.pe.body.hideControls()
        fmt = self.config.getFormatter(None)
        self.slist.list.insert(END, fmt.getProp("name").getValueText())
        self.slist.list.select_clear(0, END)
        i = self.slist.list.size()
        self.slist.list.select_set(i - 1)
        self.pe.setPropertyHolder(fmt)
    def onDelete(self):
        """Delete the selected formatter unless a handler still uses it."""
        self.pe.body.hideControls()
        items = self.slist.list.curselection()
        if not len(items):
            showerror("No Item Selected", "You haven't selected anything to delete")
        else:
            name = self.slist.list.get(int(items[0]))
            h = self.config.formatterIsUsed(name)
            if h:
                showerror("Formatter in use",
                    "The formatter '%s' is being used by handler '%s'"\
                    ", so it cannot be deleted." % (
                    name, h))
            else:
                self.config.removeFormatter(name)
                self.slist.list.delete(items)
                self.pe.body.clear()
    def onUpdate(self):
        self.pe.body.hideControls()
    def onListChange(self, event):
        """Show the clicked formatter's properties."""
        self.pe.body.hideControls()
        items = self.slist.list.curselection()
        name = self.slist.list.get(int(items[0]))
        fmt = self.config.getFormatter(name)
        self.pe.setPropertyHolder(fmt)
class FilterPanel(Frame):
    """Panel listing filters with a property editor for the selection."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        label = Label(self, text="Filters:")
        label.grid(row=0, column=0, sticky='w')
        self.slist = ScrollList(self, height=4, background='white')
        self.slist.list.bind('<ButtonRelease-1>', self.onListChange)
        self.slist.grid(row=1, column=0, sticky="nsew")
        self.adu = ADUPanel(self)
        self.adu.grid(row=2, column=0, sticky="ew")
        label = Label(self, text="Properties of selected filter:")
        label.grid(row=3, column=0, sticky='w')
        self.pe = PropertyEditor(self, height=60, borderwidth=1)
        self.pe.grid(row=4, column=0, sticky='nsew')
        self.columnconfigure(0, weight=1)
        self.rowconfigure(1, weight=1)
        self.rowconfigure(4, weight=1)
    def setConfig(self, config):
        """Bind to a LoggingConfig and repopulate the filter list."""
        self.config = config
        #populate list of filters
        flist = config.getFilters()
        flist.sort(sortn)
        self.slist.list.delete(0, END)
        self.pe.body.clear()
        for filt in flist:
            self.slist.list.insert(END, filt.getPropValue("name"))
    def onAdd(self):
        """Create a new filter and select it."""
        self.pe.body.hideControls()
        filt = self.config.getFilter(None)
        self.slist.list.insert(END, filt.getProp("name").getValueText())
        self.slist.list.select_clear(0, END)
        i = self.slist.list.size()
        self.slist.list.select_set(i - 1)
        self.pe.setPropertyHolder(filt)
    def onDelete(self):
        """Delete the selected filter unless it is still in use."""
        self.pe.body.hideControls()
        items = self.slist.list.curselection()
        if not len(items):
            showerror("No Item Selected", "You haven't selected anything to delete")
        else:
            name = self.slist.list.get(int(items[0]))
            h = self.config.filterIsUsed(name)
            if h:
                showerror("Filter in use",
                    "The filter '%s' is being used by '%s'"\
                    ", so it cannot be deleted." % (
                    name, h))
            else:
                self.config.removeFilter(name)
                self.slist.list.delete(items)
                self.pe.body.clear()
    def onUpdate(self):
        self.pe.body.hideControls()
    def onListChange(self, event):
        """Show the clicked filter's properties."""
        self.pe.body.hideControls()
        items = self.slist.list.curselection()
        name = self.slist.list.get(int(items[0]))
        filt = self.config.getFilter(name)
        self.pe.setPropertyHolder(filt)
class ConfigPanel(Frame):
    """Button bar offering whole-configuration actions: Load, Save,
    Save as and Reset. Each button delegates to the parent's handler."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        self.load = Button(self, text="Load...", command=parent.onLoad)
        self.load.pack(side=LEFT)
        self.save = Button(self, text="Save", command=parent.onSave)
        self.save.pack(side=LEFT)
        # Fix: this button was previously also assigned to self.save,
        # silently clobbering the reference to the "Save" button above.
        self.saveas = Button(self, text="Save as...", command=parent.onSaveAs)
        self.saveas.pack(side=LEFT)
        self.reset = Button(self, text="Reset", command=parent.onReset)
        self.reset.pack(side=RIGHT)
class Configurator(Frame):
    """Top-level application frame wiring together the logger, handler and
    formatter panels plus the load/save/reset button bar."""
    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.parent = parent
        self.llist = LoggerPanel(self)
        self.llist.grid(row=0, column=0, rowspan=2, sticky='nsew')
        spacer = Canvas(self, width=2, highlightthickness=0)
        spacer.grid(row=0, column=1, rowspan=2, sticky='ns')
        self.hlist = HandlerPanel(self)
        self.hlist.grid(row=0, column=2, sticky='nsew')
        self.flist = FormatterPanel(self)
        self.flist.grid(row=1, column=2, sticky='nsew')
        self.cfg = ConfigPanel(self)
        self.cfg.grid(row=2, column=0, columnspan=2, sticky='w')
        self.filename = None
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)
        self.columnconfigure(2, weight=1)
        label = Label(self, text="Copyright (C) 2002 Vinay Sajip. All rights reserved.", foreground='brown')
        label.grid(row=3, column=0, columnspan=2, sticky='w')
        # An optional configuration file name may be given on the command line.
        if len(sys.argv) > 1:
            fn = sys.argv[1]
            try:
                self.loadFile(fn)
            except Exception, e:
                print e
                raise
        else:
            self.onReset(0)
        self.setTitle()
        self.focus_set()
    def setTitle(self):
        """Set the toplevel title from the current file name (or untitled)."""
        if self.filename:
            s = os.path.split(self.filename)[1]
        else:
            s = "untitled"
        self.winfo_toplevel().title("%s - Python Logging Configurator V%s" % (s, __version__))
    def loadFile(self, fn):
        """Read configuration file fn and refresh all panels."""
        self.config = LoggingConfig()
        self.config.read(fn)
        self.filename = fn
        self.llist.setConfig(self.config)
        self.hlist.setConfig(self.config)
        self.flist.setConfig(self.config)
        self.setTitle()
    def onLoad(self):
        """Prompt for a configuration file and load it."""
        fn = askopenfilename(title="Choose configuration file", filetypes=[("Logging configurations", "*.ini"), ("All files", "*.*")])
        if fn:
            self.loadFile(fn)
    def onSaveAs(self):
        """Prompt for a file name and save the configuration to it."""
        if self.filename:
            fn = os.path.split(self.filename)[1]
        else:
            fn = DEFAULT_FILENAME
        fn = asksaveasfilename(title="Save configuration as", initialfile=fn, filetypes=[("Logging configurations", "*.ini"), ("All files", "*.*")])
        if fn:
            self.config.save(fn)
            self.filename = fn
            self.setTitle()
    def onSave(self):
        """Save to the current file, falling back to Save As if unnamed."""
        if not self.filename:
            self.onSaveAs()
        else:
            self.config.save(self.filename)
    def onReset(self, confirm=1):
        """Start a fresh empty configuration, optionally asking first."""
        if not confirm:
            doit = 1
        else:
            doit = askyesno("Reset", "Are you sure you want to reset?")
        if doit:
            self.config = LoggingConfig()
            self.llist.setConfig(self.config)
            self.hlist.setConfig(self.config)
            self.flist.setConfig(self.config)
            self.setTitle()
# -- general properties
class NameProperty(Property):
    """The identifying "name" property shared by all proxy objects."""
    def __init__(self, value=None):
        Property.__init__(self, "name", "Name", value)
class LevelProperty(Property):
    """Logging level property constrained to the LOGGING_LEVELS choices."""
    def __init__(self, value=None):
        Property.__init__(self, "level", "Level", value)
    def getChoices(self):
        return LOGGING_LEVELS
# -- formatter properties
class FormatProperty(Property):
    """Formatter message-format string property."""
    def __init__(self, value=None):
        Property.__init__(self, "format", "Format", value)
class DateFormatProperty(Property):
    """Formatter date-format string property."""
    def __init__(self, value=None):
        Property.__init__(self, "datefmt", "Date Format", value)
class FormatterProxy(PropertyHolder):
    """Property holder describing a [formatter_x] configuration section."""
    def __init__(self, config, dict):
        self.config = config
        PropertyHolder.__init__(self, dict)
        prop = NameProperty(dict.get("name", ""))
        self.dict["name"] = prop
        prop = FormatProperty(dict.get("format", "%(asctime)s %(levelname)s %(message)s"))
        self.dict["format"] = prop
        prop = DateFormatProperty(dict.get("datefmt", ""))
        self.dict["datefmt"] = prop
        self.propnames = ["name", "format", "datefmt"]
    def isReadonly(self, name):
        # The formatter's name is its identity and cannot be edited.
        return name == "name"
    def writeConfig(self, file):
        """Write this formatter's section to an open config file."""
        file.write("[formatter_%s]\n" % self.getPropValue("name"))
        file.write("format=%s\n" % self.getPropValue("format"))
        file.write("datefmt=%s\n\n" % self.getPropValue("datefmt"))
# -- filter properties
class LoggerNameProperty(Property):
    """The logger name ("lname") a filter matches against."""
    def __init__(self, value=None):
        Property.__init__(self, "lname", "Name", value)
class FilterProxy(PropertyHolder):
    """Property holder describing a [filter_x] configuration section.

    Exposes a read-only "name" plus the logger name ("lname") the filter
    matches against.
    """
    def __init__(self, config, dict):
        self.config = config
        PropertyHolder.__init__(self, dict)
        prop = NameProperty(dict.get("name", ""))
        self.dict["name"] = prop
        prop = LoggerNameProperty(dict.get("lname", ""))
        self.dict["lname"] = prop
        self.propnames = ["name", "lname"]
    def isReadonly(self, name):
        # The filter's name is its identity and cannot be edited.
        return name == "name"
    def writeConfig(self, file):
        """Write this filter's section to an open config file."""
        file.write("[filter_%s]\n" % self.getPropValue("name"))
        # Fix: end with a blank line so consecutive sections are separated,
        # matching every other writeConfig implementation in this file.
        file.write("lname=%s\n\n" % self.getPropValue("lname"))
# -- handler properties and proxies
class HandlerTypeProperty(Property):
    """Handler class property constrained to the HANDLER_TYPES choices."""
    def __init__(self, value=None):
        Property.__init__(self, "class", "Type", value)
    def getChoices(self):
        return HANDLER_TYPES
class FormatterProperty(Property):
    """Choice property selecting one of the configured formatters."""
    def __init__(self, config, value=None):
        self.config = config
        Property.__init__(self, "formatter", "Formatter", value)
    def getChoices(self):
        return self.config.getFormatterChoice()
class HandlerProxy(PropertyHolder):
    """Base property holder for [handler_x] configuration sections.

    Holds the common name/class/level/formatter properties; subclasses add
    handler-specific ones. Changing the "class" property morphs the proxy
    into an instance of the matching *HandlerProxy subclass.
    """
    def __init__(self, config, dict):
        self.config = config
        PropertyHolder.__init__(self, dict)
        prop = NameProperty(dict.get("name", ""))
        self.dict["name"] = prop
        prop = HandlerTypeProperty(dict.get("class", "StreamHandlerProxy"))
        self.dict["class"] = prop
        prop = FormatterProperty(self.config, dict.get("formatter", ""))
        self.dict["formatter"] = prop
        prop = LevelProperty(dict.get("level", "NOTSET"))
        self.dict["level"] = prop
        self.propnames = ["name", "class", "level", "formatter"]
    def isReadonly(self, name):
        # The handler's name is its identity and cannot be edited.
        return (name == "name")
    def setPropValue(self, name, value):
        """Store the value; setting "class" rebuilds this proxy as the
        corresponding subclass, carrying over the common property values."""
        PropertyHolder.setPropValue(self, name, value)
        if (name == "class"): #morph type of handler
            #print "try morph -> %s" % value
            # NOTE: eval of a class name; values come from the fixed
            # HANDLER_TYPES choice list rather than free user input.
            try:
                klass = eval(value)
            except Exception, e:
                print e
                klass = None
            if klass:
                n = self.getPropValue("name")
                d = {
                    "name": n,
                    "class": value,
                    "formatter": self.getPropValue("formatter"),
                    "level": self.getPropValue("level"),
                }
                newhand = klass(self.config, d)
                self.config.handlers[n] = newhand #FIXME encapsulation
                if self.onPropListChanged:
                    self.onPropListChanged(newhand)
    def writeConfig(self, file):
        """Write the common part of the section; subclasses append to it."""
        file.write("[handler_%s]\n" % self.getPropValue("name"))
        s = self.getProp("class").getValueText()
        # Only StreamHandler/FileHandler live in the logging core package;
        # the rest are qualified with the handlers submodule.
        if not s in ["StreamHandler", "FileHandler"]:
            s = "handlers." + s
        file.write("class=%s\n" % s)
        file.write("level=%s\n" % self.getPropValue("level"))
        file.write("formatter=%s\n" % self.getPropValue("formatter"))
class StreamProperty(Property):
    """Output stream property constrained to the OUTPUT_STREAMS choices."""
    def __init__(self, config, value=None):
        self.config = config
        Property.__init__(self, "stream", "Stream", value)
    def getChoices(self):
        return OUTPUT_STREAMS
class StreamHandlerProxy(HandlerProxy):
    """Handler proxy for StreamHandler; adds a "stream" property."""
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = StreamProperty(self.config, dict.get("stream", "sys.stderr"))
        self.dict["stream"] = prop
        self.propnames.append("stream")
    def writeConfig(self, file):
        HandlerProxy.writeConfig(self, file)
        file.write("stream=%s\n" % self.getPropValue("stream"))
        # The stream is written unquoted: args is eval'd when the config
        # file is processed, so e.g. sys.stderr must stay an expression.
        file.write("args=(%s,)\n\n" % self.getPropValue("stream"))
    def readConfig(self, sectname):
        """Load the extra stream property from section sectname."""
        prop = StreamProperty(self.config, self.config.get(sectname, "stream"))
        self.dict["stream"] = prop
        self.propnames.append("stream")
class FilenameProperty(Property):
    """Log file name property for file-based handlers."""
    def __init__(self, value=None):
        Property.__init__(self, "filename", "File name", value)
class ModeProperty(Property):
    """File open-mode property constrained to the FILE_MODES choices."""
    def __init__(self, value=None):
        Property.__init__(self, "mode", "Mode", value)
    def getChoices(self):
        return FILE_MODES
class MaxSizeProperty(Property):
    """Rollover size threshold (bytes) for rotating file handlers."""
    def __init__(self, value=None):
        Property.__init__(self, "maxsize", "Maximum Size (bytes)", value)
class BackupCountProperty(Property):
    """Number of rolled-over backup files to keep."""
    def __init__(self, value=None):
        Property.__init__(self, "backcount", "Backup Count", value)
class FileHandlerProxy(HandlerProxy):
    """Handler proxy for FileHandler; adds filename and open mode."""
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = FilenameProperty(dict.get("filename", "python.log"))
        self.dict["filename"] = prop
        prop = ModeProperty(dict.get("mode", "a"))
        self.dict["mode"] = prop
        self.propnames.extend(["filename", "mode"])
    def writeConfig(self, file):
        HandlerProxy.writeConfig(self, file)
        fn = self.getPropValue("filename")
        file.write("filename=%s\n" % fn)
        mode = self.getPropValue("mode")
        file.write("mode=%s\n" % mode)
        file.write("args=('%s', '%s')\n\n" % (fn, mode))
    def readConfig(self, sectname):
        """Load filename/mode from section sectname."""
        prop = FilenameProperty(self.config.get(sectname, "filename"))
        self.dict["filename"] = prop
        prop = ModeProperty(self.config.get(sectname, "mode"))
        self.dict["mode"] = prop
        self.propnames.extend(["filename", "mode"])
class RotatingFileHandlerProxy(FileHandlerProxy):
    """Handler proxy for RotatingFileHandler; adds rollover size/count."""
    def __init__(self, config, dict):
        FileHandlerProxy.__init__(self, config, dict)
        prop = MaxSizeProperty(dict.get("maxsize", "0"))
        self.dict["maxsize"] = prop
        prop = BackupCountProperty(dict.get("backcount", "1"))
        self.dict["backcount"] = prop
        self.propnames.extend(["maxsize", "backcount"])
    def writeConfig(self, file):
        # Deliberately calls HandlerProxy.writeConfig (skipping the
        # FileHandlerProxy version) so a single args line with all four
        # values can be written here instead.
        HandlerProxy.writeConfig(self, file)
        fn = self.getPropValue("filename")
        file.write("filename=%s\n" % fn)
        mode = self.getPropValue("mode")
        file.write("mode=%s\n" % mode)
        ms = self.getPropValue("maxsize")
        file.write("maxsize=%s\n" % ms)
        bc = self.getPropValue("backcount")
        file.write("backcount=%s\n" % bc)
        file.write("args=('%s', '%s', %s, %s)\n\n" % (fn, mode, ms, bc))
    def readConfig(self, sectname):
        """Load the rotating-handler extras from section sectname."""
        FileHandlerProxy.readConfig(self, sectname)
        prop = MaxSizeProperty(self.config.get(sectname, "maxsize"))
        self.dict["maxsize"] = prop
        prop = BackupCountProperty(self.config.get(sectname, "backcount"))
        self.dict["backcount"] = prop
        self.propnames.extend(["maxsize", "backcount"])
class HostProperty(Property):
    """Remote host name property for network handlers."""
    def __init__(self, value=None):
        Property.__init__(self, "host", "Host", value)
class PortProperty(Property):
    """Remote port property for network handlers."""
    def __init__(self, value=None):
        Property.__init__(self, "port", "Port", value)
class SocketHandlerProxy(HandlerProxy):
    """Handler proxy for SocketHandler (TCP); adds host and port."""
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = HostProperty(dict.get("host", "localhost"))
        self.dict["host"] = prop
        prop = PortProperty(dict.get("port", "handlers.DEFAULT_TCP_LOGGING_PORT"))
        self.dict["port"] = prop
        self.propnames.extend(["host", "port"])
    def writeConfig(self, file):
        HandlerProxy.writeConfig(self, file)
        host = self.getPropValue("host")
        file.write("host=%s\n" % host)
        port = self.getPropValue("port")
        file.write("port=%s\n" % port)
        # port unquoted in args: may be a symbolic constant expression.
        file.write("args=('%s', %s)\n\n" % (host, port))
    def readConfig(self, sectname):
        """Load host/port from section sectname."""
        prop = HostProperty(self.config.get(sectname, "host"))
        self.dict["host"] = prop
        prop = PortProperty(self.config.get(sectname, "port"))
        self.dict["port"] = prop
        self.propnames.extend(["host", "port"])
class DatagramHandlerProxy(HandlerProxy):
    """Handler proxy for DatagramHandler (UDP); adds host and port.

    Mirrors SocketHandlerProxy apart from the default port constant.
    """
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = HostProperty(dict.get("host", "localhost"))
        self.dict["host"] = prop
        prop = PortProperty(dict.get("port", "handlers.DEFAULT_UDP_LOGGING_PORT"))
        self.dict["port"] = prop
        self.propnames.extend(["host", "port"])
    def writeConfig(self, file):
        HandlerProxy.writeConfig(self, file)
        host = self.getPropValue("host")
        file.write("host=%s\n" % host)
        port = self.getPropValue("port")
        file.write("port=%s\n" % port)
        # port unquoted in args: may be a symbolic constant expression.
        file.write("args=('%s', %s)\n\n" % (host, port))
    def readConfig(self, sectname):
        """Load host/port from section sectname."""
        prop = HostProperty(self.config.get(sectname, "host"))
        self.dict["host"] = prop
        prop = PortProperty(self.config.get(sectname, "port"))
        self.dict["port"] = prop
        self.propnames.extend(["host", "port"])
class URLProperty(Property):
    """Target URL property for web-based handlers."""
    def __init__(self, value=None):
        Property.__init__(self, "url", "URL", value)
class MethodProperty(Property):
    """HTTP method property constrained to the HTTP_METHODS choices."""
    def __init__(self, value=None):
        Property.__init__(self, "method", "HTTP Method", value)
    def getChoices(self):
        return HTTP_METHODS
class HTTPHandlerProxy(HandlerProxy):
    """Handler proxy for HTTPHandler; adds host, port, URL and method."""
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = HostProperty(dict.get("host", "localhost"))
        self.dict["host"] = prop
        prop = PortProperty(dict.get("port", "80"))
        self.dict["port"] = prop
        prop = URLProperty(dict.get("url", ""))
        self.dict["url"] = prop
        prop = MethodProperty(dict.get("method", "GET"))
        self.dict["method"] = prop
        self.propnames.extend(["host", "port", "url", "method"])
    def writeConfig(self, file):
        HandlerProxy.writeConfig(self, file)
        host = self.getPropValue("host")
        file.write("host=%s\n" % host)
        port = self.getPropValue("port")
        file.write("port=%s\n" % port)
        url = self.getPropValue("url")
        file.write("url=%s\n" % url)
        meth = self.getPropValue("method")
        file.write("method=%s\n" % meth)
        # args combines host and port into a single 'host:port' string.
        file.write("args=('%s:%s', '%s', '%s')\n\n" % (host, port, url, meth))
    def readConfig(self, sectname):
        """Load the HTTP-specific properties from section sectname."""
        prop = HostProperty(self.config.get(sectname, "host"))
        self.dict["host"] = prop
        prop = PortProperty(self.config.get(sectname, "port"))
        self.dict["port"] = prop
        prop = URLProperty(self.config.get(sectname, "url"))
        self.dict["url"] = prop
        prop = MethodProperty(self.config.get(sectname, "method"))
        self.dict["method"] = prop
        self.propnames.extend(["host", "port", "url", "method"])
class SOAPHandlerProxy(HandlerProxy):
    """Handler proxy for SOAPHandler; adds host, port and URL."""
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = HostProperty(dict.get("host", "localhost"))
        self.dict["host"] = prop
        prop = PortProperty(dict.get("port", "80"))
        self.dict["port"] = prop
        prop = URLProperty(dict.get("url", ""))
        self.dict["url"] = prop
        self.propnames.extend(["host", "port", "url"])
    def writeConfig(self, file):
        HandlerProxy.writeConfig(self, file)
        host = self.getPropValue("host")
        file.write("host=%s\n" % host)
        port = self.getPropValue("port")
        file.write("port=%s\n" % port)
        url = self.getPropValue("url")
        file.write("url=%s\n" % url)
        # args combines host and port into a single 'host:port' string.
        file.write("args=('%s:%s', '%s')\n\n" % (host, port, url))
    def readConfig(self, sectname):
        """Load the SOAP-specific properties from section sectname."""
        prop = HostProperty(self.config.get(sectname, "host"))
        self.dict["host"] = prop
        prop = PortProperty(self.config.get(sectname, "port"))
        self.dict["port"] = prop
        prop = URLProperty(self.config.get(sectname, "url"))
        self.dict["url"] = prop
        self.propnames.extend(["host", "port", "url"])
class FacilityProperty(Property):
    """Syslog facility property constrained to SYSLOG_FACILITIES."""
    def __init__(self, value=None):
        Property.__init__(self, "facility", "Facility", value)
    def getChoices(self):
        return SYSLOG_FACILITIES
class SysLogHandlerProxy(HandlerProxy):
    """Handler proxy for SysLogHandler; adds host, port and facility."""
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = HostProperty(dict.get("host", "localhost"))
        self.dict["host"] = prop
        prop = PortProperty(dict.get("port", "handlers.SYSLOG_UDP_PORT"))
        self.dict["port"] = prop
        prop = FacilityProperty(dict.get("facility", "handlers.SysLogHandler.LOG_USER"))
        self.dict["facility"] = prop
        self.propnames.extend(["host", "port", "facility"])
    def writeConfig(self, file):
        HandlerProxy.writeConfig(self, file)
        host = self.getPropValue("host")
        file.write("host=%s\n" % host)
        port = self.getPropValue("port")
        file.write("port=%s\n" % port)
        fac = self.getPropValue("facility")
        file.write("facility=%s\n" % fac)
        # args: (address tuple, facility constant expression).
        file.write("args=(('%s', %s), handlers.SysLogHandler.%s)\n\n" % (host, port, fac))
    def readConfig(self, sectname):
        """Load the syslog-specific properties from section sectname."""
        prop = HostProperty(self.config.get(sectname, "host"))
        self.dict["host"] = prop
        prop = PortProperty(self.config.get(sectname, "port"))
        self.dict["port"] = prop
        prop = FacilityProperty(self.config.get(sectname, "facility"))
        self.dict["facility"] = prop
        self.propnames.extend(["host", "port", "facility"])
class FromProperty(Property):
    """Sender address property for the SMTP handler."""
    def __init__(self, value=None):
        Property.__init__(self, "from", "From", value)
class ToProperty(Property):
    """Comma-separated recipient address list for the SMTP handler."""
    def __init__(self, value=None):
        Property.__init__(self, "to", "To", value)
class SubjectProperty(Property):
    """Mail subject line property for the SMTP handler."""
    def __init__(self, value=None):
        Property.__init__(self, "subject", "Subject", value)
class SMTPHandlerProxy(HandlerProxy):
    """Handler proxy for SMTPHandler; adds host, port, from, to, subject."""
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = HostProperty(dict.get("host", "localhost"))
        self.dict["host"] = prop
        prop = PortProperty(dict.get("port", "25"))
        self.dict["port"] = prop
        prop = FromProperty(dict.get("from", ""))
        self.dict["from"] = prop
        prop = ToProperty(dict.get("to", ""))
        self.dict["to"] = prop
        prop = SubjectProperty(dict.get("subject", ""))
        self.dict["subject"] = prop
        self.propnames.extend(["host", "port", "from", "to", "subject"])
    def writeConfig(self, file):
        HandlerProxy.writeConfig(self, file)
        host = self.getPropValue("host")
        file.write("host=%s\n" % host)
        port = self.getPropValue("port")
        file.write("port=%s\n" % port)
        frm = self.getPropValue("from")
        file.write("from=%s\n" % frm)
        to = self.getPropValue("to")
        file.write("to=%s\n" % to)
        subj = self.getPropValue("subject")
        file.write("subject=%s\n" % subj)
        # Recipients become a Python list literal in args.
        to = string.split(to, ",")
        # NOTE(review): port is written to the section but not included in
        # args, so the handler appears to rely on the default SMTP port —
        # confirm against the config-file consumer.
        file.write("args=('%s', '%s', %s, '%s')\n\n" % (host, frm, repr(to), subj))
    def readConfig(self, sectname):
        """Load the SMTP-specific properties from section sectname."""
        prop = HostProperty(self.config.get(sectname, "host"))
        self.dict["host"] = prop
        prop = PortProperty(self.config.get(sectname, "port"))
        self.dict["port"] = prop
        prop = FromProperty(self.config.get(sectname, "from"))
        self.dict["from"] = prop
        prop = ToProperty(self.config.get(sectname, "to"))
        self.dict["to"] = prop
        prop = SubjectProperty(self.config.get(sectname, "subject"))
        self.dict["subject"] = prop
        self.propnames.extend(["host", "port", "from", "to", "subject"])
class CapacityProperty(Property):
    """Buffer capacity property for the memory handler."""
    def __init__(self, value=None):
        Property.__init__(self, "capacity", "Capacity", value)
class FlushLevelProperty(LevelProperty):
    """Level that triggers a buffer flush; inherits the level choice list."""
    def __init__(self, value=None):
        # Deliberately calls Property.__init__ (not LevelProperty.__init__)
        # so this property gets its own name/caption while still inheriting
        # LevelProperty.getChoices.
        Property.__init__(self, "flushlevel", "Flush Level", value)
class TargetProperty(Property):
    """Choice property selecting another handler as the memory handler's
    flush target."""
    def __init__(self, config, value=None):
        self.config = config
        Property.__init__(self, "target", "Target", value)
    def getChoices(self):
        handlers = self.config.getHandlerChoice()
        # self.dict is attached by MemoryHandlerProxy after construction.
        nm = self.dict["name"].getValueText()
        #can't be own target...
        return filter(lambda x,nm=nm: x[0] != nm, handlers)
class MemoryHandlerProxy(HandlerProxy):
    """Handler proxy for MemoryHandler; adds capacity, flush level and a
    target handler."""
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = CapacityProperty(dict.get("capacity", "10"))
        self.dict["capacity"] = prop
        prop = FlushLevelProperty(dict.get("flushlevel", "ERROR"))
        self.dict["flushlevel"] = prop
        prop = TargetProperty(config, dict.get("target", ""))
        # Give the target property access to our dict so it can exclude
        # this handler from its own choice list.
        prop.dict = self.dict
        self.dict["target"] = prop
        self.propnames.extend(["capacity", "flushlevel", "target"])
    def writeConfig(self, file):
        HandlerProxy.writeConfig(self, file)
        cap = self.getPropValue("capacity")
        file.write("capacity=%s\n" % cap)
        flvl = self.getPropValue("flushlevel")
        file.write("flushlevel=%s\n" % flvl)
        file.write("target=%s\n" % self.getPropValue("target"))
        # Both args values are written unquoted: capacity is numeric and the
        # flush level is a symbolic level name evaluated by the consumer.
        file.write("args=(%s, %s)\n\n" % (cap, flvl))
    def readConfig(self, sectname):
        """Load the memory-handler extras from section sectname."""
        prop = CapacityProperty(self.config.get(sectname, "capacity"))
        self.dict["capacity"] = prop
        prop = FlushLevelProperty(self.config.get(sectname, "flushlevel"))
        self.dict["flushlevel"] = prop
        prop = TargetProperty(self.config, self.config.get(sectname, "target"))
        prop.dict = self.dict
        self.dict["target"] = prop
        self.propnames.extend(["capacity", "flushlevel", "target"])
class AppNameProperty(Property):
    # "appname" property of an NTEventLogHandler: event-source name.
    def __init__(self, value=None):
        Property.__init__(self, "appname", "Application Name", value)
class DLLNameProperty(Property):
    # "dllname" property of an NTEventLogHandler: message-resource DLL.
    def __init__(self, value=None):
        Property.__init__(self, "dllname", "Message DLL name", value)
class LogTypeProperty(Property):
    # "logtype" property of an NTEventLogHandler (e.g. "Application").
    def __init__(self, value=None):
        Property.__init__(self, "logtype", "Log Type", value)
    def getChoices(self):
        # LOG_TYPES is a module-level list defined elsewhere in this file.
        return LOG_TYPES
class NTEventLogHandlerProxy(HandlerProxy):
    # Proxy for logging.handlers.NTEventLogHandler (Windows event log).
    def __init__(self, config, dict):
        HandlerProxy.__init__(self, config, dict)
        prop = AppNameProperty(dict.get("appname", "Python Application"))
        self.dict["appname"] = prop
        prop = DLLNameProperty(dict.get("dllname", ""))
        self.dict["dllname"] = prop
        prop = LogTypeProperty(dict.get("logtype", "Application"))
        self.dict["logtype"] = prop
        self.propnames.extend(["appname", "dllname", "logtype"])
    def writeConfig(self, file):
        # Emit this handler's section; "args" is the constructor-argument
        # tuple later consumed by logging.config.
        HandlerProxy.writeConfig(self, file)
        app = self.getPropValue("appname")
        file.write("appname=%s\n" % app)
        dll = self.getPropValue("dllname")
        file.write("dllname=%s\n" % dll)
        ltype = self.getPropValue("logtype")
        file.write("logtype=%s\n" % ltype)
        file.write("args=('%s', '%s', '%s')\n\n" % (app, dll, ltype))
    def readConfig(self, sectname):
        # Populate handler-specific properties from an existing section.
        prop = AppNameProperty(self.config.get(sectname, "appname"))
        self.dict["appname"] = prop
        prop = DLLNameProperty(self.config.get(sectname, "dllname"))
        self.dict["dllname"] = prop
        prop = LogTypeProperty(self.config.get(sectname, "logtype"))
        self.dict["logtype"] = prop
        self.propnames.extend(["appname", "dllname", "logtype"])
# -- logger properties and proxies
class ChannelProperty(Property):
    # "channel" property of a logger: its (unqualified) channel name.
    def __init__(self, value=None):
        Property.__init__(self, "channel", "Name", value)
class HandlerProperty(Property):
    # "handler" property of a logger: which handlers it writes to.
    def __init__(self, config, value=None):
        self.config = config
        Property.__init__(self, "handler", "Handlers", value)
    def getChoices(self):
        return self.config.getHandlerChoice()
class FilterProperty(Property):
    # "filter" property of a logger; relies on the config object
    # exposing getFilterChoice().
    def __init__(self, config, value=None):
        self.config = config
        Property.__init__(self, "filter", "Filters", value)
    def getChoices(self):
        return self.config.getFilterChoice()
class ParentProperty(Property):
    # "parent" property of a logger: the logger it propagates records to.
    def __init__(self, config, value=None):
        self.config = config
        Property.__init__(self, "parent", "Parent", value)
    def getChoices(self):
        # Return all loggers except this one (a logger can't be its own
        # parent).  List comprehension replaces the old filter() +
        # default-argument lambda hack; same list of (name, name) pairs.
        loggers = self.config.getLoggerChoice()
        nm = self.dict["name"].getValueText()
        return [x for x in loggers if x[0] != nm]
    def getValueText(self):
        # Return the dotted channel path of the parent chain (e.g. "a.b"),
        # or "" for the root logger itself.
        if self.dict.has_key("root"):
            return ""
        pn = Property.getValueText(self)
        rv = ""
        while pn != "(root)":
            parent = self.config.getLogger(pn)
            rv = parent.getPropValue("channel") + "." + rv
            pn = parent.getProp("parent").value
        # Strip the trailing dot accumulated by the loop (no-op when rv == "").
        return rv[:-1]
class PropagateProperty(Property):
    # "propagate" property of a logger: whether records flow to the parent.
    def __init__(self, config, value=None):
        self.config = config
        Property.__init__(self, "propagate", "Propagate", value)
    def getChoices(self):
        # BOOLEAN_VALUES is a module-level list defined elsewhere in this file.
        return BOOLEAN_VALUES
class LoggerProxy(PropertyHolder):
    # GUI/config proxy for a single logger section: presents the logger's
    # settings as Property objects keyed in self.dict and knows how to
    # serialize itself back to a [logger_xxx] section.
    def __init__(self, config, dict):
        self.config = config
        PropertyHolder.__init__(self, dict)
        prop = ChannelProperty(dict.get("channel", ""))
        self.dict["channel"] = prop
        prop = NameProperty(dict.get("name", ""))
        self.dict["name"] = prop
        prop = HandlerProperty(config, dict.get("handler", []))
        self.dict["handler"] = prop
        prop = LevelProperty(dict.get("level", "NOTSET"))
        self.dict["level"] = prop
        prop = PropagateProperty(self.config, dict.get("propagate", "1"))
        self.dict["propagate"] = prop
        prop = ParentProperty(config, dict.get("parent", "(root)"))
        # ParentProperty walks self.dict to exclude this logger from its
        # own choice list.
        prop.dict = self.dict
        self.dict["parent"] = prop
        self.propnames = ["parent", "channel", "level", "propagate", "handler"]
        self.onChannelChanged = None  # optional callback(name, new_channel)
        self.deleted = 0  # soft-delete flag honoured by LoggingConfig.save()
    def isReadonly(self, name):
        # The root logger's identity/propagation cannot be edited.
        return (name in ["channel", "parent", "propagate"]) and self.dict.has_key("root")
    def getQualifiedName(self):
        # Dotted name written out as the "qualname" config entry.
        pt = self.getProp("parent").getValueText()
        nm = self.getPropValue("channel")
        if pt:
            pn = pt + "." + nm
        else:
            pn = nm
        if pn == "":
            pn = "(root)"
        return pn
    def setPropValue(self, name, value):
        # Intercept channel renames so the UI can be notified.
        PropertyHolder.setPropValue(self, name, value)
        if (name == "channel"):
            nm = self.getPropValue("name")
            if self.onChannelChanged:
                self.onChannelChanged(nm, value)
    def writeConfig(self, file):
        # Emit this logger's [logger_xxx] section to an open file object.
        if self.dict.has_key("root"):
            name = "root"
        else:
            name = self.getPropValue("name")
        file.write("[logger_%s]\n" % name)
        file.write("level=%s\n" % self.getPropValue("level"))
        file.write("propagate=%s\n" % self.getPropValue("propagate"))
        file.write("channel=%s\n" % self.getPropValue("channel"))
        file.write("parent=%s\n" % self.getPropValue("parent"))
        file.write("qualname=%s\n" % self.getQualifiedName())
        file.write("handlers=%s\n\n" % string.join(self.getPropValue("handler"), ","))
# -- logging configuration
class LoggingConfig(ConfigParser.ConfigParser):
    """In-memory model of a logging configuration file.

    Holds proxy objects for the loggers, handlers and formatters defined
    in a ConfigParser-style logging config, and can read such a file in
    and write one back out.
    """
    def __init__(self, defaults=None):
        ConfigParser.ConfigParser.__init__(self, defaults)
        self.formatters = {}
        self.handlers = {}
        self.loggers = {}
        # The filters UI is unfinished (see the commented-out reader in
        # read() below), but getFilterChoice() dereferences self.filters,
        # so it must exist or FilterProperty.getChoices() raises
        # AttributeError.
        self.filters = {}
        #create root logger
        d = { "name": "(root)", "root": 1, "parent": "" }
        self.loggers["(root)"] = LoggerProxy(self, d)
    def read(self, fn):
        # Parse config file fn and build logger/handler/formatter proxies.
        ConfigParser.ConfigParser.read(self, fn)
        llist = self.get("loggers", "keys")
        llist = string.split(llist, ",")
        llist.remove("root")
        # The root logger is special-cased: update the pre-built proxy.
        sectname = "logger_root"
        log = self.loggers["(root)"]
        log.setPropValue("level", self.get(sectname, "level"))
        hlist = self.get(sectname, "handlers")
        hlist = string.split(hlist, ",")
        log.setPropValue("handler", hlist)
        for log in llist:
            sectname = "logger_%s" % log
            hlist = self.get(sectname, "handlers")
            hlist = string.split(hlist, ",")
            d = {
                "name" : log,
                "level" : self.get(sectname, "level"),
                "channel" : self.get(sectname, "channel"),
                "parent" : self.get(sectname, "parent"),
                "propagate" : self.get(sectname, "propagate"),
                "handler" : hlist,
            }
            self.loggers[log] = LoggerProxy(self, d)
        hlist = self.get("handlers", "keys")
        if len(hlist):
            hlist = string.split(hlist, ",")
            for hand in hlist:
                sectname = "handler_%s" % hand
                klass = self.get(sectname, "class")
                if klass[:9] == "handlers.":
                    klass = klass[9:]
                d = {
                    "name" : hand,
                    "class" : "%sProxy" % klass,
                    "level" : self.get(sectname, "level"),
                    "formatter" : self.get(sectname, "formatter"),
                }
                hobj = HandlerProxy(self, d)
                # NOTE: eval() of a class name taken from the config file;
                # trusted input only.  Re-classing the proxy to the
                # specific handler proxy type lets readConfig() pick up
                # the handler-specific extra properties.
                hobj.__class__ = eval("%sProxy" % klass)
                hobj.readConfig(sectname)
                self.handlers[hand] = hobj
        flist = self.get("formatters", "keys")
        if len(flist):
            flist = string.split(flist, ",")
            for form in flist:
                sectname = "formatter_%s" % form
                d = {
                    "name" : form,
                    "format" : self.get(sectname, "format", 1),
                    "datefmt" : self.get(sectname, "datefmt", 1),
                }
                self.formatters[form] = FormatterProxy(self, d)
#        flist = self.get("filters", "keys")
#        if len(flist):
#            flist = string.split(flist, ",")
#            for filt in flist:
#                sectname = "filter_%s" % filt
#                d = {
#                    "name" : filt,
#                    "lname" : self.get(sectname, "lname", 1),
#                }
#                self.filters[filt] = FilterProxy(self, d)
    def getFormatter(self, name):
        # Return the named formatter proxy, or create a fresh one with a
        # generated name when name is empty.
        if name:
            fmt = self.formatters[name]
        else:
            n = len(self.formatters.keys()) + 1
            name = "form%02d" % n
            fmt = FormatterProxy(self, {"name": name})
            self.formatters[name] = fmt
        return fmt
    def getHandler(self, name):
        # Return the named handler proxy, or create a StreamHandler proxy
        # with a generated name when name is empty.
        if name:
            hand = self.handlers[name]
        else:
            n = len(self.handlers.keys()) + 1
            name = "hand%02d" % n
            hand = StreamHandlerProxy(self, {"name": name})
            self.handlers[name] = hand
        return hand
    def getLogger(self, name):
        # Return the named logger proxy, or create one with a generated
        # name/channel when name is empty.
        if name:
            log = self.loggers[name]
        else:
            n = len(self.loggers.keys()) + 1
            name = "log%02d" % n
            log = LoggerProxy(self, {"name": name, "channel": name})
            self.loggers[name] = log
        return log
    def getFormatterChoice(self):
        # Sorted (name, name) pairs for choice widgets.
        values = []
        keys = self.formatters.keys()
        keys.sort()
        for f in keys:
            values.append((f, f))
        return tuple(values)
    def getHandlerChoice(self):
        values = []
        keys = self.handlers.keys()
        keys.sort()
        for f in keys:
            values.append((f, f))
        return tuple(values)
    def getFilterChoice(self):
        # Works (returning an empty tuple) now that __init__ always
        # initializes self.filters.
        values = []
        keys = self.filters.keys()
        keys.sort()
        for f in keys:
            values.append((f, f))
        return tuple(values)
    def getLoggerChoice(self):
        values = []
        keys = self.loggers.keys()
        keys.sort()
        for f in keys:
            values.append((f, f))
        return tuple(values)
    def getLoggers(self):
        return self.loggers.values()
    def getHandlers(self):
        return self.handlers.values()
    def getFormatters(self):
        return self.formatters.values()
    def formatterIsUsed(self, name):
        # Return the name of a handler using this formatter, or None.
        rv = None
        for h in self.handlers.keys():
            if self.handlers[h].getPropValue("formatter") == name:
                rv = h
                break
        return rv
    def handlerIsUsed(self, name):
        # Return the name of a logger using this handler, or None.
        rv = None
        for log in self.loggers.keys():
            if name in self.loggers[log].getPropValue("handler"):
                rv = log
                break
        return rv
    def removeFormatter(self, name):
        del self.formatters[name]
    def removeHandler(self, name):
        del self.handlers[name]
    def removeLogger(self, name):
        del self.loggers[name]
    def save(self, fn):
        # Write the whole configuration back out to file fn.
        #needed because 1.5.2 ConfigParser should be supported
        file = open(fn, "w")
        #Write out the keys
        loggers = self.loggers.keys()
        loggers.remove("(root)")
        # Soft-deleted loggers are kept in memory but not written out.
        loggers = filter(lambda x, d=self.loggers: not d[x].deleted, loggers)
        loggers.sort()
        keylist = ["root"]  # renamed from "list" to avoid shadowing the builtin
        keylist.extend(loggers)
        file.write("[loggers]\nkeys=%s\n\n" % string.join(keylist, ","))
        handlers = self.handlers.keys()
        handlers.sort()
        file.write("[handlers]\nkeys=%s\n\n" % string.join(handlers, ","))
        formatters = self.formatters.keys()
        formatters.sort()
        file.write("[formatters]\nkeys=%s\n\n" % string.join(formatters, ","))
        #write out the root logger properties
        log = self.loggers["(root)"]
        log.writeConfig(file)
        #write out other logger properties
        for log in loggers:
            log = self.loggers[log]
            log.writeConfig(file)
        #write out handler properties
        for hand in handlers:
            hand = self.handlers[hand]
            hand.writeConfig(file)
        #write out formatter properties
        for form in formatters:
            form = self.formatters[form]
            form.writeConfig(file)
        file.close()
root = None  # Tk root window; set by main()
def onClose():
    # WM_DELETE_WINDOW handler: hide the helper popup (_popup is a module
    # global defined elsewhere in this file) before destroying the root.
    if _popup:
        _popup.withdraw()
    root.destroy()
def main():
    # Build the Tk UI, embed the Configurator pane and run the event loop.
    global root
    root=Tk()
    cfg = Configurator(root)
    cfg.pack(side=LEFT, fill=BOTH, expand=1)
    root.protocol("WM_DELETE_WINDOW", onClose)
    root.mainloop()
if __name__ == "__main__":
main() |
yize/grunt-tps | refs/heads/master | tasks/lib/python/Lib/python2.7/encodings/cp500.py | 593 | """ Python Character Mapping Codec cp500 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP500.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless cp500 <-> Unicode codec backed by the module-level charmap
    # tables (file is generated by gencodec.py; do not edit by hand).
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so "final" can be ignored.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Single-byte charmap decoding is stateless, so "final" can be ignored.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream wrapper: inherits encode() from Codec, I/O from StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream wrapper: inherits decode() from Codec, I/O from StreamReader.
    pass
### encodings module API
def getregentry():
    # Registration hook used by the encodings package: bundles this
    # module's codec pieces into a CodecInfo record.
    return codecs.CodecInfo(
        name='cp500',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'[' # 0x4A -> LEFT SQUARE BRACKET
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u']' # 0x5A -> RIGHT SQUARE BRACKET
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
u'\xae' # 0xAF -> REGISTERED SIGN
u'\xa2' # 0xB0 -> CENT SIGN
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'\xac' # 0xBA -> NOT SIGN
u'|' # 0xBB -> VERTICAL LINE
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
|
openmv/micropython | refs/heads/master | tests/wipy/time.py | 14 | import time
# 1-based month lengths; DAYS_PER_MONTH[2] is patched for leap years in test().
DAYS_PER_MONTH = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def is_leap(year):
    """Return True if *year* is a Gregorian leap year.

    The original shortcut (year % 4 == 0) is only correct for 1901-2099;
    the full rule also handles century years (2100 is not a leap year,
    2400 is) while giving identical results for the 2000-2048 range
    exercised by test() below.
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def test():
    # Exhaustively cross-check time.mktime()/time.localtime() (MicroPython
    # WiPy port, epoch 2000-01-01) for every day from 2000 through 2048,
    # verifying the running second count, weekday and day-of-year.
    seconds = 0
    wday = 5  # Jan 1, 2000 was a Saturday
    for year in range(2000, 2049):
        print("Testing %d" % year)
        yday = 1
        for month in range(1, 13):
            # Patch February's length for leap years before iterating days.
            if month == 2 and is_leap(year):
                DAYS_PER_MONTH[2] = 29
            else:
                DAYS_PER_MONTH[2] = 28
            for day in range(1, DAYS_PER_MONTH[month] + 1):
                secs = time.mktime((year, month, day, 0, 0, 0, 0, 0))
                if secs != seconds:
                    print(
                        "mktime failed for %d-%02d-%02d got %d expected %d"
                        % (year, month, day, secs, seconds)
                    )
                # Round-trip: localtime() then mktime() must reproduce
                # the same second count.  NOTE: "tuple" shadows the builtin.
                tuple = time.localtime(seconds)
                secs = time.mktime(tuple)
                if secs != seconds:
                    print(
                        "localtime failed for %d-%02d-%02d got %d expected %d"
                        % (year, month, day, secs, seconds)
                    )
                    return
                seconds += 86400
                # tuple[7] is day-of-year, tuple[6] is weekday (0=Monday).
                if yday != tuple[7]:
                    print(
                        "locatime for %d-%02d-%02d got yday %d, expecting %d"
                        % (year, month, day, tuple[7], yday)
                    )
                    return
                if wday != tuple[6]:
                    print(
                        "locatime for %d-%02d-%02d got wday %d, expecting %d"
                        % (year, month, day, tuple[6], wday)
                    )
                    return
                yday += 1
                wday = (wday + 1) % 7
def spot_test(seconds, expected_time):
    # Compare time.localtime(seconds) field-by-field against expected_time,
    # printing either the mismatch or a "(pass)" confirmation.
    actual_time = time.localtime(seconds)
    for idx, field in enumerate(actual_time):
        if field != expected_time[idx]:
            print(
                "time.localtime(", seconds, ") returned", actual_time, "expecting", expected_time
            )
            return
    print("time.localtime(", seconds, ") returned", actual_time, "(pass)")
test()
# Spot checks: known (seconds -> broken-down time) pairs around the
# WiPy epoch (2000-01-01), including negative offsets before it.
# fmt: off
spot_test( 0, (2000, 1, 1, 0, 0, 0, 5, 1))
spot_test( 1, (2000, 1, 1, 0, 0, 1, 5, 1))
spot_test( 59, (2000, 1, 1, 0, 0, 59, 5, 1))
spot_test( 60, (2000, 1, 1, 0, 1, 0, 5, 1))
spot_test( 3599, (2000, 1, 1, 0, 59, 59, 5, 1))
spot_test( 3600, (2000, 1, 1, 1, 0, 0, 5, 1))
spot_test( -1, (1999, 12, 31, 23, 59, 59, 4, 365))
spot_test( 447549467, (2014, 3, 7, 23, 17, 47, 4, 66))
spot_test( -940984933, (1970, 3, 7, 23, 17, 47, 5, 66))
spot_test(-1072915199, (1966, 1, 1, 0, 0, 1, 5, 1))
spot_test(-1072915200, (1966, 1, 1, 0, 0, 0, 5, 1))
spot_test(-1072915201, (1965, 12, 31, 23, 59, 59, 4, 365))
# fmt: on
# Sleep-precision checks: each sleep should advance the matching tick
# counter by roughly the requested amount (prints True on success).
t1 = time.time()
time.sleep(2)
t2 = time.time()
print(abs(time.ticks_diff(t1, t2) - 2) <= 1)
t1 = time.ticks_ms()
time.sleep_ms(50)
t2 = time.ticks_ms()
print(abs(time.ticks_diff(t1, t2) - 50) <= 1)
t1 = time.ticks_us()
time.sleep_us(1000)
t2 = time.ticks_us()
print(time.ticks_diff(t1, t2) < 1500)
# Two back-to-back ticks_cpu() reads should be close together.
print(time.ticks_diff(time.ticks_cpu(), time.ticks_cpu()) < 16384)
|
40223210/w16b_test | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/decimal.py | 623 | # Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz <aahz at pobox.com>
# and Tim Peters
# This module should be kept in sync with the latest updates of the
# IBM specification as it evolves. Those updates will be treated
# as bug fixes (deviation from the spec is a compatibility, usability
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
"""
This is an implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
http://speleotrove.com/decimal/decarith.html
and IEEE standard 854-1987:
http://en.wikipedia.org/wiki/IEEE_854-1987
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of this module is to support arithmetic using familiar
"schoolhouse" rules and to avoid some of the tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected
Decimal('0.00')).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal('0')
>>> Decimal('1')
Decimal('1')
>>> Decimal('-.0123')
Decimal('-0.0123')
>>> Decimal(123456)
Decimal('123456')
>>> Decimal('123.45e12345678')
Decimal('1.2345E+12345680')
>>> Decimal('1.33') + Decimal('1.27')
Decimal('2.60')
>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41')
Decimal('-2.20')
>>> dig = Decimal(1)
>>> print(dig / Decimal(3))
0.333333333
>>> getcontext().prec = 18
>>> print(dig / Decimal(3))
0.333333333333333333
>>> print(dig.sqrt())
1
>>> print(Decimal(3).sqrt())
1.73205080756887729
>>> print(Decimal(3) ** 123)
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print(inf)
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print(neginf)
-Infinity
>>> print(neginf + inf)
NaN
>>> print(neginf * inf)
-Infinity
>>> print(dig / 0)
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print(dig / 0)
Traceback (most recent call last):
...
...
...
decimal.DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal('NaN')
>>> c.traps[InvalidOperation] = 1
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> print(c.divide(Decimal(0), Decimal(0)))
Traceback (most recent call last):
...
...
...
decimal.InvalidOperation: 0 / 0
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print(c.divide(Decimal(0), Decimal(0)))
NaN
>>> print(c.flags[InvalidOperation])
1
>>>
"""
__all__ = [
# Two major classes
'Decimal', 'Context',
# Contexts
'DefaultContext', 'BasicContext', 'ExtendedContext',
# Exceptions
'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
'FloatOperation',
# Constants for use in setting up contexts
'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP',
# Functions for manipulating contexts
'setcontext', 'getcontext', 'localcontext',
# Limits for the C version for compatibility
'MAX_PREC', 'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY',
# C version: compile time choice that enables the thread local context
'HAVE_THREADS'
]
__version__ = '1.70' # Highest version of the spec this complies with
# See http://speleotrove.com/decimal/
import copy as _copy
import math as _math
import numbers as _numbers
import sys
try:
from collections import namedtuple as _namedtuple
DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent')
except ImportError:
DecimalTuple = lambda *args: args
# Rounding
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
ROUND_05UP = 'ROUND_05UP'
# Compatibility with the C version
HAVE_THREADS = True
if sys.maxsize == 2**63-1:
MAX_PREC = 999999999999999999
MAX_EMAX = 999999999999999999
MIN_EMIN = -999999999999999999
else:
MAX_PREC = 425000000
MAX_EMAX = 425000000
MIN_EMIN = -425000000
MIN_ETINY = MIN_EMIN - (MAX_PREC-1)
# Errors
class DecimalException(ArithmeticError):
    """Root of the decimal module's signal/exception hierarchy.

    Every condition (signal) raised by the module derives from this
    class.  A subclass that also derives from *other* signals (for
    example Underflow, which derives from Inexact, Rounded and
    Subnormal) indicates it is only raised when those conditions hold
    too -- this is informational and not used for anything.

    Subclasses override handle(), which Context._raise_error() calls
    when the corresponding trap is *not* enabled: the first argument is
    the context, and any extra positional arguments are those passed
    after the explanation string (so
    context._raise_error(NewError, '(-x)!', self._sign) ends up calling
    NewError().handle(context, self._sign)).

    Deriving from DecimalException is sufficient to define a new signal.
    """
    def handle(self, context, *args):
        # Default: an untrapped signal produces no substitute result.
        pass
class Clamped(DecimalException):
    """Signal: a result's exponent was altered ("clamped") to fit the
    concrete representation.

    Happens when a zero result's exponent would fall outside the
    representable bounds, or when a large normal number's encoded
    exponent cannot be represented -- in the latter case the exponent
    is reduced and zero digits are appended to the coefficient
    ("fold-down").
    """
    # No handle() override yet (brython port note kept).
    pass
class InvalidOperation(DecimalException):
    """Signal: an invalid operation was performed.

    Raised for, among others: creating a signaling NaN; -INF + INF;
    0 * (+-)INF; (+-)INF / (+-)INF; x % 0; (+-)INF % x; rescaling to a
    non-integer; sqrt of a positive-signed negative value; 0 ** 0;
    x ** non-integer; x ** (+-)INF; or an invalid operand.

    The untrapped result is a quiet positive NaN, except when the cause
    was a signaling NaN, in which case the quiet NaN keeps the original
    sign and optional diagnostic payload.
    """
    def handle(self, context, *args):
        if not args:
            return _NaN
        # Signaling-NaN operand: squash it to a quiet NaN with the same
        # sign and diagnostic digits, then clamp the payload to the context.
        quiet = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True)
        return quiet._fix_nan(context)
class ConversionSyntax(InvalidOperation):
    """Signal: a string being converted to a number does not conform to
    the numeric-string syntax.  The untrapped result is [0,qNaN].
    """
    def handle(self, context, *args):
        return _NaN
class DivisionByZero(DecimalException, ZeroDivisionError):
    """Signal: a finite, non-zero dividend was divided by zero (during a
    divide or divide-integer operation, or a power operation with a
    negative right-hand operand).

    The untrapped result is [sign,inf], where sign is the exclusive-or
    of the operand signs for divide, or 1 for an odd power of -0.
    """
    def handle(self, context, sign, *args):
        return _SignedInfinity[sign]
class DivisionImpossible(InvalidOperation):
    """Signal: the integer result of a divide-integer or remainder
    operation had more digits than the precision allows.  The untrapped
    result is [0,qNaN].
    """
    def handle(self, context, *args):
        return _NaN
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
    """Signal: 0 / 0 (during a divide-integer, divide, or remainder
    operation, with a zero dividend as well as a zero divisor).  The
    untrapped result is [0,qNaN].
    """
    def handle(self, context, *args):
        return _NaN
class Inexact(DecimalException):
    """Signal: rounding discarded non-zero digits.

    Raised whenever a result is not exact (it had to be rounded and the
    discarded digits were non-zero), and on overflow or underflow.  The
    result itself is unchanged; the signal can be tested or trapped to
    detect that an operation (or sequence of operations) lost precision.
    """
    # No handle() override yet (brython port note kept).
    pass
class InvalidContext(InvalidOperation):
    """Invalid context.  Unknown rounding, for example.

    This occurs and signals invalid-operation if an invalid context was
    detected during an operation.  This can occur if contexts are not checked
    on creation and either the precision exceeds the capability of the
    underlying concrete representation or an unknown or unsupported rounding
    was specified.  These aspects of the context need only be checked when
    the values are required to be used.  The result is [0,qNaN].
    """
    def handle(self, context, *args):
        # A broken context cannot produce a meaningful value -> quiet NaN.
        return _NaN
class Rounded(DecimalException):
    """Number got rounded (not necessarily changed during rounding).

    This occurs and signals rounded whenever the result of an operation is
    rounded (that is, some zero or non-zero digits were discarded from the
    coefficient), or if an overflow or underflow condition occurs.  The
    result in all cases is unchanged.

    The rounded signal may be tested (or trapped) to determine if a given
    operation (or sequence of operations) caused a loss of precision.
    """
    # No handle(): like Inexact, this is a pure flag/trap condition.
    #brython fix me
    pass
class Subnormal(DecimalException):
    """Exponent < Emin before rounding.

    This occurs and signals subnormal whenever the result of a conversion or
    operation is subnormal (that is, its adjusted exponent is less than
    Emin, before any rounding).  The result in all cases is unchanged.

    The subnormal signal may be tested (or trapped) to determine if a given
    or operation (or sequence of operations) yielded a subnormal result.
    """
    # No handle(): flag/trap only, the result is returned unchanged.
    #brython fix me
    pass
class Overflow(Inexact, Rounded):
    """Numerical overflow.

    This occurs and signals overflow if the adjusted exponent of a result
    (from a conversion or from an operation that is not an attempt to divide
    by zero), after rounding, would be greater than the largest value that
    can be handled by the implementation (the value Emax).

    The result depends on the rounding mode: half-up, half-even, half-down
    and round-up all give a signed infinity; round-down clamps to the
    largest representable finite number; round-ceiling/round-floor give an
    infinity only when it lies on the overflowing side, and otherwise clamp.
    In all cases, Inexact and Rounded will also be raised.
    """
    def handle(self, context, sign, *args):
        rounding = context.rounding
        # The four "round to nearest / away" modes always overflow to infinity.
        if rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
                        ROUND_HALF_DOWN, ROUND_UP):
            return _SignedInfinity[sign]
        if sign in (0, 1):
            # Directed rounding towards the overflowing side -> infinity.
            if (sign, rounding) in ((0, ROUND_CEILING), (1, ROUND_FLOOR)):
                return _SignedInfinity[sign]
            # Otherwise clamp to the largest finite number at this precision:
            # coefficient of all nines, maximal exponent.
            return _dec_from_triple(sign, '9' * context.prec,
                                    context.Emax - context.prec + 1)
class Underflow(Inexact, Rounded, Subnormal):
    """Numerical underflow with result rounded to 0.

    This occurs and signals underflow if a result is inexact and the
    adjusted exponent of the result would be smaller (more negative) than
    the smallest value that can be handled by the implementation (the value
    Emin).  That is, the result is both inexact and subnormal.

    The result after an underflow will be a subnormal number rounded, if
    necessary, so that its exponent is not less than Etiny.  This may result
    in 0 with the sign of the intermediate result and an exponent of Etiny.

    In all cases, Inexact, Rounded, and Subnormal will also be raised.
    """
    # No handle(): the rounding machinery produces the subnormal/zero result;
    # this class only serves as the combined flag/trap condition.
    #brython fix me
    pass
class FloatOperation(DecimalException, TypeError):
    """Enable stricter semantics for mixing floats and Decimals.

    If the signal is not trapped (default), mixing floats and Decimals is
    permitted in the Decimal() constructor, context.create_decimal() and
    all comparison operators.  Both conversion and comparisons are exact.
    Any occurrence of a mixed operation is silently recorded by setting
    FloatOperation in the context flags.  Explicit conversions with
    Decimal.from_float() or context.create_decimal_from_float() do not
    set the flag.

    Otherwise (the signal is trapped), only equality comparisons and explicit
    conversions are silent.  All other mixed operations raise FloatOperation.
    """
    # No handle(): when untrapped the operation proceeds normally and only
    # the context flag is set.
    #brython fix me
    pass
# List of public traps and flags: the conditions a Context tracks in its
# .flags and .traps dictionaries.
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
            Underflow, InvalidOperation, Subnormal, FloatOperation]
# Map conditions (per the spec) to signals: several spec-level conditions
# are all surfaced to users as the single InvalidOperation signal.
_condition_map = {ConversionSyntax:InvalidOperation,
                  DivisionImpossible:InvalidOperation,
                  DivisionUndefined:InvalidOperation,
                  InvalidContext:InvalidOperation}
# Valid rounding modes accepted by Context.rounding.
_rounding_modes = (ROUND_DOWN, ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_CEILING,
                   ROUND_FLOOR, ROUND_UP, ROUND_HALF_DOWN, ROUND_05UP)
##### Context Functions ##################################################

# The getcontext() and setcontext() function manage access to a thread-local
# current context.  Py2.4 offers direct support for thread locals.  If that
# is not available, use threading.current_thread() which is slower but will
# work for older Pythons.  If threads are not part of the build, create a
# mock threading object with threading.local() returning the module namespace.

try:
    import threading
except ImportError:
    # Python was compiled without threads; create a mock object instead
    class MockThreading(object):
        # local() returns the module itself, so "thread-local" attributes
        # are simply stored as module globals.
        def local(self, sys=sys):
            return sys.modules[__name__]
    threading = MockThreading()
    del MockThreading

try:
    threading.local

except AttributeError:
    # Fallback path: no threading.local available, store the context as an
    # attribute on the current thread object instead.

    # To fix reloading, force it to create a new context
    # Old contexts have different exceptions in their dicts, making problems.
    if hasattr(threading.current_thread(), '__decimal_context__'):
        del threading.current_thread().__decimal_context__

    def setcontext(context):
        """Set this thread's context to context."""
        if context in (DefaultContext, BasicContext, ExtendedContext):
            # Never install the shared template contexts directly; hand out
            # a clean copy so callers cannot mutate the templates.
            context = context.copy()
            context.clear_flags()
        threading.current_thread().__decimal_context__ = context

    def getcontext():
        """Returns this thread's context.

        If this thread does not yet have a context, returns
        a new context and sets this thread's context.
        New contexts are copies of DefaultContext.
        """
        try:
            return threading.current_thread().__decimal_context__
        except AttributeError:
            # First use on this thread: lazily create and install a context.
            context = Context()
            threading.current_thread().__decimal_context__ = context
            return context

else:
    # Normal path: use a threading.local() slot for the per-thread context.

    local = threading.local()
    if hasattr(local, '__decimal_context__'):
        # Reload protection, as in the fallback branch above.
        del local.__decimal_context__

    def getcontext(_local=local):
        """Returns this thread's context.

        If this thread does not yet have a context, returns
        a new context and sets this thread's context.
        New contexts are copies of DefaultContext.
        """
        try:
            return _local.__decimal_context__
        except AttributeError:
            # First use on this thread: lazily create and install a context.
            context = Context()
            _local.__decimal_context__ = context
            return context

    def setcontext(context, _local=local):
        """Set this thread's context to context."""
        if context in (DefaultContext, BasicContext, ExtendedContext):
            # Never install the shared template contexts directly.
            context = context.copy()
            context.clear_flags()
        _local.__decimal_context__ = context

    del threading, local        # Don't contaminate the namespace
def localcontext(ctx=None):
    """Return a context manager for a copy of the supplied context

    Uses a copy of the current context if no context is specified
    The returned context manager creates a local decimal context
    in a with statement:
        def sin(x):
             with localcontext() as ctx:
                 ctx.prec += 2
                 # Rest of sin calculation algorithm
                 # uses a precision 2 greater than normal
             return +s  # Convert result to normal precision

        def sin(x):
             with localcontext(ExtendedContext):
                 # Rest of sin calculation algorithm
                 # uses the Extended Context from the
                 # General Decimal Arithmetic Specification
             return +s  # Convert result to normal context

    >>> setcontext(DefaultContext)
    >>> print(getcontext().prec)
    28
    >>> with localcontext():
    ...     ctx = getcontext()
    ...     ctx.prec += 2
    ...     print(ctx.prec)
    ...
    30
    >>> with localcontext(ExtendedContext):
    ...     print(getcontext().prec)
    ...
    9
    >>> print(getcontext().prec)
    28
    """
    # Default to the calling thread's current context.
    return _ContextManager(getcontext() if ctx is None else ctx)
##### Decimal class #######################################################
# Do not subclass Decimal from numbers.Real and do not register it as such
# (because Decimals are not interoperable with floats). See the notes in
# numbers.py for more detail.
class Decimal(object):
    """Floating point class for decimal arithmetic."""

    # __slots__ keeps instances small; Decimal objects are created in huge
    # numbers by arithmetic, so the per-instance __dict__ is worth avoiding.
    __slots__ = ('_exp','_int','_sign', '_is_special')
    # Generally, the value of the Decimal instance is given by
    # (-1)**_sign * _int * 10**_exp
    # where _sign is 0 or 1, _int is the coefficient as a digit string and
    # _exp is an int (or one of 'F', 'n', 'N' for infinity/qNaN/sNaN).
    # Special values are signified by _is_special == True

    # We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
"""Create a decimal point instance.
>>> Decimal('3.14') # string input
Decimal('3.14')
>>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent)
Decimal('3.14')
>>> Decimal(314) # int
Decimal('314')
>>> Decimal(Decimal(314)) # another decimal instance
Decimal('314')
>>> Decimal(' 3.14 \\n') # leading and trailing whitespace okay
Decimal('3.14')
"""
# Note that the coefficient, self._int, is actually stored as
# a string rather than as a tuple of digits. This speeds up
# the "digits to integer" and "integer to digits" conversions
# that are used in almost every arithmetic operation on
# Decimals. This is an internal detail: the as_tuple function
# and the Decimal constructor still deal with tuples of
# digits.
self = object.__new__(cls)
# From a string
# REs insist on real strings, so we can too.
if isinstance(value, str):
value=value.strip().lower()
if value.startswith("-"):
self._sign = 1
value=value[1:]
else:
self._sign = 0
if value in ('', 'nan'):
self._is_special = True
self._int = ''
#if m.group('signal'): #figure out what a signaling NaN is later
# self._exp = 'N'
#else:
# self._exp = 'n'
self._exp='n'
return self
if value in ('inf', 'infinity'):
self._int = '0'
self._exp = 'F'
self._is_special = True
return self
import _jsre as re
_m=re.match("^\d*\.?\d*(e\+?\d*)?$", value)
if not _m:
self._is_special = True
self._int = ''
self._exp='n'
return self
if '.' in value:
intpart, fracpart=value.split('.')
if 'e' in fracpart:
fracpart, exp=fracpart.split('e')
exp=int(exp)
else:
exp=0
#self._int = str(int(intpart+fracpart))
self._int = intpart+fracpart
self._exp = exp - len(fracpart)
self._is_special = False
return self
else:
#is this a pure int?
self._is_special = False
if 'e' in value:
self._int, _exp=value.split('e')
self._exp=int(_exp)
#print(self._int, self._exp)
else:
self._int = value
self._exp = 0
return self
#m = _parser(value.strip())
#if m is None:
if context is None:
context = getcontext()
return context._raise_error(ConversionSyntax,
"Invalid literal for Decimal: %r" % value)
#if m.group('sign') == "-":
# self._sign = 1
#else:
# self._sign = 0
#intpart = m.group('int')
#if intpart is not None:
# # finite number
# fracpart = m.group('frac') or ''
# exp = int(m.group('exp') or '0')
# self._int = str(int(intpart+fracpart))
# self._exp = exp - len(fracpart)
# self._is_special = False
#else:
# diag = m.group('diag')
# if diag is not None:
# # NaN
# self._int = str(int(diag or '0')).lstrip('0')
# if m.group('signal'):
# self._exp = 'N'
# else:
# self._exp = 'n'
# else:
# # infinity
# self._int = '0'
# self._exp = 'F'
# self._is_special = True
#return self
# From an integer
if isinstance(value, int):
if value >= 0:
self._sign = 0
else:
self._sign = 1
self._exp = 0
self._int = str(abs(value))
self._is_special = False
return self
# From another decimal
if isinstance(value, Decimal):
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
# From an internal working value
if isinstance(value, _WorkRep):
self._sign = value.sign
self._int = str(value.int)
self._exp = int(value.exp)
self._is_special = False
return self
# tuple/list conversion (possibly from as_tuple())
if isinstance(value, (list,tuple)):
if len(value) != 3:
raise ValueError('Invalid tuple size in creation of Decimal '
'from list or tuple. The list or tuple '
'should have exactly three elements.')
# process sign. The isinstance test rejects floats
if not (isinstance(value[0], int) and value[0] in (0,1)):
raise ValueError("Invalid sign. The first value in the tuple "
"should be an integer; either 0 for a "
"positive number or 1 for a negative number.")
self._sign = value[0]
if value[2] == 'F':
# infinity: value[1] is ignored
self._int = '0'
self._exp = value[2]
self._is_special = True
else:
# process and validate the digits in value[1]
digits = []
for digit in value[1]:
if isinstance(digit, int) and 0 <= digit <= 9:
# skip leading zeros
if digits or digit != 0:
digits.append(digit)
else:
raise ValueError("The second value in the tuple must "
"be composed of integers in the range "
"0 through 9.")
if value[2] in ('n', 'N'):
# NaN: digits form the diagnostic
self._int = ''.join(map(str, digits))
self._exp = value[2]
self._is_special = True
elif isinstance(value[2], int):
# finite number: digits give the coefficient
self._int = ''.join(map(str, digits or [0]))
self._exp = value[2]
self._is_special = False
else:
raise ValueError("The third value in the tuple must "
"be an integer, or one of the "
"strings 'F', 'n', 'N'.")
return self
if isinstance(value, float):
if context is None:
context = getcontext()
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are "
"enabled")
value = Decimal.from_float(value)
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
raise TypeError("Cannot convert %r to Decimal" % value)
# @classmethod, but @decorator is not valid Python 2.3 syntax, so
# don't use it (see notes on Py2.3 compatibility at top of file)
def from_float(cls, f):
"""Converts a float to a decimal number, exactly.
Note that Decimal.from_float(0.1) is not the same as Decimal('0.1').
Since 0.1 is not exactly representable in binary floating point, the
value is stored as the nearest representable value which is
0x1.999999999999ap-4. The exact equivalent of the value in decimal
is 0.1000000000000000055511151231257827021181583404541015625.
>>> Decimal.from_float(0.1)
Decimal('0.1000000000000000055511151231257827021181583404541015625')
>>> Decimal.from_float(float('nan'))
Decimal('NaN')
>>> Decimal.from_float(float('inf'))
Decimal('Infinity')
>>> Decimal.from_float(-float('inf'))
Decimal('-Infinity')
>>> Decimal.from_float(-0.0)
Decimal('-0')
"""
if isinstance(f, int): # handle integer inputs
return cls(f)
if not isinstance(f, float):
raise TypeError("argument must be int or float.")
if _math.isinf(f) or _math.isnan(f):
return cls(repr(f))
if _math.copysign(1.0, f) == 1.0:
sign = 0
else:
sign = 1
n, d = abs(f).as_integer_ratio()
k = d.bit_length() - 1
result = _dec_from_triple(sign, str(n*5**k), -k)
if cls is Decimal:
return result
else:
return cls(result)
from_float = classmethod(from_float)
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other=None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if self_is_nan:
return self._fix_nan(context)
return other._fix_nan(context)
return 0
def _compare_check_nans(self, other, context):
"""Version of _check_nans used for the signaling comparisons
compare_signal, __le__, __lt__, __ge__, __gt__.
Signal InvalidOperation if either self or other is a (quiet
or signaling) NaN. Signaling NaNs take precedence over quiet
NaNs.
Return 0 if neither operand is a NaN.
"""
if context is None:
context = getcontext()
if self._is_special or other._is_special:
if self.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
self)
elif other.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
other)
elif self.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
self)
elif other.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
other)
return 0
def __bool__(self):
"""Return True if self is nonzero; otherwise return False.
NaNs and infinities are considered nonzero.
"""
return self._is_special or self._int != '0'
    def _cmp(self, other):
        """Compare the two non-NaN decimal instances self and other.

        Returns -1 if self < other, 0 if self == other and 1
        if self > other.  This routine is for internal use only.
        """
        if self._is_special or other._is_special:
            # NaNs were excluded by the caller, so the only special values
            # here are infinities.  _isinfinity() codes (-1, 0, +1) order
            # correctly: -INF < any finite (code 0) < +INF.
            self_inf = self._isinfinity()
            other_inf = other._isinfinity()
            if self_inf == other_inf:
                return 0
            elif self_inf < other_inf:
                return -1
            else:
                return 1

        # check for zeros;  Decimal('0') == Decimal('-0')
        if not self:
            if not other:
                return 0
            else:
                # Result is the opposite of other's sign: 0 > negative, etc.
                return -((-1)**other._sign)
        if not other:
            return (-1)**self._sign

        # If different signs, neg one is less
        if other._sign < self._sign:
            return -1
        if self._sign < other._sign:
            return 1

        # Same sign: compare magnitudes via adjusted (scientific) exponents.
        self_adjusted = self.adjusted()
        other_adjusted = other.adjusted()
        if self_adjusted == other_adjusted:
            # Equal adjusted exponents: pad both coefficients to a common
            # exponent so the strings have equal length; then lexicographic
            # string comparison agrees with numeric comparison.
            self_padded = self._int + '0'*(self._exp - other._exp)
            other_padded = other._int + '0'*(other._exp - self._exp)
            if self_padded == other_padded:
                return 0
            elif self_padded < other_padded:
                return -(-1)**self._sign
            else:
                return (-1)**self._sign
        elif self_adjusted > other_adjusted:
            # Larger magnitude; the shared sign decides the direction.
            return (-1)**self._sign
        else: # self_adjusted < other_adjusted
            return -((-1)**self._sign)
# Note: The Decimal standard doesn't cover rich comparisons for
# Decimals. In particular, the specification is silent on the
# subject of what should happen for a comparison involving a NaN.
# We take the following approach:
#
# == comparisons involving a quiet NaN always return False
# != comparisons involving a quiet NaN always return True
# == or != comparisons involving a signaling NaN signal
# InvalidOperation, and return False or True as above if the
# InvalidOperation is not trapped.
# <, >, <= and >= comparisons involving a (quiet or signaling)
# NaN signal InvalidOperation, and return False if the
# InvalidOperation is not trapped.
#
# This behavior is designed to conform as closely as possible to
# that specified by IEEE 754.
def __eq__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return False
return self._cmp(other) == 0
def __ne__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return True
return self._cmp(other) != 0
def __lt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) < 0
def __le__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) <= 0
def __gt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) > 0
def __ge__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) >= 0
    def compare(self, other, context=None):
        """Compares one to another.

        -1 => a < b
        0  => a = b
        1  => a > b
        NaN => one is NaN
        Like __cmp__, but returns Decimal instances.
        """
        other = _convert_other(other, raiseit=True)

        # Compare(NaN, NaN) = NaN
        # (Special values are always truthy per __bool__, so the
        # "other and" guard never skips a special operand.)
        if (self._is_special or other and other._is_special):
            ans = self._check_nans(other, context)
            if ans:
                return ans

        return Decimal(self._cmp(other))
    def __hash__(self):
        """x.__hash__() <==> hash(x)"""

        # In order to make sure that the hash of a Decimal instance
        # agrees with the hash of a numerically equal integer, float
        # or Fraction, we follow the rules for numeric hashes outlined
        # in the documentation.  (See library docs, 'Built-in Types').
        if self._is_special:
            if self.is_snan():
                raise TypeError('Cannot hash a signaling NaN value.')
            elif self.is_nan():
                return _PyHASH_NAN
            else:
                if self._sign:
                    return -_PyHASH_INF
                else:
                    return _PyHASH_INF

        # Hash of a finite value: coefficient * 10**exp modulo the shared
        # numeric-hash modulus; negative exponents use the precomputed
        # modular inverse of 10 (_PyHASH_10INV).
        if self._exp >= 0:
            exp_hash = pow(10, self._exp, _PyHASH_MODULUS)
        else:
            exp_hash = pow(_PyHASH_10INV, -self._exp, _PyHASH_MODULUS)
        hash_ = int(self._int) * exp_hash % _PyHASH_MODULUS
        ans = hash_ if self >= 0 else -hash_
        # CPython reserves -1 as an error code for hash functions.
        return -2 if ans == -1 else ans
def as_tuple(self):
"""Represents the number as a triple tuple.
To show the internals exactly as they are.
"""
return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp)
def __repr__(self):
"""Represents the number as an instance of Decimal."""
# Invariant: eval(repr(d)) == d
return "Decimal('%s')" % str(self)
    def __str__(self, eng=False, context=None):
        """Return string representation of the number in scientific notation.

        Captures all of the information in the underlying representation.
        With eng=True, uses engineering notation (exponent a multiple of 3).
        The context is only consulted for the 'e'/'E' capitalisation of a
        printed exponent.
        """

        sign = ['', '-'][self._sign]
        if self._is_special:
            if self._exp == 'F':
                return sign + 'Infinity'
            elif self._exp == 'n':
                # A quiet NaN may carry a diagnostic coefficient.
                return sign + 'NaN' + self._int
            else: # self._exp == 'N'
                return sign + 'sNaN' + self._int

        # number of digits of self._int to left of decimal point
        leftdigits = self._exp + len(self._int)

        # dotplace is number of digits of self._int to the left of the
        # decimal point in the mantissa of the output string (that is,
        # after adjusting the exponent)
        if self._exp <= 0 and leftdigits > -6:
            # no exponent required
            dotplace = leftdigits
        elif not eng:
            # usual scientific notation: 1 digit on left of the point
            dotplace = 1
        elif self._int == '0':
            # engineering notation, zero
            dotplace = (leftdigits + 1) % 3 - 1
        else:
            # engineering notation, nonzero
            dotplace = (leftdigits - 1) % 3 + 1

        # Assemble integer part, fractional part and exponent suffix.
        if dotplace <= 0:
            intpart = '0'
            fracpart = '.' + '0'*(-dotplace) + self._int
        elif dotplace >= len(self._int):
            intpart = self._int+'0'*(dotplace-len(self._int))
            fracpart = ''
        else:
            intpart = self._int[:dotplace]
            fracpart = '.' + self._int[dotplace:]
        if leftdigits == dotplace:
            exp = ''
        else:
            if context is None:
                context = getcontext()
            exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace)

        return sign + intpart + fracpart + exp
    def to_eng_string(self, context=None):
        """Convert to engineering-type string.

        Engineering notation has an exponent which is a multiple of 3, so
        there are up to 3 digits left of the decimal place.

        Same rules for when in exponential and when as a value as in __str__.
        """
        # Delegates to __str__; context only affects 'e'/'E' capitalisation.
        return self.__str__(eng=True, context=context)
def __neg__(self, context=None):
"""Returns a copy with the sign switched.
Rounds, if it has reason.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# -Decimal('0') is Decimal('0'), not Decimal('-0'), except
# in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = self.copy_negate()
return ans._fix(context)
def __pos__(self, context=None):
"""Returns a copy, unless it is a sNaN.
Rounds the number (if more then precision digits)
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# + (-0) = 0, except in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = Decimal(self)
return ans._fix(context)
def __abs__(self, round=True, context=None):
"""Returns the absolute value of self.
If the keyword argument 'round' is false, do not round. The
expression self.__abs__(round=False) is equivalent to
self.copy_abs().
"""
if not round:
return self.copy_abs()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._sign:
ans = self.__neg__(context=context)
else:
ans = self.__pos__(context=context)
return ans
    def __add__(self, other, context=None):
        """Returns self + other.

        -INF + INF (or the reverse) cause InvalidOperation errors.
        """
        other = _convert_other(other)
        if other is NotImplemented:
            return other

        if context is None:
            context = getcontext()

        if self._is_special or other._is_special:
            ans = self._check_nans(other, context)
            if ans:
                return ans

            if self._isinfinity():
                # If both INF, same sign => same as both, opposite => error.
                if self._sign != other._sign and other._isinfinity():
                    return context._raise_error(InvalidOperation, '-INF + INF')
                return Decimal(self)
            if other._isinfinity():
                return Decimal(other)  # Can't both be infinity here

        # Ideal exponent of the sum is the smaller of the two exponents.
        exp = min(self._exp, other._exp)
        negativezero = 0
        if context.rounding == ROUND_FLOOR and self._sign != other._sign:
            # If the answer is 0, the sign should be negative, in this case.
            negativezero = 1

        if not self and not other:
            sign = min(self._sign, other._sign)
            if negativezero:
                sign = 1
            ans = _dec_from_triple(sign, '0', exp)
            ans = ans._fix(context)
            return ans
        if not self:
            # Adding zero: just rescale the other operand; clamp the
            # exponent so rounding cannot be affected by distant digits.
            exp = max(exp, other._exp - context.prec-1)
            ans = other._rescale(exp, context.rounding)
            ans = ans._fix(context)
            return ans
        if not other:
            exp = max(exp, self._exp - context.prec-1)
            ans = self._rescale(exp, context.rounding)
            ans = ans._fix(context)
            return ans

        # Align both operands to a common exponent (integer coefficients).
        op1 = _WorkRep(self)
        op2 = _WorkRep(other)
        op1, op2 = _normalize(op1, op2, context.prec)

        result = _WorkRep()
        if op1.sign != op2.sign:
            # Equal and opposite
            if op1.int == op2.int:
                ans = _dec_from_triple(negativezero, '0', exp)
                ans = ans._fix(context)
                return ans
            # Arrange for abs(op1) > abs(op2) so the subtraction below
            # yields a positive coefficient; track the result sign.
            if op1.int < op2.int:
                op1, op2 = op2, op1
                # OK, now abs(op1) > abs(op2)
            if op1.sign == 1:
                result.sign = 1
                op1.sign, op2.sign = op2.sign, op1.sign
            else:
                result.sign = 0
                # So we know the sign, and op1 > 0.
        elif op1.sign == 1:
            result.sign = 1
            op1.sign, op2.sign = (0, 0)
        else:
            result.sign = 0
            # Now, op1 > abs(op2) > 0

        if op2.sign == 0:
            result.int = op1.int + op2.int
        else:
            result.int = op1.int - op2.int

        result.exp = op1.exp
        ans = Decimal(result)
        ans = ans._fix(context)
        return ans

    __radd__ = __add__
def __sub__(self, other, context=None):
"""Return self - other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context=context)
if ans:
return ans
# self - other is computed as self + other.copy_negate()
return self.__add__(other.copy_negate(), context=context)
def __rsub__(self, other, context=None):
"""Return other - self"""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__sub__(self, context=context)
    def __mul__(self, other, context=None):
        """Return self * other.

        (+-) INF * 0 (or its reverse) raise InvalidOperation.
        """
        other = _convert_other(other)
        if other is NotImplemented:
            return other

        if context is None:
            context = getcontext()

        # Sign of a product is the XOR of the operand signs.
        resultsign = self._sign ^ other._sign

        if self._is_special or other._is_special:
            ans = self._check_nans(other, context)
            if ans:
                return ans

            if self._isinfinity():
                if not other:
                    return context._raise_error(InvalidOperation, '(+-)INF * 0')
                return _SignedInfinity[resultsign]

            if other._isinfinity():
                if not self:
                    return context._raise_error(InvalidOperation, '0 * (+-)INF')
                return _SignedInfinity[resultsign]

        # Exponents simply add.
        resultexp = self._exp + other._exp

        # Special case for multiplying by zero
        if not self or not other:
            ans = _dec_from_triple(resultsign, '0', resultexp)
            # Fixing in case the exponent is out of bounds
            ans = ans._fix(context)
            return ans

        # Special case for multiplying by power of 10
        if self._int == '1':
            ans = _dec_from_triple(resultsign, other._int, resultexp)
            ans = ans._fix(context)
            return ans
        if other._int == '1':
            ans = _dec_from_triple(resultsign, self._int, resultexp)
            ans = ans._fix(context)
            return ans

        # General case: exact integer product of the coefficients, then
        # round to the context via _fix.
        op1 = _WorkRep(self)
        op2 = _WorkRep(other)

        ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp)
        ans = ans._fix(context)

        return ans
    __rmul__ = __mul__
    def __truediv__(self, other, context=None):
        """Return self / other."""
        other = _convert_other(other)
        if other is NotImplemented:
            return NotImplemented

        if context is None:
            context = getcontext()

        sign = self._sign ^ other._sign

        if self._is_special or other._is_special:
            ans = self._check_nans(other, context)
            if ans:
                return ans

            if self._isinfinity() and other._isinfinity():
                return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')

            if self._isinfinity():
                return _SignedInfinity[sign]

            if other._isinfinity():
                # finite / INF underflows to a signed zero at Etiny.
                context._raise_error(Clamped, 'Division by infinity')
                return _dec_from_triple(sign, '0', context.Etiny())

        # Special cases for zeroes
        if not other:
            if not self:
                return context._raise_error(DivisionUndefined, '0 / 0')
            return context._raise_error(DivisionByZero, 'x / 0', sign)

        if not self:
            exp = self._exp - other._exp
            coeff = 0
        else:
            # OK, so neither = 0, INF or NaN
            # Shift the dividend so the integer quotient has prec+1 digits;
            # the extra digit guarantees correct rounding later.
            shift = len(other._int) - len(self._int) + context.prec + 1
            exp = self._exp - other._exp - shift
            op1 = _WorkRep(self)
            op2 = _WorkRep(other)
            if shift >= 0:
                coeff, remainder = divmod(op1.int * 10**shift, op2.int)
            else:
                coeff, remainder = divmod(op1.int, op2.int * 10**-shift)
            if remainder:
                # result is not exact; adjust to ensure correct rounding
                # (force the last digit odd so it cannot look like a tie)
                if coeff % 5 == 0:
                    coeff += 1
            else:
                # result is exact; get as close to ideal exponent as possible
                ideal_exp = self._exp - other._exp
                while exp < ideal_exp and coeff % 10 == 0:
                    coeff //= 10
                    exp += 1

        ans = _dec_from_triple(sign, str(coeff), exp)
        return ans._fix(context)
    def _divide(self, other, context):
        """Return (self // other, self % other), to context.prec precision.

        Assumes that neither self nor other is a NaN, that self is not
        infinite and that other is nonzero.
        """
        sign = self._sign ^ other._sign
        if other._isinfinity():
            ideal_exp = self._exp
        else:
            ideal_exp = min(self._exp, other._exp)

        expdiff = self.adjusted() - other.adjusted()
        if not self or other._isinfinity() or expdiff <= -2:
            # abs(self/other) < 0.1: quotient is 0, remainder is all of self.
            return (_dec_from_triple(sign, '0', 0),
                    self._rescale(ideal_exp, context.rounding))
        if expdiff <= context.prec:
            # Quotient may be representable: do an exact integer divmod on
            # coefficients aligned to a common exponent.
            op1 = _WorkRep(self)
            op2 = _WorkRep(other)
            if op1.exp >= op2.exp:
                op1.int *= 10**(op1.exp - op2.exp)
            else:
                op2.int *= 10**(op2.exp - op1.exp)
            q, r = divmod(op1.int, op2.int)
            if q < 10**context.prec:
                return (_dec_from_triple(sign, str(q), 0),
                        _dec_from_triple(self._sign, str(r), ideal_exp))

        # Here the quotient is too large to be representable
        ans = context._raise_error(DivisionImpossible,
                                   'quotient too large in //, % or divmod')
        return ans, ans
def __rtruediv__(self, other, context=None):
"""Swaps self/other and returns __truediv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__truediv__(self, context=context)
    def __divmod__(self, other, context=None):
        """
        Return (self // other, self % other)
        """
        other = _convert_other(other)
        if other is NotImplemented:
            return other

        if context is None:
            context = getcontext()

        ans = self._check_nans(other, context)
        if ans:
            # A NaN result applies to both members of the pair.
            return (ans, ans)

        sign = self._sign ^ other._sign
        if self._isinfinity():
            if other._isinfinity():
                ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)')
                return ans, ans
            else:
                # INF // finite is a signed infinity; INF % finite is invalid.
                return (_SignedInfinity[sign],
                        context._raise_error(InvalidOperation, 'INF % x'))

        if not other:
            if not self:
                ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)')
                return ans, ans
            else:
                return (context._raise_error(DivisionByZero, 'x // 0', sign),
                        context._raise_error(InvalidOperation, 'x % 0'))

        quotient, remainder = self._divide(other, context)
        # Only the remainder needs context rounding; the quotient from
        # _divide is already exact (or a shared error result).
        remainder = remainder._fix(context)
        return quotient, remainder
def __rdivmod__(self, other, context=None):
    """Reflected divmod: compute divmod(other, self)."""
    converted = _convert_other(other)
    if converted is NotImplemented:
        return NotImplemented
    # Delegate to the forward operation with the operands swapped.
    return converted.__divmod__(self, context=context)
def __mod__(self, other, context=None):
    """Return self % other."""
    rhs = _convert_other(other)
    if rhs is NotImplemented:
        return NotImplemented

    ctx = getcontext() if context is None else context

    nan = self._check_nans(rhs, ctx)
    if nan:
        return nan

    # An infinite dividend has no well-defined remainder.
    if self._isinfinity():
        return ctx._raise_error(InvalidOperation, 'INF % x')
    if not rhs:
        # Zero divisor: distinguish 0 % 0 from x % 0.
        if self:
            return ctx._raise_error(InvalidOperation, 'x % 0')
        return ctx._raise_error(DivisionUndefined, '0 % 0')

    _, rem = self._divide(rhs, ctx)
    return rem._fix(ctx)
def __rmod__(self, other, context=None):
    """Reflected modulo: compute other % self."""
    converted = _convert_other(other)
    if converted is NotImplemented:
        return NotImplemented
    # Delegate to the forward operation with the operands swapped.
    return converted.__mod__(self, context=context)
def remainder_near(self, other, context=None):
    """Return the remainder closest to zero.

    The result is self - n*other, where n is the integer nearest the
    exact value of self/other (ties go to the even integer), so that
    abs(result) <= abs(other)/2.
    """
    if context is None:
        context = getcontext()

    other = _convert_other(other, raiseit=True)

    ans = self._check_nans(other, context)
    if ans:
        return ans

    # self == +/-infinity -> InvalidOperation
    if self._isinfinity():
        return context._raise_error(InvalidOperation,
                                    'remainder_near(infinity, x)')

    # other == 0 -> either InvalidOperation or DivisionUndefined
    if not other:
        if self:
            return context._raise_error(InvalidOperation,
                                        'remainder_near(x, 0)')
        else:
            return context._raise_error(DivisionUndefined,
                                        'remainder_near(0, 0)')

    # other = +/-infinity -> remainder = self
    if other._isinfinity():
        ans = Decimal(self)
        return ans._fix(context)

    # self = 0 -> remainder = self, with ideal exponent
    ideal_exponent = min(self._exp, other._exp)
    if not self:
        ans = _dec_from_triple(self._sign, '0', ideal_exponent)
        return ans._fix(context)

    # catch most cases of large or small quotient
    expdiff = self.adjusted() - other.adjusted()
    if expdiff >= context.prec + 1:
        # expdiff >= prec+1 => abs(self/other) > 10**prec
        return context._raise_error(DivisionImpossible)
    if expdiff <= -2:
        # expdiff <= -2 => abs(self/other) < 0.1; nearest integer
        # quotient is 0, so the remainder is just self rescaled.
        ans = self._rescale(ideal_exponent, context.rounding)
        return ans._fix(context)

    # adjust both arguments to have the same exponent, then divide
    op1 = _WorkRep(self)
    op2 = _WorkRep(other)
    if op1.exp >= op2.exp:
        op1.int *= 10**(op1.exp - op2.exp)
    else:
        op2.int *= 10**(op2.exp - op1.exp)
    q, r = divmod(op1.int, op2.int)
    # remainder is r*10**ideal_exponent; other is +/-op2.int *
    # 10**ideal_exponent.   Apply correction to ensure that
    # abs(remainder) <= abs(other)/2.  The (q&1) term breaks an exact
    # tie toward the even quotient.
    if 2*r + (q&1) > op2.int:
        r -= op2.int
        q += 1

    if q >= 10**context.prec:
        return context._raise_error(DivisionImpossible)

    # result has same sign as self unless r is negative
    sign = self._sign
    if r < 0:
        sign = 1-sign
        r = -r

    ans = _dec_from_triple(sign, str(r), ideal_exponent)
    return ans._fix(context)
def __floordiv__(self, other, context=None):
    """Return self // other."""
    divisor = _convert_other(other)
    if divisor is NotImplemented:
        return NotImplemented

    if context is None:
        context = getcontext()

    nan = self._check_nans(divisor, context)
    if nan:
        return nan

    if self._isinfinity():
        # INF // INF is undefined; INF // finite keeps the combined sign.
        if divisor._isinfinity():
            return context._raise_error(InvalidOperation, 'INF // INF')
        return _SignedInfinity[self._sign ^ divisor._sign]

    if not divisor:
        # Zero divisor: 0 // 0 is undefined, x // 0 diverges.
        if not self:
            return context._raise_error(DivisionUndefined, '0 // 0')
        return context._raise_error(DivisionByZero, 'x // 0',
                                    self._sign ^ divisor._sign)

    quotient, _ = self._divide(divisor, context)
    return quotient
def __rfloordiv__(self, other, context=None):
    """Reflected floor division: compute other // self."""
    converted = _convert_other(other)
    if converted is NotImplemented:
        return NotImplemented
    # Delegate to the forward operation with the operands swapped.
    return converted.__floordiv__(self, context=context)
def __float__(self):
    """Float representation."""
    if not self._isnan():
        return float(str(self))
    # float() cannot parse a signaling NaN; a quiet NaN keeps its sign.
    if self.is_snan():
        raise ValueError("Cannot convert signaling NaN to float")
    return float("-nan" if self._sign else "nan")
def __int__(self):
    """Convert self to an int, truncating toward zero if necessary."""
    if self._is_special:
        if self._isnan():
            raise ValueError("Cannot convert NaN to integer")
        if self._isinfinity():
            raise OverflowError("Cannot convert infinity to integer")
    sign = -1 if self._sign else 1
    if self._exp >= 0:
        # No fractional digits: scale the coefficient up.
        return sign * int(self._int) * 10**self._exp
    # Negative exponent: drop the fractional digits of the coefficient.
    return sign * int(self._int[:self._exp] or '0')

# math.trunc() truncates exactly like int().
__trunc__ = __int__
# Decimals are real numbers, so the complex-number protocol is trivial.
@property
def real(self):
    """The real component: self itself."""
    return self

@property
def imag(self):
    """The imaginary component: always zero."""
    return Decimal(0)

def conjugate(self):
    """Return self; the conjugate of a real number is itself."""
    return self

def __complex__(self):
    """Convert to complex, going through float()."""
    return complex(float(self))
def _fix_nan(self, context):
    """Truncate a NaN payload so that it fits the given context."""
    digits = self._int
    # A payload may hold at most prec digits when clamp=0,
    # prec-1 digits when clamp=1.
    limit = context.prec - context.clamp
    if len(digits) <= limit:
        return Decimal(self)
    # Keep only the trailing `limit` digits, dropping leading zeros.
    trimmed = digits[len(digits)-limit:].lstrip('0')
    return _dec_from_triple(self._sign, trimmed, self._exp, True)
def _fix(self, context):
    """Round if it is necessary to keep self within prec precision.

    Rounds and fixes the exponent.  Does not raise on a sNaN.

    Arguments:
    self - Decimal instance
    context - context used.
    """

    if self._is_special:
        if self._isnan():
            # decapitate payload if necessary
            return self._fix_nan(context)
        else:
            # self is +/-Infinity; return unaltered
            return Decimal(self)

    # if self is zero then exponent should be between Etiny and
    # Emax if clamp==0, and between Etiny and Etop if clamp==1.
    Etiny = context.Etiny()
    Etop = context.Etop()
    if not self:
        exp_max = [context.Emax, Etop][context.clamp]
        new_exp = min(max(self._exp, Etiny), exp_max)
        if new_exp != self._exp:
            context._raise_error(Clamped)
            return _dec_from_triple(self._sign, '0', new_exp)
        else:
            return Decimal(self)

    # exp_min is the smallest allowable exponent of the result,
    # equal to max(self.adjusted()-context.prec+1, Etiny)
    exp_min = len(self._int) + self._exp - context.prec
    if exp_min > Etop:
        # overflow: exp_min > Etop iff self.adjusted() > Emax
        ans = context._raise_error(Overflow, 'above Emax', self._sign)
        context._raise_error(Inexact)
        context._raise_error(Rounded)
        return ans

    self_is_subnormal = exp_min < Etiny
    if self_is_subnormal:
        exp_min = Etiny

    # round if self has too many digits
    if self._exp < exp_min:
        digits = len(self._int) + self._exp - exp_min
        if digits < 0:
            # value is smaller than one ulp of the result; substitute a
            # placeholder so the rounding functions see a single digit
            self = _dec_from_triple(self._sign, '1', exp_min-1)
            digits = 0
        rounding_method = self._pick_rounding_function[context.rounding]
        changed = rounding_method(self, digits)
        coeff = self._int[:digits] or '0'
        if changed > 0:
            coeff = str(int(coeff)+1)
            if len(coeff) > context.prec:
                # rounding carried into a new digit (e.g. 999 -> 1000);
                # drop the trailing zero and bump the exponent instead
                coeff = coeff[:-1]
                exp_min += 1

        # check whether the rounding pushed the exponent out of range
        if exp_min > Etop:
            ans = context._raise_error(Overflow, 'above Emax', self._sign)
        else:
            ans = _dec_from_triple(self._sign, coeff, exp_min)

        # raise the appropriate signals, taking care to respect
        # the precedence described in the specification
        if changed and self_is_subnormal:
            context._raise_error(Underflow)
        if self_is_subnormal:
            context._raise_error(Subnormal)
        if changed:
            context._raise_error(Inexact)
        context._raise_error(Rounded)
        if not ans:
            # raise Clamped on underflow to 0
            context._raise_error(Clamped)
        return ans

    if self_is_subnormal:
        context._raise_error(Subnormal)

    # fold down if clamp == 1 and self has too few digits
    if context.clamp == 1 and self._exp > Etop:
        context._raise_error(Clamped)
        self_padded = self._int + '0'*(self._exp - Etop)
        return _dec_from_triple(self._sign, self_padded, Etop)

    # here self was representable to begin with; return unchanged
    return Decimal(self)
# for each of the rounding functions below:
# self is a finite, nonzero Decimal
# prec is an integer satisfying 0 <= prec < len(self._int)
#
# each function returns either -1, 0, or 1, as follows:
# 1 indicates that self should be rounded up (away from zero)
# 0 indicates that self should be truncated, and that all the
# digits to be truncated are zeros (so the value is unchanged)
# -1 indicates that there are nonzero digits to be truncated
def _round_down(self, prec):
    """Round toward zero (truncate)."""
    # Truncation never bumps the retained digits; report only whether
    # anything nonzero is being discarded.
    return 0 if _all_zeros(self._int, prec) else -1
def _round_up(self, prec):
    """Round away from zero."""
    # Exact mirror of truncation: bump whenever truncation would lose digits.
    down = self._round_down(prec)
    return -down
def _round_half_up(self, prec):
    """Round to nearest, with ties going away from zero."""
    first_dropped = self._int[prec]
    if first_dropped in '56789':
        # Discarded part is at least half a unit: round up.
        return 1
    # Discarded part is below half; exact iff it is all zeros.
    return 0 if _all_zeros(self._int, prec) else -1
def _round_half_down(self, prec):
    """Round to nearest, with ties going toward zero."""
    # An exact half is truncated; everything else behaves like half-up.
    if _exact_half(self._int, prec):
        return -1
    return self._round_half_up(prec)
def _round_half_even(self, prec):
    """Round to nearest, with ties going to the nearest even digit."""
    if _exact_half(self._int, prec):
        # On an exact tie, truncate if the last kept digit is already even.
        if prec == 0 or self._int[prec-1] in '02468':
            return -1
    return self._round_half_up(prec)
def _round_ceiling(self, prec):
    """Round toward +Infinity."""
    # Negative values move toward +Inf by truncating;
    # positive values by rounding away from zero.
    down = self._round_down(prec)
    return down if self._sign else -down
def _round_floor(self, prec):
    """Round toward -Infinity."""
    # Positive values move toward -Inf by truncating;
    # negative values by rounding away from zero.
    down = self._round_down(prec)
    return down if not self._sign else -down
def _round_05up(self, prec):
    """Round away from zero only when the last kept digit is 0 or 5."""
    down = self._round_down(prec)
    if prec and self._int[prec-1] not in '05':
        return down
    return -down
# Dispatch table mapping each rounding-mode name to its implementation.
# (The keys are identical to those produced by the dict(...) keyword form.)
_pick_rounding_function = {
    'ROUND_DOWN': _round_down,
    'ROUND_UP': _round_up,
    'ROUND_HALF_UP': _round_half_up,
    'ROUND_HALF_DOWN': _round_half_down,
    'ROUND_HALF_EVEN': _round_half_even,
    'ROUND_CEILING': _round_ceiling,
    'ROUND_FLOOR': _round_floor,
    'ROUND_05UP': _round_05up,
}
def __round__(self, n=None):
    """Round self to the nearest integer, or to a given precision.

    If only one argument is supplied, round a finite Decimal
    instance self to the nearest integer.  If self is infinite or
    a NaN then a Python exception is raised.  If self is finite
    and lies exactly halfway between two integers then it is
    rounded to the integer with even last digit.

    >>> round(Decimal('123.456'))
    123
    >>> round(Decimal('-456.789'))
    -457
    >>> round(Decimal('-3.0'))
    -3
    >>> round(Decimal('2.5'))
    2
    >>> round(Decimal('3.5'))
    4
    >>> round(Decimal('Inf'))
    Traceback (most recent call last):
      ...
    OverflowError: cannot round an infinity
    >>> round(Decimal('NaN'))
    Traceback (most recent call last):
      ...
    ValueError: cannot round a NaN

    If a second argument n is supplied, self is rounded to n
    decimal places using the rounding mode for the current
    context.

    For an integer n, round(self, -n) is exactly equivalent to
    self.quantize(Decimal('1En')).

    >>> round(Decimal('123.456'), 0)
    Decimal('123')
    >>> round(Decimal('123.456'), 2)
    Decimal('123.46')
    >>> round(Decimal('123.456'), -2)
    Decimal('1E+2')
    >>> round(Decimal('-Infinity'), 37)
    Decimal('NaN')
    >>> round(Decimal('sNaN123'), 0)
    Decimal('NaN123')

    """
    if n is not None:
        # two-argument form: use the equivalent quantize call
        if not isinstance(n, int):
            raise TypeError('Second argument to round should be integral')
        exp = _dec_from_triple(0, '1', -n)
        # quantize uses the current context's rounding mode and turns
        # specials into (signaling) NaN results rather than exceptions
        return self.quantize(exp)

    # one-argument form
    if self._is_special:
        if self.is_nan():
            raise ValueError("cannot round a NaN")
        else:
            raise OverflowError("cannot round an infinity")
    # banker's rounding to exponent 0, independent of the context
    return int(self._rescale(0, ROUND_HALF_EVEN))
def __floor__(self):
    """Return the largest integer n with n <= self.

    Raises ValueError for a NaN and OverflowError for an infinity.
    """
    if self._is_special:
        if self.is_nan():
            raise ValueError("cannot round a NaN")
        raise OverflowError("cannot round an infinity")
    # Round toward -Infinity at exponent 0, then convert to int.
    return int(self._rescale(0, ROUND_FLOOR))
def __ceil__(self):
    """Return the smallest integer n with n >= self.

    Raises ValueError for a NaN and OverflowError for an infinity.
    """
    if self._is_special:
        if self.is_nan():
            raise ValueError("cannot round a NaN")
        raise OverflowError("cannot round an infinity")
    # Round toward +Infinity at exponent 0, then convert to int.
    return int(self._rescale(0, ROUND_CEILING))
def fma(self, other, third, context=None):
    """Fused multiply-add.

    Returns self*other+third with no rounding of the intermediate
    product self*other.

    self and other are multiplied together, with no rounding of
    the result.  The third operand is then added to the result,
    and a single final rounding is performed.
    """

    other = _convert_other(other, raiseit=True)
    third = _convert_other(third, raiseit=True)

    # compute product; raise InvalidOperation if either operand is
    # a signaling NaN or if the product is zero times infinity.
    if self._is_special or other._is_special:
        if context is None:
            context = getcontext()
        # special-value _exp codes: 'N' = sNaN, 'n' = quiet NaN,
        # 'F' = infinity
        if self._exp == 'N':
            return context._raise_error(InvalidOperation, 'sNaN', self)
        if other._exp == 'N':
            return context._raise_error(InvalidOperation, 'sNaN', other)
        if self._exp == 'n':
            product = self
        elif other._exp == 'n':
            product = other
        elif self._exp == 'F':
            if not other:
                return context._raise_error(InvalidOperation,
                                            'INF * 0 in fma')
            product = _SignedInfinity[self._sign ^ other._sign]
        elif other._exp == 'F':
            if not self:
                return context._raise_error(InvalidOperation,
                                            '0 * INF in fma')
            product = _SignedInfinity[self._sign ^ other._sign]
    else:
        # exact unrounded product via integer arithmetic on coefficients
        product = _dec_from_triple(self._sign ^ other._sign,
                                   str(int(self._int) * int(other._int)),
                                   self._exp + other._exp)

    # the single rounding step happens inside __add__
    return product.__add__(third, context)
def _power_modulo(self, other, modulo, context=None):
    """Three argument version of __pow__"""

    other = _convert_other(other)
    if other is NotImplemented:
        return other
    modulo = _convert_other(modulo)
    if modulo is NotImplemented:
        return modulo

    if context is None:
        context = getcontext()

    # deal with NaNs: if there are any sNaNs then first one wins,
    # (i.e. behaviour for NaNs is identical to that of fma)
    # _isnan() returns 2 for an sNaN and 1 for a quiet NaN
    self_is_nan = self._isnan()
    other_is_nan = other._isnan()
    modulo_is_nan = modulo._isnan()
    if self_is_nan or other_is_nan or modulo_is_nan:
        if self_is_nan == 2:
            return context._raise_error(InvalidOperation, 'sNaN',
                                        self)
        if other_is_nan == 2:
            return context._raise_error(InvalidOperation, 'sNaN',
                                        other)
        if modulo_is_nan == 2:
            return context._raise_error(InvalidOperation, 'sNaN',
                                        modulo)
        if self_is_nan:
            return self._fix_nan(context)
        if other_is_nan:
            return other._fix_nan(context)
        return modulo._fix_nan(context)

    # check inputs: we apply same restrictions as Python's pow()
    if not (self._isinteger() and
            other._isinteger() and
            modulo._isinteger()):
        return context._raise_error(InvalidOperation,
                                    'pow() 3rd argument not allowed '
                                    'unless all arguments are integers')
    if other < 0:
        return context._raise_error(InvalidOperation,
                                    'pow() 2nd argument cannot be '
                                    'negative when 3rd argument specified')
    if not modulo:
        return context._raise_error(InvalidOperation,
                                    'pow() 3rd argument cannot be 0')

    # additional restriction for decimal: the modulus must be less
    # than 10**prec in absolute value
    if modulo.adjusted() >= context.prec:
        return context._raise_error(InvalidOperation,
                                    'insufficient precision: pow() 3rd '
                                    'argument must not have more than '
                                    'precision digits')

    # define 0**0 == NaN, for consistency with two-argument pow
    # (even though it hurts!)
    if not other and not self:
        return context._raise_error(InvalidOperation,
                                    'at least one of pow() 1st argument '
                                    'and 2nd argument must be nonzero ;'
                                    '0**0 is not defined')

    # compute sign of result
    if other._iseven():
        sign = 0
    else:
        sign = self._sign

    # convert modulo to a Python integer, and self and other to
    # Decimal integers (i.e. force their exponents to be >= 0)
    modulo = abs(int(modulo))
    base = _WorkRep(self.to_integral_value())
    exponent = _WorkRep(other.to_integral_value())

    # compute result using integer pow(); the 10**exp parts of base
    # and exponent are folded in modularly, digit-exponent by
    # digit-exponent, to keep all intermediates below the modulus
    base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo
    for i in range(exponent.exp):
        base = pow(base, 10, modulo)
    base = pow(base, exponent.int, modulo)

    return _dec_from_triple(sign, str(base), 0)
def _power_exact(self, other, p):
    """Attempt to compute self**other exactly.

    Given Decimals self and other and an integer p, attempt to
    compute an exact result for the power self**other, with p
    digits of precision.  Return None if self**other is not
    exactly representable in p digits.

    Assumes that elimination of special cases has already been
    performed: self and other must both be nonspecial; self must
    be positive and not numerically equal to 1; other must be
    nonzero.  For efficiency, other._exp should not be too large,
    so that 10**abs(other._exp) is a feasible calculation."""

    # In the comments below, we write x for the value of self and y for the
    # value of other.  Write x = xc*10**xe and abs(y) = yc*10**ye, with xc
    # and yc positive integers not divisible by 10.

    # The main purpose of this method is to identify the *failure*
    # of x**y to be exactly representable with as little effort as
    # possible.  So we look for cheap and easy tests that
    # eliminate the possibility of x**y being exact.  Only if all
    # these tests are passed do we go on to actually compute x**y.

    # Here's the main idea.  Express y as a rational number m/n, with m and
    # n relatively prime and n>0.  Then for x**y to be exactly
    # representable (at *any* precision), xc must be the nth power of a
    # positive integer and xe must be divisible by n.  If y is negative
    # then additionally xc must be a power of either 2 or 5, hence a power
    # of 2**n or 5**n.
    #
    # There's a limit to how small |y| can be: if y=m/n as above
    # then:
    #
    #  (1) if xc != 1 then for the result to be representable we
    #      need xc**(1/n) >= 2, and hence also xc**|y| >= 2.  So
    #      if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=
    #      2**(1/|y|), hence xc**|y| < 2 and the result is not
    #      representable.
    #
    #  (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1.  Hence if
    #      |y| < 1/|xe| then the result is not representable.
    #
    # Note that since x is not equal to 1, at least one of (1) and
    # (2) must apply.  Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <
    # 10**-ye iff len(str(|yc|*nbits(xc)) <= -ye.
    #
    # There's also a limit to how large y can be, at least if it's
    # positive: the normalized result will have coefficient xc**y,
    # so if it's representable then xc**y < 10**p, and y <
    # p/log10(xc).  Hence if y*log10(xc) >= p then the result is
    # not exactly representable.

    # if len(str(abs(yc*xe)) <= -ye then abs(yc*xe) < 10**-ye,
    # so |y| < 1/xe and the result is not representable.
    # Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|
    # < 1/nbits(xc).

    # normalize x so that xc is not divisible by 10
    x = _WorkRep(self)
    xc, xe = x.int, x.exp
    while xc % 10 == 0:
        xc //= 10
        xe += 1

    # normalize y so that yc is not divisible by 10
    y = _WorkRep(other)
    yc, ye = y.int, y.exp
    while yc % 10 == 0:
        yc //= 10
        ye += 1

    # case where xc == 1: result is 10**(xe*y), with xe*y
    # required to be an integer
    if xc == 1:
        xe *= yc
        # result is now 10**(xe * 10**ye);  xe * 10**ye must be integral
        while xe % 10 == 0:
            xe //= 10
            ye += 1
        if ye < 0:
            return None
        exponent = xe * 10**ye
        if y.sign == 1:
            exponent = -exponent
        # if other is a nonnegative integer, use ideal exponent
        if other._isinteger() and other._sign == 0:
            ideal_exponent = self._exp*int(other)
            zeros = min(exponent-ideal_exponent, p-1)
        else:
            zeros = 0
        return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros)

    # case where y is negative: xc must be either a power
    # of 2 or a power of 5.
    if y.sign == 1:
        last_digit = xc % 10
        if last_digit in (2,4,6,8):
            # quick test for power of 2
            if xc & -xc != xc:
                return None
            # now xc is a power of 2; e is its exponent
            e = _nbits(xc)-1

            # We now have:
            #
            #   x = 2**e * 10**xe, e > 0, and y < 0.
            #
            # The exact result is:
            #
            #   x**y = 5**(-e*y) * 10**(e*y + xe*y)
            #
            # provided that both e*y and xe*y are integers.  Note that if
            # 5**(-e*y) >= 10**p, then the result can't be expressed
            # exactly with p digits of precision.
            #
            # Using the above, we can guard against large values of ye.
            # 93/65 is an upper bound for log(10)/log(5), so if
            #
            #   ye >= len(str(93*p//65))
            #
            # then
            #
            #   -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5),
            #
            # so 5**(-e*y) >= 10**p, and the coefficient of the result
            # can't be expressed in p digits.

            # emax >= largest e such that 5**e < 10**p.
            emax = p*93//65
            if ye >= len(str(emax)):
                return None

            # Find -e*y and -xe*y; both must be integers
            e = _decimal_lshift_exact(e * yc, ye)
            xe = _decimal_lshift_exact(xe * yc, ye)
            if e is None or xe is None:
                return None

            if e > emax:
                return None
            xc = 5**e

        elif last_digit == 5:
            # e >= log_5(xc) if xc is a power of 5; we have
            # equality all the way up to xc=5**2658
            e = _nbits(xc)*28//65
            xc, remainder = divmod(5**e, xc)
            if remainder:
                return None
            while xc % 5 == 0:
                xc //= 5
                e -= 1

            # Guard against large values of ye, using the same logic as in
            # the 'xc is a power of 2' branch.  10/3 is an upper bound for
            # log(10)/log(2).
            emax = p*10//3
            if ye >= len(str(emax)):
                return None

            e = _decimal_lshift_exact(e * yc, ye)
            xe = _decimal_lshift_exact(xe * yc, ye)
            if e is None or xe is None:
                return None

            if e > emax:
                return None
            xc = 2**e
        else:
            return None

        if xc >= 10**p:
            return None
        xe = -e-xe
        return _dec_from_triple(0, str(xc), xe)

    # now y is positive; find m and n such that y = m/n
    if ye >= 0:
        m, n = yc*10**ye, 1
    else:
        if xe != 0 and len(str(abs(yc*xe))) <= -ye:
            return None
        xc_bits = _nbits(xc)
        if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
            return None
        m, n = yc, 10**(-ye)
        # reduce m/n to lowest terms (only 2s and 5s can divide n)
        while m % 2 == n % 2 == 0:
            m //= 2
            n //= 2
        while m % 5 == n % 5 == 0:
            m //= 5
            n //= 5

    # compute nth root of xc*10**xe
    if n > 1:
        # if 1 < xc < 2**n then xc isn't an nth power
        if xc != 1 and xc_bits <= n:
            return None

        xe, rem = divmod(xe, n)
        if rem != 0:
            return None

        # compute nth root of xc using Newton's method
        a = 1 << -(-_nbits(xc)//n) # initial estimate
        while True:
            q, r = divmod(xc, a**(n-1))
            if a <= q:
                break
            else:
                a = (a*(n-1) + q)//n

        # check that the candidate root is exact
        if not (a == q and r == 0):
            return None
        xc = a

    # now xc*10**xe is the nth root of the original xc*10**xe
    # compute mth power of xc*10**xe

    # if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m >
    # 10**p and the result is not representable.
    if xc > 1 and m > p*100//_log10_lb(xc):
        return None
    xc = xc**m
    xe *= m
    if xc > 10**p:
        return None

    # by this point the result *is* exactly representable
    # adjust the exponent to get as close as possible to the ideal
    # exponent, if necessary
    str_xc = str(xc)
    if other._isinteger() and other._sign == 0:
        ideal_exponent = self._exp*int(other)
        zeros = min(xe-ideal_exponent, p-len(str_xc))
    else:
        zeros = 0
    return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros)
def __pow__(self, other, modulo=None, context=None):
    """Return self ** other [ % modulo].

    With two arguments, compute self**other.

    With three arguments, compute (self**other) % modulo.  For the
    three argument form, the following restrictions on the
    arguments hold:

     - all three arguments must be integral
     - other must be nonnegative
     - either self or other (or both) must be nonzero
     - modulo must be nonzero and must have at most p digits,
       where p is the context precision.

    If any of these restrictions is violated the InvalidOperation
    flag is raised.

    The result of pow(self, other, modulo) is identical to the
    result that would be obtained by computing (self**other) %
    modulo with unbounded precision, but is computed more
    efficiently.  It is always exact.
    """

    if modulo is not None:
        return self._power_modulo(other, modulo, context)

    other = _convert_other(other)
    if other is NotImplemented:
        return other

    if context is None:
        context = getcontext()

    # either argument is a NaN => result is NaN
    ans = self._check_nans(other, context)
    if ans:
        return ans

    # 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)
    if not other:
        if not self:
            return context._raise_error(InvalidOperation, '0 ** 0')
        else:
            return _One

    # result has sign 1 iff self._sign is 1 and other is an odd integer
    result_sign = 0
    if self._sign == 1:
        if other._isinteger():
            if not other._iseven():
                result_sign = 1
        else:
            # -ve**noninteger = NaN
            # (-0)**noninteger = 0**noninteger
            if self:
                return context._raise_error(InvalidOperation,
                    'x ** y with x negative and y not an integer')
        # negate self, without doing any unwanted rounding
        self = self.copy_negate()

    # 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity
    if not self:
        if other._sign == 0:
            return _dec_from_triple(result_sign, '0', 0)
        else:
            return _SignedInfinity[result_sign]

    # Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0
    if self._isinfinity():
        if other._sign == 0:
            return _SignedInfinity[result_sign]
        else:
            return _dec_from_triple(result_sign, '0', 0)

    # 1**other = 1, but the choice of exponent and the flags
    # depend on the exponent of self, and on whether other is a
    # positive integer, a negative integer, or neither
    if self == _One:
        if other._isinteger():
            # exp = max(self._exp*max(int(other), 0),
            # 1-context.prec) but evaluating int(other) directly
            # is dangerous until we know other is small (other
            # could be 1e999999999)
            if other._sign == 1:
                multiplier = 0
            elif other > context.prec:
                multiplier = context.prec
            else:
                multiplier = int(other)

            exp = self._exp * multiplier
            if exp < 1-context.prec:
                exp = 1-context.prec
                context._raise_error(Rounded)
        else:
            context._raise_error(Inexact)
            context._raise_error(Rounded)
            exp = 1-context.prec

        return _dec_from_triple(result_sign, '1'+'0'*-exp, exp)

    # compute adjusted exponent of self
    self_adj = self.adjusted()

    # self ** infinity is infinity if self > 1, 0 if self < 1
    # self ** -infinity is infinity if self < 1, 0 if self > 1
    if other._isinfinity():
        if (other._sign == 0) == (self_adj < 0):
            return _dec_from_triple(result_sign, '0', 0)
        else:
            return _SignedInfinity[result_sign]

    # from here on, the result always goes through the call
    # to _fix at the end of this function.
    ans = None
    exact = False

    # crude test to catch cases of extreme overflow/underflow.  If
    # log10(self)*other >= 10**bound and bound >= len(str(Emax))
    # then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence
    # self**other >= 10**(Emax+1), so overflow occurs.  The test
    # for underflow is similar.
    bound = self._log10_exp_bound() + other.adjusted()
    if (self_adj >= 0) == (other._sign == 0):
        # self > 1 and other +ve, or self < 1 and other -ve
        # possibility of overflow
        if bound >= len(str(context.Emax)):
            ans = _dec_from_triple(result_sign, '1', context.Emax+1)
    else:
        # self > 1 and other -ve, or self < 1 and other +ve
        # possibility of underflow to 0
        Etiny = context.Etiny()
        if bound >= len(str(-Etiny)):
            ans = _dec_from_triple(result_sign, '1', Etiny-1)

    # try for an exact result with precision +1
    if ans is None:
        ans = self._power_exact(other, context.prec + 1)
        if ans is not None:
            if result_sign == 1:
                ans = _dec_from_triple(1, ans._int, ans._exp)
            exact = True

    # usual case: inexact result, x**y computed directly as exp(y*log(x))
    if ans is None:
        p = context.prec
        x = _WorkRep(self)
        xc, xe = x.int, x.exp
        y = _WorkRep(other)
        yc, ye = y.int, y.exp
        if y.sign == 1:
            yc = -yc

        # compute correctly rounded result:  start with precision +3,
        # then increase precision until result is unambiguously roundable
        extra = 3
        while True:
            coeff, exp = _dpower(xc, xe, yc, ye, p+extra)
            # loop until the computed value is safely away from a
            # rounding boundary at precision p
            if coeff % (5*10**(len(str(coeff))-p-1)):
                break
            extra += 3

        ans = _dec_from_triple(result_sign, str(coeff), exp)

    # unlike exp, ln and log10, the power function respects the
    # rounding mode; no need to switch to ROUND_HALF_EVEN here

    # There's a difficulty here when 'other' is not an integer and
    # the result is exact.  In this case, the specification
    # requires that the Inexact flag be raised (in spite of
    # exactness), but since the result is exact _fix won't do this
    # for us.  (Correspondingly, the Underflow signal should also
    # be raised for subnormal results.)  We can't directly raise
    # these signals either before or after calling _fix, since
    # that would violate the precedence for signals.  So we wrap
    # the ._fix call in a temporary context, and reraise
    # afterwards.
    if exact and not other._isinteger():
        # pad with zeros up to length context.prec+1 if necessary; this
        # ensures that the Rounded signal will be raised.
        if len(ans._int) <= context.prec:
            expdiff = context.prec + 1 - len(ans._int)
            ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff,
                                   ans._exp-expdiff)

        # create a copy of the current context, with cleared flags/traps
        newcontext = context.copy()
        newcontext.clear_flags()
        for exception in _signals:
            newcontext.traps[exception] = 0

        # round in the new context
        ans = ans._fix(newcontext)

        # raise Inexact, and if necessary, Underflow
        newcontext._raise_error(Inexact)
        if newcontext.flags[Subnormal]:
            newcontext._raise_error(Underflow)

        # propagate signals to the original context; _fix could
        # have raised any of Overflow, Underflow, Subnormal,
        # Inexact, Rounded, Clamped.  Overflow needs the correct
        # arguments.  Note that the order of the exceptions is
        # important here.
        if newcontext.flags[Overflow]:
            context._raise_error(Overflow, 'above Emax', ans._sign)
        for exception in Underflow, Subnormal, Inexact, Rounded, Clamped:
            if newcontext.flags[exception]:
                context._raise_error(exception)

    else:
        ans = ans._fix(context)

    return ans
def __rpow__(self, other, context=None):
    """Reflected power: compute other ** self."""
    converted = _convert_other(other)
    if converted is NotImplemented:
        return NotImplemented
    # Delegate to the forward operation with the operands swapped.
    return converted.__pow__(self, context=context)
def normalize(self, context=None):
    """Reduce to simplest form: strip trailing zeros from the
    coefficient, and canonicalize any zero result to 0e0."""

    if context is None:
        context = getcontext()

    if self._is_special:
        ans = self._check_nans(context=context)
        if ans:
            return ans

    # round to the context first, so normalization is of the rounded value
    dup = self._fix(context)
    if dup._isinfinity():
        return dup

    if not dup:
        return _dec_from_triple(dup._sign, '0', 0)
    exp_max = [context.Emax, context.Etop()][context.clamp]
    end = len(dup._int)
    exp = dup._exp
    # strip trailing zeros, raising the exponent accordingly, but never
    # beyond the largest exponent the context allows
    while dup._int[end-1] == '0' and exp < exp_max:
        exp += 1
        end -= 1
    return _dec_from_triple(dup._sign, dup._int[:end], exp)
def quantize(self, exp, rounding=None, context=None, watchexp=True):
    """Quantize self so its exponent is the same as that of exp.

    Similar to self._rescale(exp._exp) but with error checking.

    NOTE(review): watchexp=False skips the exponent-range and
    precision checks below; it appears to be a legacy escape hatch —
    confirm before relying on it in new code.
    """
    exp = _convert_other(exp, raiseit=True)

    if context is None:
        context = getcontext()
    if rounding is None:
        rounding = context.rounding

    if self._is_special or exp._is_special:
        ans = self._check_nans(exp, context)
        if ans:
            return ans

        if exp._isinfinity() or self._isinfinity():
            if exp._isinfinity() and self._isinfinity():
                return Decimal(self)  # if both are inf, it is OK
            return context._raise_error(InvalidOperation,
                                        'quantize with one INF')

    # if we're not watching exponents, do a simple rescale
    if not watchexp:
        ans = self._rescale(exp._exp, rounding)
        # raise Inexact and Rounded where appropriate
        if ans._exp > self._exp:
            context._raise_error(Rounded)
            if ans != self:
                context._raise_error(Inexact)
        return ans

    # exp._exp should be between Etiny and Emax
    if not (context.Etiny() <= exp._exp <= context.Emax):
        return context._raise_error(InvalidOperation,
               'target exponent out of bounds in quantize')

    if not self:
        ans = _dec_from_triple(self._sign, '0', exp._exp)
        return ans._fix(context)

    self_adjusted = self.adjusted()
    # pre-checks before rescaling, then re-checks on the actual result
    # (rounding may have carried into an extra digit)
    if self_adjusted > context.Emax:
        return context._raise_error(InvalidOperation,
                                    'exponent of quantize result too large for current context')
    if self_adjusted - exp._exp + 1 > context.prec:
        return context._raise_error(InvalidOperation,
                                    'quantize result has too many digits for current context')

    ans = self._rescale(exp._exp, rounding)
    if ans.adjusted() > context.Emax:
        return context._raise_error(InvalidOperation,
                                    'exponent of quantize result too large for current context')
    if len(ans._int) > context.prec:
        return context._raise_error(InvalidOperation,
                                    'quantize result has too many digits for current context')

    # raise appropriate flags
    if ans and ans.adjusted() < context.Emin:
        context._raise_error(Subnormal)
    if ans._exp > self._exp:
        if ans != self:
            context._raise_error(Inexact)
        context._raise_error(Rounded)

    # call to fix takes care of any necessary folddown, and
    # signals Clamped if necessary
    ans = ans._fix(context)
    return ans
def same_quantum(self, other, context=None):
    """Return True if self and other have the same exponent; otherwise
    return False.

    Specials compare by kind: two NaNs match, two infinities match,
    and any other pairing involving a special value does not.
    """
    other = _convert_other(other, raiseit=True)
    if self._is_special or other._is_special:
        if self.is_nan():
            return other.is_nan()
        if self.is_infinite():
            return other.is_infinite()
        return False
    return self._exp == other._exp
def _rescale(self, exp, rounding):
    """Rescale self so that the exponent is exp, either by padding with zeros
    or by truncating digits, using the given rounding mode.

    Specials are returned without change.  This operation is
    quiet: it raises no flags, and uses no information from the
    context.

    exp = exp to scale to (an integer)
    rounding = rounding mode
    """
    if self._is_special:
        return Decimal(self)
    if not self:
        # A zero keeps its sign but takes the requested exponent.
        return _dec_from_triple(self._sign, '0', exp)

    if self._exp >= exp:
        # pad answer with zeros if necessary
        return _dec_from_triple(self._sign,
                                self._int + '0'*(self._exp - exp), exp)

    # too many digits; round and lose data.  If self.adjusted() <
    # exp-1, replace self by 10**(exp-1) before rounding
    digits = len(self._int) + self._exp - exp
    if digits < 0:
        self = _dec_from_triple(self._sign, '1', exp-1)
        digits = 0
    # _pick_rounding_function maps the rounding-mode constant to the
    # helper that decides whether the truncated coefficient must be
    # incremented (returns 1) or left alone.
    this_function = self._pick_rounding_function[rounding]
    changed = this_function(self, digits)
    coeff = self._int[:digits] or '0'
    if changed == 1:
        coeff = str(int(coeff)+1)
    return _dec_from_triple(self._sign, coeff, exp)
def _round(self, places, rounding):
    """Round a nonzero, nonspecial Decimal to a fixed number of
    significant figures, using the given rounding mode.

    Infinities, NaNs and zeros are returned unaltered.

    This operation is quiet: it raises no flags, and uses no
    information from the context.
    """
    if places <= 0:
        raise ValueError("argument should be at least 1 in _round")
    if self._is_special or not self:
        return Decimal(self)
    result = self._rescale(self.adjusted() + 1 - places, rounding)
    # Rounding can bump the adjusted exponent (e.g. 99.97 -> 100. at
    # 3 significant figures), leaving one digit too many; a second
    # rescale trims the extra trailing zero.
    if result.adjusted() != self.adjusted():
        result = result._rescale(result.adjusted() + 1 - places, rounding)
    return result
def to_integral_exact(self, rounding=None, context=None):
    """Rounds to a nearby integer.

    If no rounding mode is specified, take the rounding mode from
    the context.  This method raises the Rounded and Inexact flags
    when appropriate.

    See also: to_integral_value, which does exactly the same as
    this method except that it doesn't raise Inexact or Rounded.
    """
    if self._is_special:
        nan = self._check_nans(context=context)
        if nan:
            return nan
        return Decimal(self)
    if self._exp >= 0:
        # Already integral: return unchanged, no flags raised.
        return Decimal(self)
    if not self:
        return _dec_from_triple(self._sign, '0', 0)
    context = context if context is not None else getcontext()
    if rounding is None:
        rounding = context.rounding
    result = self._rescale(0, rounding)
    if result != self:
        context._raise_error(Inexact)
    context._raise_error(Rounded)
    return result
def to_integral_value(self, rounding=None, context=None):
    """Rounds to the nearest integer, without raising inexact, rounded."""
    if context is None:
        context = getcontext()
    if rounding is None:
        rounding = context.rounding
    if self._is_special:
        nan = self._check_nans(context=context)
        return nan if nan else Decimal(self)
    # Nonnegative exponent means the value is already integral.
    return Decimal(self) if self._exp >= 0 else self._rescale(0, rounding)

# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
def sqrt(self, context=None):
    """Return the square root of self.

    The result is correctly rounded (half-even) to the context's
    precision, and flags are raised by the final call to _fix.
    """
    if context is None:
        context = getcontext()

    if self._is_special:
        ans = self._check_nans(context=context)
        if ans:
            return ans

        # sqrt(+Infinity) = +Infinity
        if self._isinfinity() and self._sign == 0:
            return Decimal(self)

    if not self:
        # exponent = self._exp // 2.  sqrt(-0) = -0
        ans = _dec_from_triple(self._sign, '0', self._exp // 2)
        return ans._fix(context)

    if self._sign == 1:
        return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')

    # At this point self represents a positive number.  Let p be
    # the desired precision and express self in the form c*100**e
    # with c a positive real number and e an integer, c and e
    # being chosen so that 100**(p-1) <= c < 100**p.  Then the
    # (exact) square root of self is sqrt(c)*10**e, and 10**(p-1)
    # <= sqrt(c) < 10**p, so the closest representable Decimal at
    # precision p is n*10**e where n = round_half_even(sqrt(c)),
    # the closest integer to sqrt(c) with the even integer chosen
    # in the case of a tie.
    #
    # To ensure correct rounding in all cases, we use the
    # following trick: we compute the square root to an extra
    # place (precision p+1 instead of precision p), rounding down.
    # Then, if the result is inexact and its last digit is 0 or 5,
    # we increase the last digit to 1 or 6 respectively; if it's
    # exact we leave the last digit alone.  Now the final round to
    # p places (or fewer in the case of underflow) will round
    # correctly and raise the appropriate flags.

    # use an extra digit of precision
    prec = context.prec+1

    # write argument in the form c*100**e where e = self._exp//2
    # is the 'ideal' exponent, to be used if the square root is
    # exactly representable.  l is the number of 'digits' of c in
    # base 100, so that 100**(l-1) <= c < 100**l.
    op = _WorkRep(self)
    e = op.exp >> 1
    if op.exp & 1:
        # odd exponent: absorb a factor of 10 into the coefficient
        c = op.int * 10
        l = (len(self._int) >> 1) + 1
    else:
        c = op.int
        l = len(self._int)+1 >> 1

    # rescale so that c has exactly prec base 100 'digits'
    shift = prec-l
    if shift >= 0:
        c *= 100**shift
        exact = True
    else:
        c, remainder = divmod(c, 100**-shift)
        exact = not remainder
    e -= shift

    # find n = floor(sqrt(c)) using Newton's method
    n = 10**prec
    while True:
        q = c//n
        if n <= q:
            break
        else:
            n = n + q >> 1
    exact = exact and n*n == c

    if exact:
        # result is exact; rescale to use ideal exponent e
        if shift >= 0:
            # assert n % 10**shift == 0
            n //= 10**shift
        else:
            n *= 10**-shift
        e += shift
    else:
        # result is not exact; fix last digit as described above
        if n % 5 == 0:
            n += 1

    ans = _dec_from_triple(0, str(n), e)

    # round, and fit to current context
    context = context._shallow_copy()
    rounding = context._set_rounding(ROUND_HALF_EVEN)
    ans = ans._fix(context)
    context.rounding = rounding

    return ans
def max(self, other, context=None):
    """Returns the larger value.

    Like max(self, other) except if one is not a number, returns
    NaN (and signals if one is sNaN).  Also rounds.
    """
    other = _convert_other(other, raiseit=True)
    if context is None:
        context = getcontext()

    if self._is_special or other._is_special:
        # A single quiet NaN loses to a number; any other NaN
        # combination goes through the usual NaN handling.
        self_nan, other_nan = self._isnan(), other._isnan()
        if self_nan or other_nan:
            if other_nan == 1 and self_nan == 0:
                return self._fix(context)
            if self_nan == 1 and other_nan == 0:
                return other._fix(context)
            return self._check_nans(other, context)

    comparison = self._cmp(other)
    if comparison == 0:
        # Numerically equal: break the tie with the total ordering
        # (sign first, then exponent), exactly as compare_total does.
        comparison = self.compare_total(other)

    winner = other if comparison == -1 else self
    return winner._fix(context)
def min(self, other, context=None):
    """Returns the smaller value.

    Like min(self, other) except if one is not a number, returns
    NaN (and signals if one is sNaN).  Also rounds.
    """
    other = _convert_other(other, raiseit=True)
    if context is None:
        context = getcontext()

    if self._is_special or other._is_special:
        # A single quiet NaN loses to a number; any other NaN
        # combination goes through the usual NaN handling.
        self_nan, other_nan = self._isnan(), other._isnan()
        if self_nan or other_nan:
            if other_nan == 1 and self_nan == 0:
                return self._fix(context)
            if self_nan == 1 and other_nan == 0:
                return other._fix(context)
            return self._check_nans(other, context)

    comparison = self._cmp(other)
    if comparison == 0:
        # Numerically equal: break the tie with the total ordering.
        comparison = self.compare_total(other)

    chosen = self if comparison == -1 else other
    return chosen._fix(context)
def _isinteger(self):
"""Returns whether self is an integer"""
if self._is_special:
return False
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == '0'*len(rest)
def _iseven(self):
"""Returns True if self is even. Assumes self is an integer."""
if not self or self._exp > 0:
return True
return self._int[-1+self._exp] in '02468'
def adjusted(self):
    """Return the adjusted exponent of self"""
    try:
        return len(self._int) + self._exp - 1
    except TypeError:
        # For NaN/Infinity self._exp is a marker string ('n', 'N',
        # 'F'), so the arithmetic above raises TypeError.
        return 0
def canonical(self):
    """Returns the same Decimal object.

    As we do not have different encodings for the same number, the
    received object already is in its canonical form.
    """
    # No normalization needed: each value has a unique representation.
    return self
def compare_signal(self, other, context=None):
    """Compares self to the other operand numerically.

    It's pretty much like compare(), but all NaNs signal, with signaling
    NaNs taking precedence over quiet NaNs.
    """
    other = _convert_other(other, raiseit=True)
    # All NaNs signal here, unlike in compare().
    nan_result = self._compare_check_nans(other, context)
    if nan_result:
        return nan_result
    return self.compare(other, context=context)
def compare_total(self, other, context=None):
    """Compares self to other using the abstract representations.

    This is not like the standard compare, which use their numerical
    value. Note that a total ordering is defined for all possible abstract
    representations.

    Returns _NegativeOne, _Zero or _One.  The ordering is:
    -NaN < -sNaN < finite negatives < -0 < +0 < finite positives
    < +sNaN < +NaN, with NaN payloads compared as integers and
    equal values ordered by exponent.
    """
    other = _convert_other(other, raiseit=True)

    # if one is negative and the other is positive, it's easy
    if self._sign and not other._sign:
        return _NegativeOne
    if not self._sign and other._sign:
        return _One
    sign = self._sign

    # let's handle both NaN types
    self_nan = self._isnan()
    other_nan = other._isnan()
    if self_nan or other_nan:
        if self_nan == other_nan:
            # compare payloads as though they're integers
            self_key = len(self._int), self._int
            other_key = len(other._int), other._int
            if self_key < other_key:
                if sign:
                    return _One
                else:
                    return _NegativeOne
            if self_key > other_key:
                if sign:
                    return _NegativeOne
                else:
                    return _One
            return _Zero

        # Mixed NaN kinds: quiet NaN (1) is further from zero than
        # signaling NaN (2), which is further than any number.
        if sign:
            if self_nan == 1:
                return _NegativeOne
            if other_nan == 1:
                return _One
            if self_nan == 2:
                return _NegativeOne
            if other_nan == 2:
                return _One
        else:
            if self_nan == 1:
                return _One
            if other_nan == 1:
                return _NegativeOne
            if self_nan == 2:
                return _One
            if other_nan == 2:
                return _NegativeOne

    if self < other:
        return _NegativeOne
    if self > other:
        return _One

    # Numerically equal: order by exponent, direction flipped for
    # negative values.
    if self._exp < other._exp:
        if sign:
            return _One
        else:
            return _NegativeOne
    if self._exp > other._exp:
        if sign:
            return _NegativeOne
        else:
            return _One
    return _Zero
def compare_total_mag(self, other, context=None):
    """Compares self to other using abstract repr., ignoring sign.

    Like compare_total, but with operand's sign ignored and assumed to be 0.
    """
    other = _convert_other(other, raiseit=True)
    # Strip both signs, then defer to the signed total ordering.
    return self.copy_abs().compare_total(other.copy_abs())
def copy_abs(self):
    """Returns a copy with the sign set to 0. """
    # Coefficient, exponent and specialness are preserved unchanged.
    return _dec_from_triple(0, self._int, self._exp, self._is_special)
def copy_negate(self):
    """Returns a copy with the sign inverted."""
    # _sign is always 0 or 1, so 1 - _sign flips it.
    return _dec_from_triple(1 - self._sign, self._int, self._exp,
                            self._is_special)
def copy_sign(self, other, context=None):
    """Returns self with the sign of other."""
    other = _convert_other(other, raiseit=True)
    # Keep our coefficient/exponent/specialness, borrow other's sign.
    borrowed_sign = other._sign
    return _dec_from_triple(borrowed_sign, self._int, self._exp,
                            self._is_special)
def exp(self, context=None):
    """Returns e ** self.

    The result is correctly rounded; flags are raised by the final
    call to _fix.
    """
    if context is None:
        context = getcontext()

    # exp(NaN) = NaN
    ans = self._check_nans(context=context)
    if ans:
        return ans

    # exp(-Infinity) = 0
    if self._isinfinity() == -1:
        return _Zero

    # exp(0) = 1
    if not self:
        return _One

    # exp(Infinity) = Infinity
    if self._isinfinity() == 1:
        return Decimal(self)

    # the result is now guaranteed to be inexact (the true
    # mathematical result is transcendental). There's no need to
    # raise Rounded and Inexact here---they'll always be raised as
    # a result of the call to _fix.
    p = context.prec
    adj = self.adjusted()

    # we only need to do any computation for quite a small range
    # of adjusted exponents---for example, -29 <= adj <= 10 for
    # the default context.  For smaller exponent the result is
    # indistinguishable from 1 at the given precision, while for
    # larger exponent the result either overflows or underflows.
    if self._sign == 0 and adj > len(str((context.Emax+1)*3)):
        # overflow
        ans = _dec_from_triple(0, '1', context.Emax+1)
    elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)):
        # underflow to 0
        ans = _dec_from_triple(0, '1', context.Etiny()-1)
    elif self._sign == 0 and adj < -p:
        # p+1 digits; final round will raise correct flags
        ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p)
    elif self._sign == 1 and adj < -p-1:
        # p+1 digits; final round will raise correct flags
        ans = _dec_from_triple(0, '9'*(p+1), -p-1)
    # general case
    else:
        op = _WorkRep(self)
        c, e = op.int, op.exp
        if op.sign == 1:
            c = -c

        # compute correctly rounded result: increase precision by
        # 3 digits at a time until we get an unambiguously
        # roundable result
        extra = 3
        while True:
            coeff, exp = _dexp(c, e, p+extra)
            # unambiguous: last extra digits are not ...5000/...0000
            if coeff % (5*10**(len(str(coeff))-p-1)):
                break
            extra += 3

        ans = _dec_from_triple(0, str(coeff), exp)

    # at this stage, ans should round correctly with *any*
    # rounding mode, not just with ROUND_HALF_EVEN
    context = context._shallow_copy()
    rounding = context._set_rounding(ROUND_HALF_EVEN)
    ans = ans._fix(context)
    context.rounding = rounding

    return ans
def is_canonical(self):
    """Return True if self is canonical; otherwise return False.

    Currently, the encoding of a Decimal instance is always
    canonical, so this method returns True for any Decimal.
    """
    return True
def is_finite(self):
    """Return True if self is finite; otherwise return False.

    A Decimal instance is considered finite if it is neither
    infinite nor a NaN.
    """
    # Infinities and NaNs are the only values flagged as special.
    return not self._is_special
def is_infinite(self):
    """Return True if self is infinite; otherwise return False."""
    # Infinities store the marker 'F' in the exponent slot.
    return self._exp == 'F'
def is_nan(self):
    """Return True if self is a qNaN or sNaN; otherwise return False."""
    # 'n' marks a quiet NaN, 'N' a signaling NaN.
    return self._exp == 'n' or self._exp == 'N'
def is_normal(self, context=None):
    """Return True if self is a normal number; otherwise return False."""
    if self._is_special or not self:
        return False
    if context is None:
        context = getcontext()
    # Normal numbers are those whose adjusted exponent reaches Emin.
    return self.adjusted() >= context.Emin
def is_qnan(self):
    """Return True if self is a quiet NaN; otherwise return False."""
    # 'n' is the quiet-NaN exponent marker.
    return self._exp == 'n'
def is_signed(self):
    """Return True if self is negative; otherwise return False."""
    # _sign is 0 for positive, 1 for negative.
    return bool(self._sign)
def is_snan(self):
    """Return True if self is a signaling NaN; otherwise return False."""
    # 'N' is the signaling-NaN exponent marker.
    return self._exp == 'N'
def is_subnormal(self, context=None):
    """Return True if self is subnormal; otherwise return False."""
    if self._is_special or not self:
        return False
    if context is None:
        context = getcontext()
    # Subnormals are nonzero values whose adjusted exponent falls
    # below the context's Emin.
    return self.adjusted() < context.Emin
def is_zero(self):
    """Return True if self is a zero; otherwise return False."""
    # A zero is a non-special value whose coefficient is '0'.
    return self._int == '0' and not self._is_special
def _ln_exp_bound(self):
    """Compute a lower bound for the adjusted exponent of self.ln().
    In other words, compute r such that self.ln() >= 10**r. Assumes
    that self is finite and positive and that self != 1.
    """
    # for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1
    adj = self._exp + len(self._int) - 1
    if adj >= 1:
        # argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)
        return len(str(adj*23//10)) - 1
    if adj <= -2:
        # argument <= 0.1
        return len(str((-1-adj)*23//10)) - 1
    op = _WorkRep(self)
    c, e = op.int, op.exp
    if adj == 0:
        # 1 < self < 10
        num = str(c-10**-e)
        den = str(c)
        # (num < den) is 0 or 1, correcting the digit-count estimate
        # when num's leading digit is smaller than den's.
        return len(num) - len(den) - (num < den)
    # adj == -1, 0.1 <= self < 1
    return e + len(str(10**-e - c)) - 1
def ln(self, context=None):
    """Returns the natural (base e) logarithm of self.

    The result is correctly rounded (half-even); flags are raised
    by the final call to _fix.
    """
    if context is None:
        context = getcontext()

    # ln(NaN) = NaN
    ans = self._check_nans(context=context)
    if ans:
        return ans

    # ln(0.0) == -Infinity
    if not self:
        return _NegativeInfinity

    # ln(Infinity) = Infinity
    if self._isinfinity() == 1:
        return _Infinity

    # ln(1.0) == 0.0
    if self == _One:
        return _Zero

    # ln(negative) raises InvalidOperation
    if self._sign == 1:
        return context._raise_error(InvalidOperation,
                                    'ln of a negative value')

    # result is irrational, so necessarily inexact
    op = _WorkRep(self)
    c, e = op.int, op.exp
    p = context.prec

    # correctly rounded result: repeatedly increase precision by 3
    # until we get an unambiguously roundable result
    places = p - self._ln_exp_bound() + 2 # at least p+3 places
    while True:
        coeff = _dlog(c, e, places)
        # assert len(str(abs(coeff)))-p >= 1
        if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
            break
        places += 3
    ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)

    context = context._shallow_copy()
    rounding = context._set_rounding(ROUND_HALF_EVEN)
    ans = ans._fix(context)
    context.rounding = rounding
    return ans
def _log10_exp_bound(self):
    """Compute a lower bound for the adjusted exponent of self.log10().
    In other words, find r such that self.log10() >= 10**r.
    Assumes that self is finite and positive and that self != 1.
    """
    # For x >= 10 or x < 0.1 we only need a bound on the integer
    # part of log10(self), and this comes directly from the
    # exponent of x.  For 0.1 <= x <= 10 we use the inequalities
    # 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| >
    # (1-1/x)/2.31 > 0.  If x < 1 then |log10(x)| > (1-x)/2.31 > 0
    adj = self._exp + len(self._int) - 1
    if adj >= 1:
        # self >= 10
        return len(str(adj))-1
    if adj <= -2:
        # self < 0.1
        return len(str(-1-adj))-1
    op = _WorkRep(self)
    c, e = op.int, op.exp
    if adj == 0:
        # 1 < self < 10
        num = str(c-10**-e)
        # 231 = 2.31 scaled by 100, matching the inequality above
        den = str(231*c)
        return len(num) - len(den) - (num < den) + 2
    # adj == -1, 0.1 <= self < 1
    num = str(10**-e-c)
    return len(num) + e - (num < "231") - 1
def log10(self, context=None):
    """Returns the base 10 logarithm of self.

    The result is correctly rounded (half-even); flags are raised
    by the final call to _fix.
    """
    if context is None:
        context = getcontext()

    # log10(NaN) = NaN
    ans = self._check_nans(context=context)
    if ans:
        return ans

    # log10(0.0) == -Infinity
    if not self:
        return _NegativeInfinity

    # log10(Infinity) = Infinity
    if self._isinfinity() == 1:
        return _Infinity

    # log10(negative or -Infinity) raises InvalidOperation
    if self._sign == 1:
        return context._raise_error(InvalidOperation,
                                    'log10 of a negative value')

    # log10(10**n) = n
    if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
        # answer may need rounding
        ans = Decimal(self._exp + len(self._int) - 1)
    else:
        # result is irrational, so necessarily inexact
        op = _WorkRep(self)
        c, e = op.int, op.exp
        p = context.prec

        # correctly rounded result: repeatedly increase precision
        # until result is unambiguously roundable
        places = p-self._log10_exp_bound()+2
        while True:
            coeff = _dlog10(c, e, places)
            # assert len(str(abs(coeff)))-p >= 1
            if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
                break
            places += 3
        ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)

    context = context._shallow_copy()
    rounding = context._set_rounding(ROUND_HALF_EVEN)
    ans = ans._fix(context)
    context.rounding = rounding
    return ans
def logb(self, context=None):
    """ Returns the exponent of the magnitude of self's MSD.

    The result is the integer which is the exponent of the magnitude
    of the most significant digit of self (as though it were truncated
    to a single digit while maintaining the value of that digit and
    without limiting the resulting exponent).
    """
    # logb(NaN) = NaN
    nan = self._check_nans(context=context)
    if nan:
        return nan

    if context is None:
        context = getcontext()

    # logb(+/-Inf) = +Inf
    if self._isinfinity():
        return _Infinity

    # logb(0) = -Inf, DivisionByZero
    if not self:
        return context._raise_error(DivisionByZero, 'logb(0)', 1)

    # Otherwise the answer is just the adjusted exponent of self as
    # a Decimal, fitted to the current context.
    return Decimal(self.adjusted())._fix(context)
def _islogical(self):
"""Return True if self is a logical operand.
For being logical, it must be a finite number with a sign of 0,
an exponent of 0, and a coefficient whose digits must all be
either 0 or 1.
"""
if self._sign != 0 or self._exp != 0:
return False
for dig in self._int:
if dig not in '01':
return False
return True
def _fill_logical(self, context, opa, opb):
dif = context.prec - len(opa)
if dif > 0:
opa = '0'*dif + opa
elif dif < 0:
opa = opa[-context.prec:]
dif = context.prec - len(opb)
if dif > 0:
opb = '0'*dif + opb
elif dif < 0:
opb = opb[-context.prec:]
return opa, opb
def logical_and(self, other, context=None):
    """Applies an 'and' operation between self and other's digits."""
    if context is None:
        context = getcontext()

    other = _convert_other(other, raiseit=True)

    if not self._islogical() or not other._islogical():
        return context._raise_error(InvalidOperation)

    # Pad/truncate both coefficients to context.prec digits.
    opa, opb = self._fill_logical(context, self._int, other._int)

    # Digit-wise AND: '1' only where both digits are '1'.
    digits = [str(int(a) & int(b)) for a, b in zip(opa, opb)]
    return _dec_from_triple(0, ''.join(digits).lstrip('0') or '0', 0)
def logical_invert(self, context=None):
    """Invert all its digits."""
    if context is None:
        context = getcontext()
    # XOR against an all-ones operand of width context.prec.
    ones = _dec_from_triple(0, '1'*context.prec, 0)
    return self.logical_xor(ones, context)
def logical_or(self, other, context=None):
    """Applies an 'or' operation between self and other's digits."""
    if context is None:
        context = getcontext()

    other = _convert_other(other, raiseit=True)

    if not self._islogical() or not other._islogical():
        return context._raise_error(InvalidOperation)

    # Pad/truncate both coefficients to context.prec digits.
    opa, opb = self._fill_logical(context, self._int, other._int)

    # Digit-wise OR: '1' where either digit is '1'.
    digits = [str(int(a) | int(b)) for a, b in zip(opa, opb)]
    return _dec_from_triple(0, ''.join(digits).lstrip('0') or '0', 0)
def logical_xor(self, other, context=None):
    """Applies an 'xor' operation between self and other's digits."""
    if context is None:
        context = getcontext()

    other = _convert_other(other, raiseit=True)

    if not self._islogical() or not other._islogical():
        return context._raise_error(InvalidOperation)

    # Pad/truncate both coefficients to context.prec digits.
    opa, opb = self._fill_logical(context, self._int, other._int)

    # Digit-wise XOR: '1' where the digits differ.
    digits = [str(int(a) ^ int(b)) for a, b in zip(opa, opb)]
    return _dec_from_triple(0, ''.join(digits).lstrip('0') or '0', 0)
def max_mag(self, other, context=None):
    """Compares the values numerically with their sign ignored."""
    other = _convert_other(other, raiseit=True)
    if context is None:
        context = getcontext()

    if self._is_special or other._is_special:
        # A single quiet NaN loses to a number; any other NaN
        # combination goes through the usual NaN handling.
        self_nan, other_nan = self._isnan(), other._isnan()
        if self_nan or other_nan:
            if other_nan == 1 and self_nan == 0:
                return self._fix(context)
            if self_nan == 1 and other_nan == 0:
                return other._fix(context)
            return self._check_nans(other, context)

    comparison = self.copy_abs()._cmp(other.copy_abs())
    if comparison == 0:
        # Equal magnitudes: break the tie with the total ordering.
        comparison = self.compare_total(other)

    winner = other if comparison == -1 else self
    return winner._fix(context)
def min_mag(self, other, context=None):
    """Compares the values numerically with their sign ignored."""
    other = _convert_other(other, raiseit=True)
    if context is None:
        context = getcontext()

    if self._is_special or other._is_special:
        # A single quiet NaN loses to a number; any other NaN
        # combination goes through the usual NaN handling.
        self_nan, other_nan = self._isnan(), other._isnan()
        if self_nan or other_nan:
            if other_nan == 1 and self_nan == 0:
                return self._fix(context)
            if self_nan == 1 and other_nan == 0:
                return other._fix(context)
            return self._check_nans(other, context)

    comparison = self.copy_abs()._cmp(other.copy_abs())
    if comparison == 0:
        # Equal magnitudes: break the tie with the total ordering.
        comparison = self.compare_total(other)

    chosen = self if comparison == -1 else other
    return chosen._fix(context)
def next_minus(self, context=None):
    """Returns the largest representable number smaller than itself."""
    if context is None:
        context = getcontext()

    nan = self._check_nans(context=context)
    if nan:
        return nan

    if self._isinfinity() == -1:
        return _NegativeInfinity
    if self._isinfinity() == 1:
        # Step down from +Inf to the largest finite number.
        return _dec_from_triple(0, '9'*context.prec, context.Etop())

    # Round towards -Infinity in a scratch context that ignores all
    # flags; if rounding moved us, that rounded value is the answer.
    work = context.copy()
    work._set_rounding(ROUND_FLOOR)
    work._ignore_all_flags()
    rounded = self._fix(work)
    if rounded != self:
        return rounded
    # self was exactly representable: subtract one tiny ulp.
    return self.__sub__(_dec_from_triple(0, '1', work.Etiny()-1),
                        work)
def next_plus(self, context=None):
    """Returns the smallest representable number larger than itself."""
    if context is None:
        context = getcontext()

    nan = self._check_nans(context=context)
    if nan:
        return nan

    if self._isinfinity() == 1:
        return _Infinity
    if self._isinfinity() == -1:
        # Step up from -Inf to the most negative finite number.
        return _dec_from_triple(1, '9'*context.prec, context.Etop())

    # Round towards +Infinity in a scratch context that ignores all
    # flags; if rounding moved us, that rounded value is the answer.
    work = context.copy()
    work._set_rounding(ROUND_CEILING)
    work._ignore_all_flags()
    rounded = self._fix(work)
    if rounded != self:
        return rounded
    # self was exactly representable: add one tiny ulp.
    return self.__add__(_dec_from_triple(0, '1', work.Etiny()-1),
                        work)
def next_toward(self, other, context=None):
    """Returns the number closest to self, in the direction towards other.

    The result is the closest representable number to self
    (excluding self) that is in the direction towards other,
    unless both have the same value.  If the two operands are
    numerically equal, then the result is a copy of self with the
    sign set to be the same as the sign of other.
    """
    other = _convert_other(other, raiseit=True)

    if context is None:
        context = getcontext()

    ans = self._check_nans(other, context)
    if ans:
        return ans

    comparison = self._cmp(other)
    if comparison == 0:
        return self.copy_sign(other)

    if comparison == -1:
        ans = self.next_plus(context)
    else: # comparison == 1
        ans = self.next_minus(context)

    # decide which flags to raise using value of ans
    if ans._isinfinity():
        # next_plus/next_minus ignore flags, so Overflow/Inexact/
        # Rounded must be raised here.
        context._raise_error(Overflow,
                             'Infinite result from next_toward',
                             ans._sign)
        context._raise_error(Inexact)
        context._raise_error(Rounded)
    elif ans.adjusted() < context.Emin:
        context._raise_error(Underflow)
        context._raise_error(Subnormal)
        context._raise_error(Inexact)
        context._raise_error(Rounded)
        # if precision == 1 then we don't raise Clamped for a
        # result 0E-Etiny.
        if not ans:
            context._raise_error(Clamped)

    return ans
def number_class(self, context=None):
    """Returns an indication of the class of self.

    The class is one of the following strings:
      sNaN
      NaN
      -Infinity
      -Normal
      -Subnormal
      -Zero
      +Zero
      +Subnormal
      +Normal
      +Infinity
    """
    if self.is_snan():
        return "sNaN"
    if self.is_qnan():
        return "NaN"
    inf = self._isinfinity()
    if inf:
        return "+Infinity" if inf == 1 else "-Infinity"
    # Finite from here on: pick the magnitude class, then prepend
    # the sign character.
    sign = "-" if self._sign else "+"
    if self.is_zero():
        return sign + "Zero"
    if context is None:
        context = getcontext()
    if self.is_subnormal(context=context):
        return sign + "Subnormal"
    # just a normal, regular, boring number, :)
    return sign + "Normal"
def radix(self):
    """Just returns 10, as this is Decimal, :)"""
    # The numeric base of the type, as required by the spec.
    return Decimal(10)
def rotate(self, other, context=None):
    """Returns a rotated copy of self, value-of-other times."""
    if context is None:
        context = getcontext()

    other = _convert_other(other, raiseit=True)

    nan = self._check_nans(other, context)
    if nan:
        return nan

    # The rotation count must be an integral value within +/- prec.
    if other._exp != 0:
        return context._raise_error(InvalidOperation)
    if not (-context.prec <= int(other) <= context.prec):
        return context._raise_error(InvalidOperation)

    if self._isinfinity():
        return Decimal(self)

    # Normalize the coefficient to exactly context.prec digits.
    amount = int(other)
    digits = self._int
    pad = context.prec - len(digits)
    if pad > 0:
        digits = '0'*pad + digits
    elif pad < 0:
        digits = digits[-pad:]

    # Rotate left by `amount` (a negative amount rotates right).
    rotated = digits[amount:] + digits[:amount]
    return _dec_from_triple(self._sign,
                            rotated.lstrip('0') or '0', self._exp)
def scaleb(self, other, context=None):
    """Returns self operand after adding the second value to its exp."""
    if context is None:
        context = getcontext()

    other = _convert_other(other, raiseit=True)

    nan = self._check_nans(other, context)
    if nan:
        return nan

    if other._exp != 0:
        return context._raise_error(InvalidOperation)
    # The shift amount is limited to +/- 2*(Emax + prec).
    bound = 2 * (context.Emax + context.prec)
    if not (-bound <= int(other) <= bound):
        return context._raise_error(InvalidOperation)

    if self._isinfinity():
        return Decimal(self)

    shifted = _dec_from_triple(self._sign, self._int,
                               self._exp + int(other))
    return shifted._fix(context)
def shift(self, other, context=None):
    """Returns a shifted copy of self, value-of-other times."""
    if context is None:
        context = getcontext()

    other = _convert_other(other, raiseit=True)

    nan = self._check_nans(other, context)
    if nan:
        return nan

    # The shift count must be an integral value within +/- prec.
    if other._exp != 0:
        return context._raise_error(InvalidOperation)
    if not (-context.prec <= int(other) <= context.prec):
        return context._raise_error(InvalidOperation)

    if self._isinfinity():
        return Decimal(self)

    # Normalize the coefficient to exactly context.prec digits.
    amount = int(other)
    digits = self._int
    pad = context.prec - len(digits)
    if pad > 0:
        digits = '0'*pad + digits
    elif pad < 0:
        digits = digits[-pad:]

    # Negative amount drops digits on the right; positive appends
    # zeros and keeps only the last context.prec digits.
    if amount < 0:
        shifted = digits[:amount]
    else:
        shifted = (digits + '0'*amount)[-context.prec:]
    return _dec_from_triple(self._sign,
                            shifted.lstrip('0') or '0', self._exp)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
    # Pickle via the string form; the constructor rebuilds the value.
    return (self.__class__, (str(self),))
def __copy__(self):
    if type(self) is Decimal:
        # Immutable, so a copy may be the object itself.
        return self
    # Subclass instances get a genuine new object.
    return self.__class__(str(self))
def __deepcopy__(self, memo):
    if type(self) is Decimal:
        # All components are immutable too, so no deep copy needed.
        return self
    return self.__class__(str(self))
# PEP 3101 support.  the _localeconv keyword argument should be
# considered private: it's provided for ease of testing only.
def __format__(self, specifier, context=None, _localeconv=None):
    """Format a Decimal instance according to the given specifier.

    The specifier should be a standard format specifier, with the
    form described in PEP 3101.  Formatting types 'e', 'E', 'f',
    'F', 'g', 'G', 'n' and '%' are supported.  If the formatting
    type is omitted it defaults to 'g' or 'G', depending on the
    value of context.capitals.
    """
    # Note: PEP 3101 says that if the type is not present then
    # there should be at least one digit after the decimal point.
    # We take the liberty of ignoring this requirement for
    # Decimal---it's presumably there to make sure that
    # format(float, '') behaves similarly to str(float).
    if context is None:
        context = getcontext()

    spec = _parse_format_specifier(specifier, _localeconv=_localeconv)

    # special values don't care about the type or precision
    if self._is_special:
        sign = _format_sign(self._sign, spec)
        body = str(self.copy_abs())
        return _format_align(sign, body, spec)

    # a type of None defaults to 'g' or 'G', depending on context
    if spec['type'] is None:
        spec['type'] = ['g', 'G'][context.capitals]

    # if type is '%', adjust exponent of self accordingly
    if spec['type'] == '%':
        self = _dec_from_triple(self._sign, self._int, self._exp+2)

    # round if necessary, taking rounding mode from the context
    rounding = context.rounding
    precision = spec['precision']
    if precision is not None:
        if spec['type'] in 'eE':
            self = self._round(precision+1, rounding)
        elif spec['type'] in 'fF%':
            self = self._rescale(-precision, rounding)
        elif spec['type'] in 'gG' and len(self._int) > precision:
            self = self._round(precision, rounding)
    # special case: zeros with a positive exponent can't be
    # represented in fixed point; rescale them to 0e0.
    if not self and self._exp > 0 and spec['type'] in 'fF%':
        self = self._rescale(0, rounding)

    # figure out placement of the decimal point
    leftdigits = self._exp + len(self._int)
    if spec['type'] in 'eE':
        if not self and precision is not None:
            dotplace = 1 - precision
        else:
            dotplace = 1
    elif spec['type'] in 'fF%':
        dotplace = leftdigits
    elif spec['type'] in 'gG':
        if self._exp <= 0 and leftdigits > -6:
            dotplace = leftdigits
        else:
            dotplace = 1

    # find digits before and after decimal point, and get exponent
    if dotplace < 0:
        intpart = '0'
        fracpart = '0'*(-dotplace) + self._int
    elif dotplace > len(self._int):
        intpart = self._int + '0'*(dotplace-len(self._int))
        fracpart = ''
    else:
        intpart = self._int[:dotplace] or '0'
        fracpart = self._int[dotplace:]
    exp = leftdigits-dotplace

    # done with the decimal-specific stuff;  hand over the rest
    # of the formatting to the _format_number function
    return _format_number(self._sign, intpart, fracpart, exp, spec)
def _dec_from_triple(sign, coefficient, exponent, special=False):
    """Create a decimal instance directly, without any validation,
    normalization (e.g. removal of leading zeros) or argument
    conversion.

    This function is for *internal use only*.
    """
    # Bypass Decimal.__new__'s parsing/validation entirely.
    result = object.__new__(Decimal)
    result._sign = sign
    result._int = coefficient
    result._exp = exponent
    result._is_special = special
    return result
# Register Decimal as a kind of Number (an abstract base class).
# However, do not register it as Real (because Decimals are not
# interoperable with floats).
# NOTE: this is a virtual-subclass registration only; Decimal gains
# isinstance()/issubclass() recognition, not any Number methods.
_numbers.Number.register(Decimal)

##### Context class #######################################################
class _ContextManager(object):
    """Context manager class to support localcontext().

    Sets a copy of the supplied context in __enter__() and restores
    the previous decimal context in __exit__()
    """
    def __init__(self, new_context):
        # Work on a copy so the caller's context object is never mutated
        # by operations performed inside the with-block.
        self.new_context = new_context.copy()
    def __enter__(self):
        # Remember the currently active context so it can be restored.
        self.saved_context = getcontext()
        setcontext(self.new_context)
        return self.new_context
    def __exit__(self, t, v, tb):
        # Restore unconditionally; any exception propagates (no return True).
        setcontext(self.saved_context)
class Context(object):
"""Contains the context for a Decimal instance.
Contains:
prec - precision (for use in rounding, division, square roots..)
rounding - rounding type (how you round)
traps - If traps[exception] = 1, then the exception is
raised when it is caused. Otherwise, a value is
substituted in.
flags - When an exception is caused, flags[exception] is set.
(Whether or not the trap_enabler is set)
Should be reset by user of Decimal instance.
Emin - Minimum exponent
Emax - Maximum exponent
capitals - If 1, 1*10^1 is printed as 1E+1.
If 0, printed as 1e1
clamp - If 1, change exponents if too high (Default 0)
"""
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
             capitals=None, clamp=None, flags=None, traps=None,
             _ignored_flags=None):
    # Set defaults; for everything except flags and _ignored_flags,
    # inherit from DefaultContext.
    try:
        dc = DefaultContext
    except NameError:
        # DefaultContext is itself built later in the module with every
        # argument supplied explicitly, so dc is never read in that case.
        pass
    self.prec = prec if prec is not None else dc.prec
    self.rounding = rounding if rounding is not None else dc.rounding
    self.Emin = Emin if Emin is not None else dc.Emin
    self.Emax = Emax if Emax is not None else dc.Emax
    self.capitals = capitals if capitals is not None else dc.capitals
    self.clamp = clamp if clamp is not None else dc.clamp
    if _ignored_flags is None:
        self._ignored_flags = []
    else:
        self._ignored_flags = _ignored_flags
    # traps/flags may be given as a dict (used verbatim) or as a list of
    # signals to enable (converted to a {signal: 0/1} dict).
    if traps is None:
        self.traps = dc.traps.copy()
    elif not isinstance(traps, dict):
        self.traps = dict((s, int(s in traps)) for s in _signals + traps)
    else:
        self.traps = traps
    if flags is None:
        self.flags = dict.fromkeys(_signals, 0)
    elif not isinstance(flags, dict):
        self.flags = dict((s, int(s in flags)) for s in _signals + flags)
    else:
        self.flags = flags
def _set_integer_check(self, name, value, vmin, vmax):
    """Validate that *value* is an int within [vmin, vmax], then assign it.

    vmin may be the string '-inf' and vmax the string 'inf' to mark an
    unbounded end of the range.  Raises TypeError for non-integers and
    ValueError for out-of-range values.
    """
    if not isinstance(value, int):
        raise TypeError("%s must be an integer" % name)
    if vmin == '-inf':
        if value > vmax:
            raise ValueError("%s must be in [%s, %d]. got: %s" % (name, vmin, vmax, value))
    elif vmax == 'inf':
        if value < vmin:
            raise ValueError("%s must be in [%d, %s]. got: %s" % (name, vmin, vmax, value))
    else:
        if value < vmin or value > vmax:
            # Fixed: use "got:" (with colon) for consistency with the
            # two messages above.
            raise ValueError("%s must be in [%d, %d]. got: %s" % (name, vmin, vmax, value))
    return object.__setattr__(self, name, value)
def _set_signal_dict(self, name, d):
    """Validate that d maps exactly the known signals, then assign it."""
    if not isinstance(d, dict):
        raise TypeError("%s must be a signal dict" % d)
    # The dict must contain exactly the known signals: no extras and
    # none missing.  Either violation raises the same KeyError.
    has_extra = any(key not in _signals for key in d)
    is_missing = any(key not in d for key in _signals)
    if has_extra or is_missing:
        raise KeyError("%s is not a valid signal dict" % d)
    return object.__setattr__(self, name, d)
def __setattr__(self, name, value):
    # Context exposes a fixed, closed set of attributes; every public
    # assignment is validated here.
    if name == 'prec':
        return self._set_integer_check(name, value, 1, 'inf')
    elif name == 'Emin':
        return self._set_integer_check(name, value, '-inf', 0)
    elif name == 'Emax':
        return self._set_integer_check(name, value, 0, 'inf')
    elif name == 'capitals':
        return self._set_integer_check(name, value, 0, 1)
    elif name == 'clamp':
        return self._set_integer_check(name, value, 0, 1)
    elif name == 'rounding':
        if not value in _rounding_modes:
            # raise TypeError even for strings to have consistency
            # among various implementations.
            raise TypeError("%s: invalid rounding mode" % value)
        return object.__setattr__(self, name, value)
    elif name == 'flags' or name == 'traps':
        return self._set_signal_dict(name, value)
    elif name == '_ignored_flags':
        # Internal attribute; assigned without validation.
        return object.__setattr__(self, name, value)
    else:
        raise AttributeError(
            "'decimal.Context' object has no attribute '%s'" % name)
def __delattr__(self, name):
    """Context attributes are a fixed set; deletion is always an error."""
    raise AttributeError("%s cannot be deleted" % name)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
    """Return (class, args) so pickle/copy can rebuild this context."""
    # Only the *set* flags and traps are pickled, as signal lists.
    active_flags = [sig for sig, v in self.flags.items() if v]
    active_traps = [sig for sig, v in self.traps.items() if v]
    args = (self.prec, self.rounding, self.Emin, self.Emax,
            self.capitals, self.clamp, active_flags, active_traps)
    return (self.__class__, args)
def __repr__(self):
    """Show the current context."""
    parts = ['Context(prec=%(prec)d, rounding=%(rounding)s, '
             'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, '
             'clamp=%(clamp)d'
             % vars(self)]
    # Only the *set* flags and traps are listed, by class name.
    parts.append('flags=[' +
                 ', '.join(f.__name__ for f, v in self.flags.items() if v) +
                 ']')
    parts.append('traps=[' +
                 ', '.join(t.__name__ for t, v in self.traps.items() if v) +
                 ']')
    return ', '.join(parts) + ')'
def clear_flags(self):
    """Reset all flags to zero"""
    # Mutate the existing dict in place; callers may hold references.
    self.flags.update(dict.fromkeys(self.flags, 0))
def clear_traps(self):
    """Reset all traps to zero"""
    # Mutate the existing dict in place; callers may hold references.
    self.traps.update(dict.fromkeys(self.traps, 0))
def _shallow_copy(self):
    """Returns a shallow copy from self."""
    # The flags/traps/_ignored_flags containers are *shared* with self.
    return Context(self.prec, self.rounding, self.Emin, self.Emax,
                   self.capitals, self.clamp, self.flags, self.traps,
                   self._ignored_flags)
def copy(self):
    """Returns a deep copy from self."""
    # Independent flags/traps dicts, so mutating the copy never
    # affects the original context.
    return Context(self.prec, self.rounding, self.Emin, self.Emax,
                   self.capitals, self.clamp,
                   self.flags.copy(), self.traps.copy(),
                   self._ignored_flags)
__copy__ = copy
def _raise_error(self, condition, explanation = None, *args):
    """Handles an error

    If the flag is in _ignored_flags, returns the default response.
    Otherwise, it sets the flag, then, if the corresponding
    trap_enabler is set, it reraises the exception. Otherwise, it returns
    the default value after setting the flag.
    """
    # Map sub-conditions (e.g. ConversionSyntax) to their parent signal.
    error = _condition_map.get(condition, condition)
    if error in self._ignored_flags:
        # Don't touch the flag
        return error().handle(self, *args)
    self.flags[error] = 1
    if not self.traps[error]:
        # The errors define how to handle themselves.
        # Note: the original *condition* (not the mapped signal) supplies
        # the default result.
        return condition().handle(self, *args)
    # Errors should only be risked on copies of the context
    # self._ignored_flags = []
    raise error(explanation)
def _ignore_all_flags(self):
    """Ignore all flags, if they are raised"""
    # Delegate to _ignore_flags with the full signal list.
    return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
    """Ignore the flags, if they are raised"""
    # Rebind rather than mutate, so copies of a context that share the
    # list are left untouched.
    extra = list(flags)
    self._ignored_flags = self._ignored_flags + extra
    return extra
def _regard_flags(self, *flags):
    """Stop ignoring the flags, if they are raised"""
    # Accept either _regard_flags(f1, f2) or _regard_flags([f1, f2]).
    if flags and isinstance(flags[0], (tuple, list)):
        flags = flags[0]
    for flag in flags:
        self._ignored_flags.remove(flag)
# We inherit object.__hash__, so we must deny this explicitly
__hash__ = None
def Etiny(self):
    """Returns Etiny (= Emin - prec + 1)"""
    tiny = self.Emin - self.prec + 1
    return int(tiny)
def Etop(self):
    """Returns maximum exponent (= Emax - prec + 1)"""
    top = self.Emax - self.prec + 1
    return int(top)
def _set_rounding(self, type):
    """Install a new rounding mode and return the previous one.

    Often used on a copy of the calling context, so an error in the
    middle cannot corrupt the caller's state:

        context = context.copy()
        rounding = context._set_rounding(ROUND_UP)
        val = self.__sub__(other, context=context)
        context._set_rounding(rounding)
    """
    previous = self.rounding
    self.rounding = type
    return previous
def create_decimal(self, num='0'):
    """Creates a new Decimal instance but using self as context.

    This method implements the to-number operation of the
    IBM Decimal specification."""
    # The to-number operation is stricter than the Decimal constructor:
    # surrounding whitespace is a conversion-syntax error here.
    if isinstance(num, str) and num != num.strip():
        return self._raise_error(ConversionSyntax,
                                 "no trailing or leading whitespace is "
                                 "permitted.")
    d = Decimal(num, context=self)
    # A NaN's diagnostic payload must fit within the context precision.
    if d._isnan() and len(d._int) > self.prec - self.clamp:
        return self._raise_error(ConversionSyntax,
                                 "diagnostic info too long in NaN")
    # Round/normalize the result to this context.
    return d._fix(self)
def create_decimal_from_float(self, f):
    """Create a new Decimal from float f, rounding with this context.

    >>> context = Context(prec=5, rounding=ROUND_DOWN)
    >>> context.create_decimal_from_float(3.1415926535897932)
    Decimal('3.1415')
    >>> context = Context(prec=5, traps=[Inexact])
    >>> context.create_decimal_from_float(3.1415926535897932)
    Traceback (most recent call last):
      ...
    decimal.Inexact: None
    """
    exact = Decimal.from_float(f)   # lossless binary->decimal conversion
    return exact._fix(self)         # then apply this context's rounding
# Methods
def abs(self, a):
    """Return the absolute value of the operand.

    Equivalent to plus(a) for a non-negative operand and minus(a)
    otherwise.

    >>> ExtendedContext.abs(Decimal('-100'))
    Decimal('100')
    >>> ExtendedContext.abs(Decimal('101.5'))
    Decimal('101.5')
    >>> ExtendedContext.abs(-1)
    Decimal('1')
    """
    return _convert_other(a, raiseit=True).__abs__(context=self)
def add(self, a, b):
    """Return the sum of the two operands.

    >>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
    Decimal('19.00')
    >>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
    Decimal('1.02E+4')
    >>> ExtendedContext.add(5, 5)
    Decimal('10')
    """
    lhs = _convert_other(a, raiseit=True)
    result = lhs.__add__(b, context=self)
    if result is NotImplemented:
        # b could not be coerced by Decimal.__add__ either.
        raise TypeError("Unable to convert %s to Decimal" % b)
    return result
def _apply(self, a):
    """Round a to this context and return the result as a string."""
    fixed = a._fix(self)
    return str(fixed)
def canonical(self, a):
    """Returns the same Decimal object.

    A Decimal value has exactly one encoding, so the received object is
    already canonical.

    >>> ExtendedContext.canonical(Decimal('2.50'))
    Decimal('2.50')
    """
    if not isinstance(a, Decimal):
        raise TypeError("canonical requires a Decimal as an argument.")
    return a.canonical()
def compare(self, a, b):
    """Compare a and b numerically; return Decimal('-1'), '0' or '1'.

    The result is '-1' if a < b, '0' if they are numerically equal
    (trailing zeros are ignored), and '1' if a > b.

    >>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
    Decimal('-1')
    >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
    Decimal('0')
    >>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
    Decimal('1')
    >>> ExtendedContext.compare(1, Decimal(2))
    Decimal('-1')
    """
    return _convert_other(a, raiseit=True).compare(b, context=self)
def compare_signal(self, a, b):
    """Compare a and b numerically; all NaN operands signal.

    Like compare(), except that every NaN (quiet or signaling) raises
    InvalidOperation, with signaling NaNs taking precedence.

    >>> c = ExtendedContext
    >>> c.compare_signal(Decimal('2.1'), Decimal('3'))
    Decimal('-1')
    >>> c.flags[InvalidOperation] = 0
    >>> c.compare_signal(Decimal('NaN'), Decimal('2.1'))
    Decimal('NaN')
    >>> print(c.flags[InvalidOperation])
    1
    """
    return _convert_other(a, raiseit=True).compare_signal(b, context=self)
def compare_total(self, a, b):
    """Compare two operands by their abstract representation.

    Unlike compare(), this is a total ordering over all abstract
    representations: e.g. Decimal('12.30') sorts below Decimal('12.3'),
    and NaNs are ordered too.

    >>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))
    Decimal('-1')
    >>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))
    Decimal('-1')
    >>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN'))
    Decimal('-1')
    >>> ExtendedContext.compare_total(1, Decimal(2))
    Decimal('-1')
    """
    return _convert_other(a, raiseit=True).compare_total(b)
def compare_total_mag(self, a, b):
    """Compare abstract representations with both signs treated as 0.

    Like compare_total, but the operands' signs are ignored.
    """
    return _convert_other(a, raiseit=True).compare_total_mag(b)
def copy_abs(self, a):
    """Return a copy of the operand with the sign set to 0.

    >>> ExtendedContext.copy_abs(Decimal('2.1'))
    Decimal('2.1')
    >>> ExtendedContext.copy_abs(Decimal('-100'))
    Decimal('100')
    >>> ExtendedContext.copy_abs(-1)
    Decimal('1')
    """
    return _convert_other(a, raiseit=True).copy_abs()
def copy_decimal(self, a):
    """Return a copy of the decimal object.

    >>> ExtendedContext.copy_decimal(Decimal('-1.00'))
    Decimal('-1.00')
    >>> ExtendedContext.copy_decimal(1)
    Decimal('1')
    """
    converted = _convert_other(a, raiseit=True)
    return Decimal(converted)
def copy_negate(self, a):
    """Return a copy of the operand with the sign inverted.

    >>> ExtendedContext.copy_negate(Decimal('101.5'))
    Decimal('-101.5')
    >>> ExtendedContext.copy_negate(Decimal('-101.5'))
    Decimal('101.5')
    >>> ExtendedContext.copy_negate(1)
    Decimal('-1')
    """
    return _convert_other(a, raiseit=True).copy_negate()
def copy_sign(self, a, b):
    """Return a copy of the first operand carrying the second's sign.

    >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
    Decimal('1.50')
    >>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
    Decimal('1.50')
    >>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
    Decimal('-1.50')
    >>> ExtendedContext.copy_sign(1, -2)
    Decimal('-1')
    """
    return _convert_other(a, raiseit=True).copy_sign(b)
def divide(self, a, b):
    """Decimal division in a specified context.

    >>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
    Decimal('0.333333333')
    >>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
    Decimal('2.5')
    >>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
    Decimal('4.00')
    >>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
    Decimal('1.20E+6')
    >>> ExtendedContext.divide(5, 5)
    Decimal('1')
    """
    lhs = _convert_other(a, raiseit=True)
    result = lhs.__truediv__(b, context=self)
    if result is NotImplemented:
        # b could not be coerced by Decimal.__truediv__ either.
        raise TypeError("Unable to convert %s to Decimal" % b)
    return result
def divide_int(self, a, b):
    """Divide two numbers and return the integer part of the result.

    >>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
    Decimal('0')
    >>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
    Decimal('3')
    >>> ExtendedContext.divide_int(10, Decimal(3))
    Decimal('3')
    """
    lhs = _convert_other(a, raiseit=True)
    result = lhs.__floordiv__(b, context=self)
    if result is NotImplemented:
        # b could not be coerced by Decimal.__floordiv__ either.
        raise TypeError("Unable to convert %s to Decimal" % b)
    return result
def divmod(self, a, b):
    """Return (a // b, a % b).

    >>> ExtendedContext.divmod(Decimal(8), Decimal(3))
    (Decimal('2'), Decimal('2'))
    >>> ExtendedContext.divmod(Decimal(8), Decimal(4))
    (Decimal('2'), Decimal('0'))
    >>> ExtendedContext.divmod(8, Decimal(4))
    (Decimal('2'), Decimal('0'))
    """
    lhs = _convert_other(a, raiseit=True)
    result = lhs.__divmod__(b, context=self)
    if result is NotImplemented:
        # b could not be coerced by Decimal.__divmod__ either.
        raise TypeError("Unable to convert %s to Decimal" % b)
    return result
def exp(self, a):
    """Returns e ** a.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> c.exp(Decimal('-1'))
    Decimal('0.367879441')
    >>> c.exp(Decimal('0'))
    Decimal('1')
    >>> c.exp(Decimal('1'))
    Decimal('2.71828183')
    >>> c.exp(Decimal('+Infinity'))
    Decimal('Infinity')
    >>> c.exp(10)
    Decimal('22026.4658')
    """
    return _convert_other(a, raiseit=True).exp(context=self)
def fma(self, a, b, c):
    """Return a*b + c with a single final rounding (fused multiply-add).

    The intermediate product a*b is exact; only the final addition of c
    is rounded.

    >>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7'))
    Decimal('22')
    >>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7'))
    Decimal('-8')
    >>> ExtendedContext.fma(1, 3, 4)
    Decimal('7')
    """
    return _convert_other(a, raiseit=True).fma(b, c, context=self)
def is_canonical(self, a):
    """Return True if the operand is canonical; otherwise return False.

    Every Decimal encoding is canonical, so this is True for any
    Decimal instance.

    >>> ExtendedContext.is_canonical(Decimal('2.50'))
    True
    """
    if not isinstance(a, Decimal):
        raise TypeError("is_canonical requires a Decimal as an argument.")
    return a.is_canonical()
def is_finite(self, a):
    """Return True if the operand is finite (neither Inf nor NaN).

    >>> ExtendedContext.is_finite(Decimal('2.50'))
    True
    >>> ExtendedContext.is_finite(Decimal('Inf'))
    False
    >>> ExtendedContext.is_finite(Decimal('NaN'))
    False
    >>> ExtendedContext.is_finite(1)
    True
    """
    return _convert_other(a, raiseit=True).is_finite()
def is_infinite(self, a):
    """Return True if the operand is infinite; otherwise return False.

    >>> ExtendedContext.is_infinite(Decimal('-Inf'))
    True
    >>> ExtendedContext.is_infinite(Decimal('NaN'))
    False
    >>> ExtendedContext.is_infinite(1)
    False
    """
    return _convert_other(a, raiseit=True).is_infinite()
def is_nan(self, a):
    """Return True if the operand is a qNaN or sNaN; else False.

    >>> ExtendedContext.is_nan(Decimal('NaN'))
    True
    >>> ExtendedContext.is_nan(Decimal('-sNaN'))
    True
    >>> ExtendedContext.is_nan(1)
    False
    """
    return _convert_other(a, raiseit=True).is_nan()
def is_normal(self, a):
    """Return True if the operand is a normal (non-zero, non-subnormal,
    finite) number; otherwise return False.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> c.is_normal(Decimal('2.50'))
    True
    >>> c.is_normal(Decimal('0.1E-999'))
    False
    >>> c.is_normal(Decimal('0.00'))
    False
    >>> c.is_normal(1)
    True
    """
    return _convert_other(a, raiseit=True).is_normal(context=self)
def is_qnan(self, a):
    """Return True if the operand is a quiet NaN; otherwise return False.

    >>> ExtendedContext.is_qnan(Decimal('NaN'))
    True
    >>> ExtendedContext.is_qnan(Decimal('sNaN'))
    False
    >>> ExtendedContext.is_qnan(1)
    False
    """
    return _convert_other(a, raiseit=True).is_qnan()
def is_signed(self, a):
    """Return True if the operand's sign bit is set (including -0).

    >>> ExtendedContext.is_signed(Decimal('-12'))
    True
    >>> ExtendedContext.is_signed(Decimal('-0'))
    True
    >>> ExtendedContext.is_signed(8)
    False
    """
    return _convert_other(a, raiseit=True).is_signed()
def is_snan(self, a):
    """Return True if the operand is a signaling NaN; else False.

    >>> ExtendedContext.is_snan(Decimal('NaN'))
    False
    >>> ExtendedContext.is_snan(Decimal('sNaN'))
    True
    >>> ExtendedContext.is_snan(1)
    False
    """
    return _convert_other(a, raiseit=True).is_snan()
def is_subnormal(self, a):
    """Return True if the operand is subnormal; otherwise return False.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> c.is_subnormal(Decimal('2.50'))
    False
    >>> c.is_subnormal(Decimal('0.1E-999'))
    True
    >>> c.is_subnormal(1)
    False
    """
    return _convert_other(a, raiseit=True).is_subnormal(context=self)
def is_zero(self, a):
    """Return True if the operand is a zero; otherwise return False.

    >>> ExtendedContext.is_zero(Decimal('0'))
    True
    >>> ExtendedContext.is_zero(Decimal('-0E+2'))
    True
    >>> ExtendedContext.is_zero(1)
    False
    """
    return _convert_other(a, raiseit=True).is_zero()
def ln(self, a):
    """Returns the natural (base e) logarithm of the operand.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> c.ln(Decimal('0'))
    Decimal('-Infinity')
    >>> c.ln(Decimal('1.000'))
    Decimal('0')
    >>> c.ln(Decimal('2.71828183'))
    Decimal('1.00000000')
    >>> c.ln(Decimal('+Infinity'))
    Decimal('Infinity')
    """
    return _convert_other(a, raiseit=True).ln(context=self)
def log10(self, a):
    """Returns the base 10 logarithm of the operand.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> c.log10(Decimal('0.001'))
    Decimal('-3')
    >>> c.log10(Decimal('2'))
    Decimal('0.301029996')
    >>> c.log10(Decimal('10'))
    Decimal('1')
    >>> c.log10(0)
    Decimal('-Infinity')
    """
    return _convert_other(a, raiseit=True).log10(context=self)
def logb(self, a):
    """Return the exponent of the magnitude of the operand's MSD.

    The result is the integer exponent of the most significant digit,
    as though the operand were truncated to one digit while keeping
    that digit's value (and without exponent limits).

    >>> ExtendedContext.logb(Decimal('250'))
    Decimal('2')
    >>> ExtendedContext.logb(Decimal('0.03'))
    Decimal('-2')
    >>> ExtendedContext.logb(Decimal('0'))
    Decimal('-Infinity')
    >>> ExtendedContext.logb(100)
    Decimal('2')
    """
    return _convert_other(a, raiseit=True).logb(context=self)
def logical_and(self, a, b):
    """Digit-wise 'and' of two logical (0/1-digit) operands.

    >>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))
    Decimal('0')
    >>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))
    Decimal('1')
    >>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))
    Decimal('1000')
    >>> ExtendedContext.logical_and(110, 1101)
    Decimal('100')
    """
    return _convert_other(a, raiseit=True).logical_and(b, context=self)
def logical_invert(self, a):
    """Invert all digits of a logical (0/1-digit) operand.

    >>> ExtendedContext.logical_invert(Decimal('0'))
    Decimal('111111111')
    >>> ExtendedContext.logical_invert(Decimal('101010101'))
    Decimal('10101010')
    >>> ExtendedContext.logical_invert(1101)
    Decimal('111110010')
    """
    return _convert_other(a, raiseit=True).logical_invert(context=self)
def logical_or(self, a, b):
    """Digit-wise 'or' of two logical (0/1-digit) operands.

    >>> ExtendedContext.logical_or(Decimal('0'), Decimal('1'))
    Decimal('1')
    >>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010'))
    Decimal('1110')
    >>> ExtendedContext.logical_or(110, 1101)
    Decimal('1111')
    """
    return _convert_other(a, raiseit=True).logical_or(b, context=self)
def logical_xor(self, a, b):
    """Digit-wise 'xor' of two logical (0/1-digit) operands.

    >>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1'))
    Decimal('0')
    >>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010'))
    Decimal('110')
    >>> ExtendedContext.logical_xor(110, 1101)
    Decimal('1011')
    """
    return _convert_other(a, raiseit=True).logical_xor(b, context=self)
def max(self, a, b):
    """Compare two values numerically and return the maximum.

    NaN operands follow the general rules.  Numerically equal operands
    yield the left-hand one; otherwise the operand closer to positive
    infinity is returned.

    >>> ExtendedContext.max(Decimal('3'), Decimal('2'))
    Decimal('3')
    >>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
    Decimal('3')
    >>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
    Decimal('7')
    >>> ExtendedContext.max(1, 2)
    Decimal('2')
    """
    return _convert_other(a, raiseit=True).max(b, context=self)
def max_mag(self, a, b):
    """Compare the operands numerically with their sign ignored.

    >>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN'))
    Decimal('7')
    >>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10'))
    Decimal('-10')
    >>> ExtendedContext.max_mag(1, -2)
    Decimal('-2')
    """
    return _convert_other(a, raiseit=True).max_mag(b, context=self)
def min(self, a, b):
    """Compare two values numerically and return the minimum.

    NaN operands follow the general rules.  Numerically equal operands
    yield the left-hand one; otherwise the operand closer to negative
    infinity is returned.

    >>> ExtendedContext.min(Decimal('3'), Decimal('2'))
    Decimal('2')
    >>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
    Decimal('1.0')
    >>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
    Decimal('7')
    >>> ExtendedContext.min(1, 2)
    Decimal('1')
    """
    return _convert_other(a, raiseit=True).min(b, context=self)
def min_mag(self, a, b):
    """Compare the operands numerically with their sign ignored.

    >>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2'))
    Decimal('-2')
    >>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN'))
    Decimal('-3')
    >>> ExtendedContext.min_mag(1, -2)
    Decimal('1')
    """
    return _convert_other(a, raiseit=True).min_mag(b, context=self)
def minus(self, a):
    """Unary prefix minus, evaluated with the rules of subtract.

    minus(a) is calculated as subtract('0', a) where the '0' has the
    same exponent as the operand.

    >>> ExtendedContext.minus(Decimal('1.3'))
    Decimal('-1.3')
    >>> ExtendedContext.minus(Decimal('-1.3'))
    Decimal('1.3')
    >>> ExtendedContext.minus(1)
    Decimal('-1')
    """
    return _convert_other(a, raiseit=True).__neg__(context=self)
def multiply(self, a, b):
    """Return the product of the two operands.

    >>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
    Decimal('3.60')
    >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
    Decimal('-0.0')
    >>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
    Decimal('4.28135971E+11')
    >>> ExtendedContext.multiply(7, 7)
    Decimal('49')
    """
    lhs = _convert_other(a, raiseit=True)
    result = lhs.__mul__(b, context=self)
    if result is NotImplemented:
        # b could not be coerced by Decimal.__mul__ either.
        raise TypeError("Unable to convert %s to Decimal" % b)
    return result
    def next_minus(self, a):
        """Returns the largest representable number smaller than a.

        >>> c = ExtendedContext.copy()
        >>> c.Emin = -999
        >>> c.Emax = 999
        >>> ExtendedContext.next_minus(Decimal('1'))
        Decimal('0.999999999')
        >>> c.next_minus(Decimal('1E-1007'))
        Decimal('0E-1007')
        >>> ExtendedContext.next_minus(Decimal('-1.00000003'))
        Decimal('-1.00000004')
        >>> c.next_minus(Decimal('Infinity'))
        Decimal('9.99999999E+999')
        >>> c.next_minus(1)
        Decimal('0.999999999')
        """
        # "Representable" is relative to this context's prec/Emin/Emax.
        a = _convert_other(a, raiseit=True)
        return a.next_minus(context=self)
    def next_plus(self, a):
        """Returns the smallest representable number larger than a.

        >>> c = ExtendedContext.copy()
        >>> c.Emin = -999
        >>> c.Emax = 999
        >>> ExtendedContext.next_plus(Decimal('1'))
        Decimal('1.00000001')
        >>> c.next_plus(Decimal('-1E-1007'))
        Decimal('-0E-1007')
        >>> ExtendedContext.next_plus(Decimal('-1.00000003'))
        Decimal('-1.00000002')
        >>> c.next_plus(Decimal('-Infinity'))
        Decimal('-9.99999999E+999')
        >>> c.next_plus(1)
        Decimal('1.00000001')
        """
        # "Representable" is relative to this context's prec/Emin/Emax.
        a = _convert_other(a, raiseit=True)
        return a.next_plus(context=self)
    def next_toward(self, a, b):
        """Returns the number closest to a, in direction towards b.

        The result is the closest representable number from the first
        operand (but not the first operand) that is in the direction
        towards the second operand, unless the operands have the same
        value.

        >>> c = ExtendedContext.copy()
        >>> c.Emin = -999
        >>> c.Emax = 999
        >>> c.next_toward(Decimal('1'), Decimal('2'))
        Decimal('1.00000001')
        >>> c.next_toward(Decimal('-1E-1007'), Decimal('1'))
        Decimal('-0E-1007')
        >>> c.next_toward(Decimal('-1.00000003'), Decimal('0'))
        Decimal('-1.00000002')
        >>> c.next_toward(Decimal('1'), Decimal('0'))
        Decimal('0.999999999')
        >>> c.next_toward(Decimal('1E-1007'), Decimal('-100'))
        Decimal('0E-1007')
        >>> c.next_toward(Decimal('-1.00000003'), Decimal('-10'))
        Decimal('-1.00000004')
        >>> c.next_toward(Decimal('0.00'), Decimal('-0.0000'))
        Decimal('-0.00')
        >>> c.next_toward(0, 1)
        Decimal('1E-1007')
        >>> c.next_toward(Decimal(0), 1)
        Decimal('1E-1007')
        >>> c.next_toward(0, Decimal(1))
        Decimal('1E-1007')
        """
        # Decimal.next_toward coerces b itself; only a needs explicit coercion.
        a = _convert_other(a, raiseit=True)
        return a.next_toward(b, context=self)
    def normalize(self, a):
        """normalize reduces an operand to its simplest form.

        Essentially a plus operation with all trailing zeros removed from the
        result.

        >>> ExtendedContext.normalize(Decimal('2.1'))
        Decimal('2.1')
        >>> ExtendedContext.normalize(Decimal('-2.0'))
        Decimal('-2')
        >>> ExtendedContext.normalize(Decimal('1.200'))
        Decimal('1.2')
        >>> ExtendedContext.normalize(Decimal('-120'))
        Decimal('-1.2E+2')
        >>> ExtendedContext.normalize(Decimal('120.00'))
        Decimal('1.2E+2')
        >>> ExtendedContext.normalize(Decimal('0.00'))
        Decimal('0')
        >>> ExtendedContext.normalize(6)
        Decimal('6')
        """
        a = _convert_other(a, raiseit=True)
        return a.normalize(context=self)
    def number_class(self, a):
        """Returns an indication of the class of the operand.

        The class is one of the following strings:
          -sNaN
          -NaN
          -Infinity
          -Normal
          -Subnormal
          -Zero
          +Zero
          +Subnormal
          +Normal
          +Infinity

        >>> c = ExtendedContext.copy()
        >>> c.Emin = -999
        >>> c.Emax = 999
        >>> c.number_class(Decimal('Infinity'))
        '+Infinity'
        >>> c.number_class(Decimal('1E-10'))
        '+Normal'
        >>> c.number_class(Decimal('2.50'))
        '+Normal'
        >>> c.number_class(Decimal('0.1E-999'))
        '+Subnormal'
        >>> c.number_class(Decimal('0'))
        '+Zero'
        >>> c.number_class(Decimal('-0'))
        '-Zero'
        >>> c.number_class(Decimal('-0.1E-999'))
        '-Subnormal'
        >>> c.number_class(Decimal('-1E-10'))
        '-Normal'
        >>> c.number_class(Decimal('-2.50'))
        '-Normal'
        >>> c.number_class(Decimal('-Infinity'))
        '-Infinity'
        >>> c.number_class(Decimal('NaN'))
        'NaN'
        >>> c.number_class(Decimal('-NaN'))
        'NaN'
        >>> c.number_class(Decimal('sNaN'))
        'sNaN'
        >>> c.number_class(123)
        '+Normal'
        """
        # Returns a str, not a Decimal; Normal/Subnormal depend on Emin.
        a = _convert_other(a, raiseit=True)
        return a.number_class(context=self)
    def plus(self, a):
        """Plus corresponds to unary prefix plus in Python.

        The operation is evaluated using the same rules as add; the
        operation plus(a) is calculated as add('0', a) where the '0'
        has the same exponent as the operand.

        >>> ExtendedContext.plus(Decimal('1.3'))
        Decimal('1.3')
        >>> ExtendedContext.plus(Decimal('-1.3'))
        Decimal('-1.3')
        >>> ExtendedContext.plus(-1)
        Decimal('-1')
        """
        # Not a no-op: __pos__ rounds the operand to this context's precision.
        a = _convert_other(a, raiseit=True)
        return a.__pos__(context=self)
    def power(self, a, b, modulo=None):
        """Raises a to the power of b, to modulo if given.

        With two arguments, compute a**b.  If a is negative then b
        must be integral.  The result will be inexact unless b is
        integral and the result is finite and can be expressed exactly
        in 'precision' digits.

        With three arguments, compute (a**b) % modulo.  For the
        three argument form, the following restrictions on the
        arguments hold:

         - all three arguments must be integral
         - b must be nonnegative
         - at least one of a or b must be nonzero
         - modulo must be nonzero and have at most 'precision' digits

        The result of pow(a, b, modulo) is identical to the result
        that would be obtained by computing (a**b) % modulo with
        unbounded precision, but is computed more efficiently.  It is
        always exact.

        >>> c = ExtendedContext.copy()
        >>> c.Emin = -999
        >>> c.Emax = 999
        >>> c.power(Decimal('2'), Decimal('3'))
        Decimal('8')
        >>> c.power(Decimal('-2'), Decimal('3'))
        Decimal('-8')
        >>> c.power(Decimal('2'), Decimal('-3'))
        Decimal('0.125')
        >>> c.power(Decimal('1.7'), Decimal('8'))
        Decimal('69.7575744')
        >>> c.power(Decimal('10'), Decimal('0.301029996'))
        Decimal('2.00000000')
        >>> c.power(Decimal('Infinity'), Decimal('-1'))
        Decimal('0')
        >>> c.power(Decimal('Infinity'), Decimal('0'))
        Decimal('1')
        >>> c.power(Decimal('Infinity'), Decimal('1'))
        Decimal('Infinity')
        >>> c.power(Decimal('-Infinity'), Decimal('-1'))
        Decimal('-0')
        >>> c.power(Decimal('-Infinity'), Decimal('0'))
        Decimal('1')
        >>> c.power(Decimal('-Infinity'), Decimal('1'))
        Decimal('-Infinity')
        >>> c.power(Decimal('-Infinity'), Decimal('2'))
        Decimal('Infinity')
        >>> c.power(Decimal('0'), Decimal('0'))
        Decimal('NaN')

        >>> c.power(Decimal('3'), Decimal('7'), Decimal('16'))
        Decimal('11')
        >>> c.power(Decimal('-3'), Decimal('7'), Decimal('16'))
        Decimal('-11')
        >>> c.power(Decimal('-3'), Decimal('8'), Decimal('16'))
        Decimal('1')
        >>> c.power(Decimal('3'), Decimal('7'), Decimal('-16'))
        Decimal('11')
        >>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789'))
        Decimal('11729830')
        >>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729'))
        Decimal('-0')
        >>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537'))
        Decimal('1')
        >>> ExtendedContext.power(7, 7)
        Decimal('823543')
        >>> ExtendedContext.power(Decimal(7), 7)
        Decimal('823543')
        >>> ExtendedContext.power(7, Decimal(7), 2)
        Decimal('1')
        """
        a = _convert_other(a, raiseit=True)
        r = a.__pow__(b, modulo, context=self)
        # __pow__ returns NotImplemented for an unconvertible exponent;
        # translate to the TypeError this explicit method is expected to raise.
        if r is NotImplemented:
            raise TypeError("Unable to convert %s to Decimal" % b)
        else:
            return r
    def quantize(self, a, b):
        """Returns a value equal to 'a' (rounded), having the exponent of 'b'.

        The coefficient of the result is derived from that of the left-hand
        operand.  It may be rounded using the current rounding setting (if the
        exponent is being increased), multiplied by a positive power of ten (if
        the exponent is being decreased), or is unchanged (if the exponent is
        already equal to that of the right-hand operand).

        Unlike other operations, if the length of the coefficient after the
        quantize operation would be greater than precision then an Invalid
        operation condition is raised.  This guarantees that, unless there is
        an error condition, the exponent of the result of a quantize is always
        equal to that of the right-hand operand.

        Also unlike other operations, quantize will never raise Underflow, even
        if the result is subnormal and inexact.

        >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
        Decimal('2.170')
        >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
        Decimal('2.17')
        >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
        Decimal('2.2')
        >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
        Decimal('2')
        >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
        Decimal('0E+1')
        >>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
        Decimal('-Infinity')
        >>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
        Decimal('NaN')
        >>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
        Decimal('-0')
        >>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
        Decimal('-0E+5')
        >>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
        Decimal('NaN')
        >>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
        Decimal('NaN')
        >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
        Decimal('217.0')
        >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
        Decimal('217')
        >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
        Decimal('2.2E+2')
        >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
        Decimal('2E+2')
        >>> ExtendedContext.quantize(1, 2)
        Decimal('1')
        >>> ExtendedContext.quantize(Decimal(1), 2)
        Decimal('1')
        >>> ExtendedContext.quantize(1, Decimal(2))
        Decimal('1')
        """
        # Only b's exponent matters; Decimal.quantize coerces b itself.
        a = _convert_other(a, raiseit=True)
        return a.quantize(b, context=self)
    def radix(self):
        """Just returns 10, as this is Decimal, :)

        >>> ExtendedContext.radix()
        Decimal('10')
        """
        # Base is fixed at 10 by the decimal specification; no operands needed.
        return Decimal(10)
    def remainder(self, a, b):
        """Returns the remainder from integer division.

        The result is the residue of the dividend after the operation of
        calculating integer division as described for divide-integer, rounded
        to precision digits if necessary.  The sign of the result, if
        non-zero, is the same as that of the original dividend.

        This operation will fail under the same conditions as integer division
        (that is, if integer division on the same two operands would fail, the
        remainder cannot be calculated).

        >>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
        Decimal('2.1')
        >>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
        Decimal('1')
        >>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
        Decimal('-1')
        >>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
        Decimal('0.2')
        >>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
        Decimal('0.1')
        >>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
        Decimal('1.0')
        >>> ExtendedContext.remainder(22, 6)
        Decimal('4')
        >>> ExtendedContext.remainder(Decimal(22), 6)
        Decimal('4')
        >>> ExtendedContext.remainder(22, Decimal(6))
        Decimal('4')
        """
        a = _convert_other(a, raiseit=True)
        r = a.__mod__(b, context=self)
        # __mod__ returns NotImplemented for an unconvertible b; translate
        # to the TypeError explicit Context methods are documented to raise.
        if r is NotImplemented:
            raise TypeError("Unable to convert %s to Decimal" % b)
        else:
            return r
    def remainder_near(self, a, b):
        """Returns to be "a - b * n", where n is the integer nearest the exact
        value of "a / b" (if two integers are equally near then the even one
        is chosen).  If the result is equal to 0 then its sign will be the
        sign of a.

        This operation will fail under the same conditions as integer division
        (that is, if integer division on the same two operands would fail, the
        remainder cannot be calculated).

        >>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
        Decimal('-0.9')
        >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
        Decimal('-2')
        >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
        Decimal('1')
        >>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
        Decimal('-1')
        >>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
        Decimal('0.2')
        >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
        Decimal('0.1')
        >>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
        Decimal('-0.3')
        >>> ExtendedContext.remainder_near(3, 11)
        Decimal('3')
        >>> ExtendedContext.remainder_near(Decimal(3), 11)
        Decimal('3')
        >>> ExtendedContext.remainder_near(3, Decimal(11))
        Decimal('3')
        """
        a = _convert_other(a, raiseit=True)
        return a.remainder_near(b, context=self)
    def rotate(self, a, b):
        """Returns a rotated copy of a, b times.

        The coefficient of the result is a rotated copy of the digits in
        the coefficient of the first operand.  The number of places of
        rotation is taken from the absolute value of the second operand,
        with the rotation being to the left if the second operand is
        positive or to the right otherwise.

        >>> ExtendedContext.rotate(Decimal('34'), Decimal('8'))
        Decimal('400000003')
        >>> ExtendedContext.rotate(Decimal('12'), Decimal('9'))
        Decimal('12')
        >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2'))
        Decimal('891234567')
        >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0'))
        Decimal('123456789')
        >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2'))
        Decimal('345678912')
        >>> ExtendedContext.rotate(1333333, 1)
        Decimal('13333330')
        >>> ExtendedContext.rotate(Decimal(1333333), 1)
        Decimal('13333330')
        >>> ExtendedContext.rotate(1333333, Decimal(1))
        Decimal('13333330')
        """
        a = _convert_other(a, raiseit=True)
        return a.rotate(b, context=self)
    def same_quantum(self, a, b):
        """Returns True if the two operands have the same exponent.

        The result is never affected by either the sign or the coefficient of
        either operand.

        >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
        False
        >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
        True
        >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
        False
        >>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
        True
        >>> ExtendedContext.same_quantum(10000, -1)
        True
        >>> ExtendedContext.same_quantum(Decimal(10000), -1)
        True
        >>> ExtendedContext.same_quantum(10000, Decimal(-1))
        True
        """
        # Purely structural comparison: returns bool and never rounds,
        # so (unlike the other methods here) no context is passed along.
        a = _convert_other(a, raiseit=True)
        return a.same_quantum(b)
    def scaleb(self, a, b):
        """Returns the first operand after adding the second value to its
        exponent.

        >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2'))
        Decimal('0.0750')
        >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0'))
        Decimal('7.50')
        >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3'))
        Decimal('7.50E+3')
        >>> ExtendedContext.scaleb(1, 4)
        Decimal('1E+4')
        >>> ExtendedContext.scaleb(Decimal(1), 4)
        Decimal('1E+4')
        >>> ExtendedContext.scaleb(1, Decimal(4))
        Decimal('1E+4')
        """
        a = _convert_other(a, raiseit=True)
        return a.scaleb(b, context=self)
    def shift(self, a, b):
        """Returns a shifted copy of a, b times.

        The coefficient of the result is a shifted copy of the digits
        in the coefficient of the first operand.  The number of places
        to shift is taken from the absolute value of the second operand,
        with the shift being to the left if the second operand is
        positive or to the right otherwise.  Digits shifted into the
        coefficient are zeros.

        >>> ExtendedContext.shift(Decimal('34'), Decimal('8'))
        Decimal('400000000')
        >>> ExtendedContext.shift(Decimal('12'), Decimal('9'))
        Decimal('0')
        >>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2'))
        Decimal('1234567')
        >>> ExtendedContext.shift(Decimal('123456789'), Decimal('0'))
        Decimal('123456789')
        >>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2'))
        Decimal('345678900')
        >>> ExtendedContext.shift(88888888, 2)
        Decimal('888888800')
        >>> ExtendedContext.shift(Decimal(88888888), 2)
        Decimal('888888800')
        >>> ExtendedContext.shift(88888888, Decimal(2))
        Decimal('888888800')
        """
        a = _convert_other(a, raiseit=True)
        return a.shift(b, context=self)
    def sqrt(self, a):
        """Square root of a non-negative number to context precision.

        If the result must be inexact, it is rounded using the round-half-even
        algorithm.

        >>> ExtendedContext.sqrt(Decimal('0'))
        Decimal('0')
        >>> ExtendedContext.sqrt(Decimal('-0'))
        Decimal('-0')
        >>> ExtendedContext.sqrt(Decimal('0.39'))
        Decimal('0.624499800')
        >>> ExtendedContext.sqrt(Decimal('100'))
        Decimal('10')
        >>> ExtendedContext.sqrt(Decimal('1'))
        Decimal('1')
        >>> ExtendedContext.sqrt(Decimal('1.0'))
        Decimal('1.0')
        >>> ExtendedContext.sqrt(Decimal('1.00'))
        Decimal('1.0')
        >>> ExtendedContext.sqrt(Decimal('7'))
        Decimal('2.64575131')
        >>> ExtendedContext.sqrt(Decimal('10'))
        Decimal('3.16227766')
        >>> ExtendedContext.sqrt(2)
        Decimal('1.41421356')
        >>> ExtendedContext.prec
        9
        """
        a = _convert_other(a, raiseit=True)
        return a.sqrt(context=self)
    def subtract(self, a, b):
        """Return the difference between the two operands.

        >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
        Decimal('0.23')
        >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
        Decimal('0.00')
        >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
        Decimal('-0.77')
        >>> ExtendedContext.subtract(8, 5)
        Decimal('3')
        >>> ExtendedContext.subtract(Decimal(8), 5)
        Decimal('3')
        >>> ExtendedContext.subtract(8, Decimal(5))
        Decimal('3')
        """
        a = _convert_other(a, raiseit=True)
        r = a.__sub__(b, context=self)
        # __sub__ returns NotImplemented for an unconvertible b; translate
        # to the TypeError explicit Context methods are documented to raise.
        if r is NotImplemented:
            raise TypeError("Unable to convert %s to Decimal" % b)
        else:
            return r
    def to_eng_string(self, a):
        """Convert a number to a string, using engineering notation.

        (The docstring previously said "scientific notation", which is what
        to_sci_string produces; this method delegates to
        Decimal.to_eng_string.)  The operation is not affected by the context.
        """
        a = _convert_other(a, raiseit=True)
        return a.to_eng_string(context=self)
    def to_sci_string(self, a):
        """Converts a number to a string, using scientific notation.

        The operation is not affected by the context.
        """
        # Decimal.__str__ in this implementation accepts a context keyword.
        a = _convert_other(a, raiseit=True)
        return a.__str__(context=self)
    def to_integral_exact(self, a):
        """Rounds to an integer.

        When the operand has a negative exponent, the result is the same
        as using the quantize() operation using the given operand as the
        left-hand-operand, 1E+0 as the right-hand-operand, and the precision
        of the operand as the precision setting; Inexact and Rounded flags
        are allowed in this operation.  The rounding mode is taken from the
        context.

        >>> ExtendedContext.to_integral_exact(Decimal('2.1'))
        Decimal('2')
        >>> ExtendedContext.to_integral_exact(Decimal('100'))
        Decimal('100')
        >>> ExtendedContext.to_integral_exact(Decimal('100.0'))
        Decimal('100')
        >>> ExtendedContext.to_integral_exact(Decimal('101.5'))
        Decimal('102')
        >>> ExtendedContext.to_integral_exact(Decimal('-101.5'))
        Decimal('-102')
        >>> ExtendedContext.to_integral_exact(Decimal('10E+5'))
        Decimal('1.0E+6')
        >>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))
        Decimal('7.89E+77')
        >>> ExtendedContext.to_integral_exact(Decimal('-Inf'))
        Decimal('-Infinity')
        """
        # Unlike to_integral_value, this variant may set Inexact/Rounded.
        a = _convert_other(a, raiseit=True)
        return a.to_integral_exact(context=self)
    def to_integral_value(self, a):
        """Rounds to an integer.

        When the operand has a negative exponent, the result is the same
        as using the quantize() operation using the given operand as the
        left-hand-operand, 1E+0 as the right-hand-operand, and the precision
        of the operand as the precision setting, except that no flags will
        be set.  The rounding mode is taken from the context.

        >>> ExtendedContext.to_integral_value(Decimal('2.1'))
        Decimal('2')
        >>> ExtendedContext.to_integral_value(Decimal('100'))
        Decimal('100')
        >>> ExtendedContext.to_integral_value(Decimal('100.0'))
        Decimal('100')
        >>> ExtendedContext.to_integral_value(Decimal('101.5'))
        Decimal('102')
        >>> ExtendedContext.to_integral_value(Decimal('-101.5'))
        Decimal('-102')
        >>> ExtendedContext.to_integral_value(Decimal('10E+5'))
        Decimal('1.0E+6')
        >>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
        Decimal('7.89E+77')
        >>> ExtendedContext.to_integral_value(Decimal('-Inf'))
        Decimal('-Infinity')
        """
        a = _convert_other(a, raiseit=True)
        return a.to_integral_value(context=self)

    # The specification renamed this operation; keep the old name as an
    # alias so existing callers of Context.to_integral continue to work.
    to_integral = to_integral_value
class _WorkRep(object):
    """Lightweight mutable (sign, int, exp) working form of a number,
    used internally while performing arithmetic.
    """
    __slots__ = ('sign', 'int', 'exp')
    # sign: 0 or 1
    # int:  int (nonnegative coefficient)
    # exp:  None, int, or string

    def __init__(self, value=None):
        if value is None:
            # Empty representation; caller fills the fields in later.
            self.sign, self.int, self.exp = None, 0, None
        elif isinstance(value, Decimal):
            # Unpack a Decimal's internal fields; coefficient becomes an int.
            self.sign = value._sign
            self.int = int(value._int)
            self.exp = value._exp
        else:
            # assert isinstance(value, tuple)
            self.sign, self.int, self.exp = value[0], value[1], value[2]

    def __repr__(self):
        return "(%r, %r, %r)" % (self.sign, self.int, self.exp)

    __str__ = __repr__
def _normalize(op1, op2, prec=0):
    """Normalizes op1, op2 to have the same exp and length of coefficient.

    Done during addition.  Mutates op1 and op2 in place and returns them.
    """
    # Identify the operand with the larger exponent ("high") and the
    # other one ("low"); high will be scaled down to low's exponent.
    if op1.exp < op2.exp:
        high, low = op2, op1
    else:
        high, low = op1, op2

    # Let exp = min(high.exp - 1, high.adjusted() - precision - 1).
    # Then adding 10**exp to high has the same effect (after rounding)
    # as adding any positive quantity smaller than 10**exp; similarly
    # for subtraction.  So if low is smaller than 10**exp we replace
    # it with 10**exp.  This avoids high.exp - low.exp getting too large.
    exp = high.exp + min(-1, len(str(high.int)) - prec - 2)
    if len(str(low.int)) + low.exp - 1 < exp:
        low.int = 1
        low.exp = exp

    # Rescale high's coefficient so both operands share low's exponent.
    high.int *= 10 ** (high.exp - low.exp)
    high.exp = low.exp
    return op1, op2
##### Integer arithmetic functions used by ln, log10, exp and __pow__ #####

# Number of bits needed to represent a nonnegative int (bit_length of 0 is 0).
_nbits = int.bit_length
def _decimal_lshift_exact(n, e):
    """ Given integers n and e, return n * 10**e if it's an integer, else None.

    The computation is designed to avoid computing large powers of 10
    unnecessarily.

    >>> _decimal_lshift_exact(3, 4)
    30000
    >>> _decimal_lshift_exact(300, -999999999)  # returns None

    """
    if n == 0:
        return 0
    if e >= 0:
        # Nonnegative shift is always exact.
        return n * 10**e
    # e < 0: the result is an integer exactly when 10**-e divides n,
    # i.e. when n carries at least -e trailing decimal zeros.
    digits = str(abs(n))
    trailing_zeros = len(digits) - len(digits.rstrip('0'))
    if trailing_zeros < -e:
        return None
    return n // 10**-e
def _sqrt_nearest(n, a):
    """Closest integer to the square root of the positive integer n.  a is
    an initial approximation to the square root.  Any positive integer
    will do for a, but the closer a is to the square root of n the
    faster convergence will be.
    """
    if n <= 0 or a <= 0:
        raise ValueError("Both arguments to _sqrt_nearest should be positive.")

    # Newton iteration with a ceiling division: (-n // a) is -ceil(n/a),
    # so each step computes (a + ceil(n/a)) >> 1.  Stop at a fixed point.
    previous = 0
    while a != previous:
        previous, a = a, (a - (-n // a)) >> 1
    return a
def _rshift_nearest(x, shift):
    """Given an integer x and a nonnegative integer shift, return closest
    integer to x / 2**shift; use round-to-even in case of a tie.
    """
    divisor = 1 << shift
    quotient = x >> shift
    remainder = x & (divisor - 1)
    # Round up when the remainder exceeds half, or equals half and the
    # quotient is odd (half-to-even tie-breaking).
    if 2 * remainder + (quotient & 1) > divisor:
        return quotient + 1
    return quotient
def _div_nearest(a, b):
    """Closest integer to a/b, a and b positive integers; rounds to even
    in the case of a tie.
    """
    quotient, remainder = divmod(a, b)
    # Round up when remainder > b/2, or remainder == b/2 with odd quotient
    # (round-half-to-even).
    if 2 * remainder + (quotient & 1) > b:
        return quotient + 1
    return quotient
def _ilog(x, M, L = 8):
    """Integer approximation to M*log(x/M), with absolute error boundable
    in terms only of x/M.

    Given positive integers x and M, return an integer approximation to
    M * log(x/M).  For L = 8 and 0.1 <= x/M <= 10 the difference
    between the approximation and the exact result is at most 22.  For
    L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15.  In
    both cases these are upper bounds on the error; it will usually be
    much smaller."""

    # The basic algorithm is the following: let log1p be the function
    # log1p(x) = log(1+x).  Then log(x/M) = log1p((x-M)/M).  We use
    # the reduction
    #
    #    log1p(y) = 2*log1p(y/(1+sqrt(1+y)))
    #
    # repeatedly until the argument to log1p is small (< 2**-L in
    # absolute value).  For small y we can use the Taylor series
    # expansion
    #
    #    log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T
    #
    # truncating at T such that y**T is small enough.  The whole
    # computation is carried out in a form of fixed-point arithmetic,
    # with a real number z being represented by an integer
    # approximation to z*M.  To avoid loss of precision, the y below
    # is actually an integer approximation to 2**R*y*M, where R is the
    # number of reductions performed so far.

    y = x-M
    # argument reduction; R = number of reductions performed
    R = 0
    # The two guard conditions are the "shift left" and "shift right"
    # forms of |y/2**R| >= M * 2**-L, chosen to avoid negative shifts.
    while (R <= L and abs(y) << L-R >= M or
           R > L and abs(y) >> R-L >= M):
        y = _div_nearest((M*y) << 1,
                         M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M))
        R += 1

    # Taylor series with T terms
    T = -int(-10*len(str(M))//(3*L))
    yshift = _rshift_nearest(y, R)
    w = _div_nearest(M, T)
    # Evaluate the series by Horner's scheme, highest term first.
    for k in range(T-1, 0, -1):
        w = _div_nearest(M, k) - _div_nearest(yshift*w, M)

    return _div_nearest(w*y, M)
def _dlog10(c, e, p):
    """Given integers c, e and p with c > 0, p >= 0, compute an integer
    approximation to 10**p * log10(c*10**e), with an absolute error of
    at most 1.  Assumes that c*10**e is not exactly 1."""

    # increase precision by 2; compensate for this by dividing
    # final result by 100
    p += 2

    # write c*10**e as d*10**f with either:
    #   f >= 0 and 1 <= d <= 10, or
    #   f <= 0 and 0.1 <= d <= 1.
    # Thus for c*10**e close to 1, f = 0
    l = len(str(c))
    f = e+l - (e+l >= 1)

    if p > 0:
        M = 10**p
        k = e+p-f
        if k >= 0:
            c *= 10**k
        else:
            c = _div_nearest(c, 10**-k)

        # log10(d) = log(d)/log(10), both scaled by M.
        log_d = _ilog(c, M)  # error < 5 + 22 = 27
        log_10 = _log10_digits(p)  # error < 1
        log_d = _div_nearest(log_d*M, log_10)
        log_tenpower = f*M # exact
    else:
        # p <= 0: |log10(d)| < 1, so approximating it by 0 is good enough.
        log_d = 0  # error < 2.31
        log_tenpower = _div_nearest(f, 10**-p) # error < 0.5

    # Undo the two extra digits of working precision.
    return _div_nearest(log_tenpower+log_d, 100)
def _dlog(c, e, p):
    """Given integers c, e and p with c > 0, compute an integer
    approximation to 10**p * log(c*10**e), with an absolute error of
    at most 1.  Assumes that c*10**e is not exactly 1."""

    # Increase precision by 2. The precision increase is compensated
    # for at the end with a division by 100.
    p += 2

    # rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10,
    # or f <= 0 and 0.1 <= d <= 1.  Then we can compute 10**p * log(c*10**e)
    # as 10**p * log(d) + 10**p*f * log(10).
    l = len(str(c))
    f = e+l - (e+l >= 1)

    # compute approximation to 10**p*log(d), with error < 27
    if p > 0:
        k = e+p-f
        if k >= 0:
            c *= 10**k
        else:
            c = _div_nearest(c, 10**-k)  # error of <= 0.5 in c

        # _ilog magnifies existing error in c by a factor of at most 10
        log_d = _ilog(c, 10**p) # error < 5 + 22 = 27
    else:
        # p <= 0: just approximate the whole thing by 0; error < 2.31
        log_d = 0

    # compute approximation to f*10**p*log(10), with error < 11.
    if f:
        extra = len(str(abs(f)))-1
        if p + extra >= 0:
            # error in f * _log10_digits(p+extra) < |f| * 1 = |f|
            # after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11
            f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra)
        else:
            f_log_ten = 0
    else:
        f_log_ten = 0

    # error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1
    return _div_nearest(f_log_ten + log_d, 100)
class _Log10Memoize(object):
    """Class to compute, store, and allow retrieval of, digits of the
    constant log(10) = 2.302585....  This constant is needed by
    Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__."""
    def __init__(self):
        # Seed cache: leading digits of log(10), stored without the
        # decimal point; extended lazily by getdigits.
        self.digits = "23025850929940456840179914546843642076011014886"

    def getdigits(self, p):
        """Given an integer p >= 0, return floor(10**p)*log(10).

        For example, self.getdigits(3) returns 2302.
        """
        # digits are stored as a string, for quick conversion to
        # integer in the case that we've already computed enough
        # digits; the stored digits should always be correct
        # (truncated, not rounded to nearest).
        if p < 0:
            raise ValueError("p should be nonnegative")

        if p >= len(self.digits):
            # compute p+3, p+6, p+9, ... digits; continue until at
            # least one of the extra digits is nonzero
            extra = 3
            while True:
                # compute p+extra digits, correct to within 1ulp
                M = 10**(p+extra+2)
                digits = str(_div_nearest(_ilog(10*M, M), 100))
                if digits[-extra:] != '0'*extra:
                    break
                extra += 3
            # keep all reliable digits so far; remove trailing zeros
            # and next nonzero digit
            self.digits = digits.rstrip('0')[:-1]
        return int(self.digits[:p+1])
# Shared module-level accessor: _log10_digits(p) == floor(10**p * log(10)),
# backed by a single memoizing instance.
_log10_digits = _Log10Memoize().getdigits
def _iexp(x, M, L=8):
    """Given integers x and M, M > 0, such that x/M is small in absolute
    value, compute an integer approximation to M*exp(x/M).  For 0 <=
    x/M <= 2.4, the absolute error in the result is bounded by 60 (and
    is usually much smaller)."""

    # Algorithm: to compute exp(z) for a real number z, first divide z
    # by a suitable power R of 2 so that |z/2**R| < 2**-L.  Then
    # compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor
    # series
    #
    #     expm1(x) = x + x**2/2! + x**3/3! + ...
    #
    # Now use the identity
    #
    #     expm1(2x) = expm1(x)*(expm1(x)+2)
    #
    # R times to compute the sequence expm1(z/2**R),
    # expm1(z/2**(R-1)), ... , exp(z/2), exp(z).

    # Find R such that x/2**R/M <= 2**-L
    R = _nbits((x<<L)//M)

    # Taylor series.  (2**L)**T > M
    T = -int(-10*len(str(M))//(3*L))
    y = _div_nearest(x, T)
    Mshift = M<<R
    # Horner evaluation of the truncated expm1 series, highest term first.
    for i in range(T-1, 0, -1):
        y = _div_nearest(x*(Mshift + y), Mshift * i)

    # Expansion
    # Apply the doubling identity R times to undo the argument reduction.
    for k in range(R-1, -1, -1):
        Mshift = M<<(k+2)
        y = _div_nearest(y*(y+Mshift), Mshift)

    return M+y
def _dexp(c, e, p):
    """Compute an approximation to exp(c*10**e), with p decimal places of
    precision.

    Returns integers d, f such that:

      10**(p-1) <= d <= 10**p, and
      (d-1)*10**f < exp(c*10**e) < (d+1)*10**f

    In other words, d*10**f is an approximation to exp(c*10**e) with p
    digits of precision, and with an error in d of at most 1.  This is
    almost, but not quite, the same as the error being < 1ulp: when d
    = 10**(p-1) the error could be up to 10 ulp."""

    # we'll call iexp with M = 10**(p+2), giving p+3 digits of precision
    p += 2

    # compute log(10) with extra precision = adjusted exponent of c*10**e
    extra = max(0, e + len(str(c)) - 1)
    q = p + extra

    # compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),
    # rounding down
    shift = e+q
    if shift >= 0:
        cshift = c*10**shift
    else:
        cshift = c//10**-shift
    quot, rem = divmod(cshift, _log10_digits(q))

    # reduce remainder back to original precision
    rem = _div_nearest(rem, 10**extra)

    # error in result of _iexp < 120;  error after division < 0.62
    # Second element (quot - p + 3) is the decimal exponent f of the result.
    return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
def _dpower(xc, xe, yc, ye, p):
    """Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and
    y = yc*10**ye, compute x**y.  Returns a pair of integers (c, e) such that:

      10**(p-1) <= c <= 10**p, and
      (c-1)*10**e < x**y < (c+1)*10**e

    in other words, c*10**e is an approximation to x**y with p digits
    of precision, and with an error in c of at most 1.  (This is
    almost, but not quite, the same as the error being < 1ulp: when c
    == 10**(p-1) we can only guarantee error < 10ulp.)

    We assume that: x is positive and not equal to 1, and y is nonzero.
    """

    # Find b such that 10**(b-1) <= |y| <= 10**b
    b = len(str(abs(yc))) + ye

    # log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point
    lxc = _dlog(xc, xe, p+b+1)

    # compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1)
    shift = ye-b
    if shift >= 0:
        pc = lxc*yc*10**shift
    else:
        pc = _div_nearest(lxc*yc, 10**-shift)

    if pc == 0:
        # we prefer a result that isn't exactly 1; this makes it
        # easier to compute a correctly rounded result in __pow__
        if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1:
            coeff, exp = 10**(p-1)+1, 1-p
        else:
            coeff, exp = 10**p-1, -p
    else:
        # exp(pc*10**(-p-1)) computed at p+1 digits, then rounded to p.
        coeff, exp = _dexp(pc, -(p+1), p+1)
        coeff = _div_nearest(coeff, 10)
        exp += 1

    return coeff, exp
def _log10_lb(c, correction = {
        '1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
        '6': 23, '7': 16, '8': 10, '9': 5}):
    """Compute a lower bound for 100*log10(c) for a positive integer c.

    The `correction` table maps the leading digit d to an upper bound on
    100*(1 - log10(d.xxx...)); it is a deliberately shared, read-only
    default argument acting as a precomputed lookup table.
    """
    if c <= 0:
        # Bug fix: the guard rejects zero as well, so the requirement is
        # "positive", not "nonnegative" as the old message claimed.
        raise ValueError("The argument to _log10_lb should be positive.")
    str_c = str(c)
    return 100*len(str_c) - correction[str_c[0]]
##### Helper Functions ####################################################
def _convert_other(other, raiseit=False, allow_float=False):
    """Convert other to Decimal.

    Verifies that it's ok to use in an implicit construction.
    If allow_float is true, allow conversion from float;  this
    is used in the comparison methods (__eq__ and friends).
    """
    if isinstance(other, Decimal):
        result = other
    elif isinstance(other, int):
        result = Decimal(other)
    elif allow_float and isinstance(other, float):
        result = Decimal.from_float(other)
    elif raiseit:
        raise TypeError("Unable to convert %s to Decimal" % other)
    else:
        # Operator-protocol convention: let the other operand try.
        result = NotImplemented
    return result
def _convert_for_comparison(self, other, equality_op=False):
    """Given a Decimal instance self and a Python object other, return
    a pair (s, o) of Decimal instances such that "s op o" is
    equivalent to "self op other" for any of the 6 comparison
    operators "op".

    Returns (NotImplemented, NotImplemented) when no meaningful
    conversion exists, so callers can fall back to reflected ops.
    """
    # Fast path: both operands are already Decimal.
    if isinstance(other, Decimal):
        return self, other
    # Comparison with a Rational instance (also includes integers):
    # self op n/d <=> self*d op n (for n and d integers, d positive).
    # A NaN or infinity can be left unchanged without affecting the
    # comparison result.
    if isinstance(other, _numbers.Rational):
        if not self._is_special:
            # Scale self by the denominator so both sides are integers
            # at the same exponent; sign and exponent are preserved.
            self = _dec_from_triple(self._sign,
                                    str(int(self._int) * other.denominator),
                                    self._exp)
        return self, Decimal(other.numerator)
    # Comparisons with float and complex types. == and != comparisons
    # with complex numbers should succeed, returning either True or False
    # as appropriate. Other comparisons return NotImplemented.
    if equality_op and isinstance(other, _numbers.Complex) and other.imag == 0:
        other = other.real
    if isinstance(other, float):
        context = getcontext()
        # Mixing float with Decimal records the FloatOperation condition:
        # merely flagged for ==/!=, but raised as an error (if trapped)
        # for ordering comparisons.
        if equality_op:
            context.flags[FloatOperation] = 1
        else:
            context._raise_error(FloatOperation,
                "strict semantics for mixing floats and Decimals are enabled")
        return self, Decimal.from_float(other)
    return NotImplemented, NotImplemented
##### Setup Specific Contexts ############################################
# The default context prototype used by Context()
# Is mutable, so that new contexts can have different default values
# NOTE(review): prec=17 / Emax=308 / Emin=-324 match IEEE-754 double
# precision rather than CPython's defaults (28 / 999999 / -999999) --
# presumably deliberate for this JavaScript-backed port (see the
# ``import _jsre as re`` below); confirm before changing.
DefaultContext = Context(
        prec=17, rounding=ROUND_HALF_EVEN,
        traps=[DivisionByZero, Overflow, InvalidOperation],
        flags=[],
        Emax=308,
        Emin=-324,
        capitals=1,
        clamp=0
)
# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
        prec=9, rounding=ROUND_HALF_UP,
        traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
        flags=[],
)
ExtendedContext = Context(
        prec=9, rounding=ROUND_HALF_EVEN,
        traps=[],
        flags=[],
)
##### crud for parsing strings #############################################
#
# Regular expression used for parsing numeric strings. Additional
# comments:
#
# 1. Uncomment the two '\s*' lines to allow leading and/or trailing
# whitespace. But note that the specification disallows whitespace in
# a numeric string.
#
# 2. For finite numbers (not infinities and NaNs) the body of the
# number between the optional sign and the optional exponent must have
# at least one decimal digit, possibly after the decimal point. The
# lookahead expression '(?=\d|\.\d)' checks this.
#import re
#_parser = re.compile(r""" # A numeric string consists of:
# \s*
# (?P<sign>[-+])? # an optional sign, followed by either...
# (
# (?=\d|\.\d) # ...a number (with at least one digit)
# (?P<int>\d*) # having a (possibly empty) integer part
# (\.(?P<frac>\d*))? # followed by an optional fractional part
# (E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
# |
# Inf(inity)? # ...an infinity, or...
# |
# (?P<signal>s)? # ...an (optionally signaling)
# NaN # NaN
# (?P<diag>\d*) # with (possibly empty) diagnostic info.
# )
# \s*
# \Z
#""", re.VERBOSE | re.IGNORECASE).match
# _jsre is the JavaScript-backed ``re`` substitute used by this port in
# place of the stdlib regex module (the stdlib patterns above are kept
# commented out for reference).
import _jsre as re
# Matchers used by the rounding code: a coefficient tail that is all
# zeros, and one that is exactly one half (a 5 followed by zeros).
_all_zeros = re.compile('0*$').match
_exact_half = re.compile('50*$').match
##### PEP3101 support functions ##############################################
# The functions in this section have little to do with the Decimal
# class, and could potentially be reused or adapted for other pure
# Python numeric classes that want to implement __format__
#
# A format specifier for Decimal looks like:
#
# [[fill]align][sign][#][0][minimumwidth][,][.precision][type]
#_parse_format_specifier_regex = re.compile(r"""\A
#(?:
# (?P<fill>.)?
# (?P<align>[<>=^])
#)?
#(?P<sign>[-+ ])?
#(?P<alt>\#)?
#(?P<zeropad>0)?
#(?P<minimumwidth>(?!0)\d+)?
#(?P<thousands_sep>,)?
#(?:\.(?P<precision>0|(?!0)\d+))?
#(?P<type>[eEfFgGn%])?
#\Z
#""", re.VERBOSE|re.DOTALL)
del re
# The locale module is only needed for the 'n' format specifier. The
# rest of the PEP 3101 code functions quite happily without it, so we
# don't care too much if locale isn't present.
try:
import locale as _locale
except ImportError:
pass
def _parse_format_specifier(format_spec, _localeconv=None):
    """Parse and validate a format specifier.

    Turns a standard numeric format specifier into a dict, with the
    following entries:

      fill: fill character to pad field to minimum width
      align: alignment type, either '<', '>', '=' or '^'
      sign: either '+', '-' or ' '
      minimumwidth: nonnegative integer giving minimum width
      zeropad: boolean, indicating whether to pad with zeros
      thousands_sep: string to use as thousands separator, or ''
      grouping: grouping for thousands separators, in format
        used by localeconv
      decimal_point: string to use for decimal point
      precision: nonnegative integer giving precision, or None
      type: one of the characters 'eEfFgG%', or None

    Raises ValueError on an invalid or self-contradictory spec.
    """
    # NOTE(review): _parse_format_specifier_regex is only present as a
    # commented-out definition earlier in this file -- presumably it is
    # supplied elsewhere in this port; confirm it exists at runtime,
    # otherwise this line raises NameError.
    m = _parse_format_specifier_regex.match(format_spec)
    if m is None:
        raise ValueError("Invalid format specifier: " + format_spec)
    # get the dictionary
    format_dict = m.groupdict()
    # zeropad; defaults for fill and alignment. If zero padding
    # is requested, the fill and align fields should be absent.
    fill = format_dict['fill']
    align = format_dict['align']
    format_dict['zeropad'] = (format_dict['zeropad'] is not None)
    if format_dict['zeropad']:
        if fill is not None:
            raise ValueError("Fill character conflicts with '0'"
                             " in format specifier: " + format_spec)
        if align is not None:
            raise ValueError("Alignment conflicts with '0' in "
                             "format specifier: " + format_spec)
    format_dict['fill'] = fill or ' '
    # PEP 3101 originally specified that the default alignment should
    # be left; it was later agreed that right-aligned makes more sense
    # for numeric types. See http://bugs.python.org/issue6857.
    format_dict['align'] = align or '>'
    # default sign handling: '-' for negative, '' for positive
    if format_dict['sign'] is None:
        format_dict['sign'] = '-'
    # minimumwidth defaults to 0; precision remains None if not given
    format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')
    if format_dict['precision'] is not None:
        format_dict['precision'] = int(format_dict['precision'])
    # if format type is 'g' or 'G' then a precision of 0 makes little
    # sense; convert it to 1. Same if format type is unspecified.
    if format_dict['precision'] == 0:
        if format_dict['type'] is None or format_dict['type'] in 'gGn':
            format_dict['precision'] = 1
    # determine thousands separator, grouping, and decimal separator, and
    # add appropriate entries to format_dict
    if format_dict['type'] == 'n':
        # apart from separators, 'n' behaves just like 'g'
        format_dict['type'] = 'g'
        if _localeconv is None:
            # _localeconv parameter exists so tests can inject a fake
            # locale table; by default read the process locale.
            _localeconv = _locale.localeconv()
        if format_dict['thousands_sep'] is not None:
            raise ValueError("Explicit thousands separator conflicts with "
                             "'n' type in format specifier: " + format_spec)
        format_dict['thousands_sep'] = _localeconv['thousands_sep']
        format_dict['grouping'] = _localeconv['grouping']
        format_dict['decimal_point'] = _localeconv['decimal_point']
    else:
        # Non-locale types: separator only if ',' was given; groups of 3.
        if format_dict['thousands_sep'] is None:
            format_dict['thousands_sep'] = ''
        format_dict['grouping'] = [3, 0]
        format_dict['decimal_point'] = '.'
    return format_dict
def _format_align(sign, body, spec):
"""Given an unpadded, non-aligned numeric string 'body' and sign
string 'sign', add padding and alignment conforming to the given
format specifier dictionary 'spec' (as produced by
parse_format_specifier).
"""
# how much extra space do we have to play with?
minimumwidth = spec['minimumwidth']
fill = spec['fill']
padding = fill*(minimumwidth - len(sign) - len(body))
align = spec['align']
if align == '<':
result = sign + body + padding
elif align == '>':
result = padding + sign + body
elif align == '=':
result = sign + padding + body
elif align == '^':
half = len(padding)//2
result = padding[:half] + sign + body + padding[half:]
else:
raise ValueError('Unrecognised alignment field')
return result
def _group_lengths(grouping):
"""Convert a localeconv-style grouping into a (possibly infinite)
iterable of integers representing group lengths.
"""
# The result from localeconv()['grouping'], and the input to this
# function, should be a list of integers in one of the
# following three forms:
#
# (1) an empty list, or
# (2) nonempty list of positive integers + [0]
# (3) list of positive integers + [locale.CHAR_MAX], or
from itertools import chain, repeat
if not grouping:
return []
elif grouping[-1] == 0 and len(grouping) >= 2:
return chain(grouping[:-1], repeat(grouping[-2]))
elif grouping[-1] == _locale.CHAR_MAX:
return grouping[:-1]
else:
raise ValueError('unrecognised format for grouping')
def _insert_thousands_sep(digits, spec, min_width=1):
    """Insert thousands separators into a digit string.

    spec is a dictionary whose keys should include 'thousands_sep' and
    'grouping'; typically it's the result of parsing the format
    specifier using _parse_format_specifier.

    The min_width keyword argument gives the minimum length of the
    result, which will be padded on the left with zeros if necessary.

    If necessary, the zero padding adds an extra '0' on the left to
    avoid a leading thousands separator. For example, inserting
    commas every three digits in '123456', with min_width=8, gives
    '0,123,456', even though that has length 9.
    """
    sep = spec['thousands_sep']
    grouping = spec['grouping']
    # Groups are built right-to-left and joined in reverse at the end.
    groups = []
    for l in _group_lengths(grouping):
        if l <= 0:
            raise ValueError("group length should be positive")
        # max(..., 1) forces at least 1 digit to the left of a separator
        l = min(max(len(digits), min_width, 1), l)
        groups.append('0'*(l - len(digits)) + digits[-l:])
        digits = digits[:-l]
        min_width -= l
        if not digits and min_width <= 0:
            # All digits consumed and width satisfied: done.
            break
        min_width -= len(sep)
    else:
        # _group_lengths was finite and exhausted (no break): everything
        # that remains goes into one final, zero-padded group.
        l = max(len(digits), min_width, 1)
        groups.append('0'*(l - len(digits)) + digits[-l:])
    return sep.join(reversed(groups))
def _format_sign(is_negative, spec):
"""Determine sign character."""
if is_negative:
return '-'
elif spec['sign'] in ' +':
return spec['sign']
else:
return ''
def _format_number(is_negative, intpart, fracpart, exp, spec):
    """Format a number, given the following data:

    is_negative: true if the number is negative, else false
    intpart: string of digits that must appear before the decimal point
    fracpart: string of digits that must come after the point
    exp: exponent, as an integer
    spec: dictionary resulting from parsing the format specifier

    This function uses the information in spec to:
      insert separators (decimal separator and thousands separators)
      format the sign
      format the exponent
      add trailing '%' for the '%' type
      zero-pad if necessary
      fill and align if necessary
    """
    sign = _format_sign(is_negative, spec)
    # 'alt' (the '#' flag) forces a decimal point even with no fraction.
    if fracpart or spec['alt']:
        fracpart = spec['decimal_point'] + fracpart
    if exp != 0 or spec['type'] in 'eE':
        # Exponent letter case follows the format type.
        echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']]
        fracpart += "{0}{1:+}".format(echar, exp)
    if spec['type'] == '%':
        fracpart += '%'
    if spec['zeropad']:
        # Zero padding is implemented by telling the separator-inserter
        # the minimum width the integer part must reach.
        min_width = spec['minimumwidth'] - len(fracpart) - len(sign)
    else:
        min_width = 0
    intpart = _insert_thousands_sep(intpart, spec, min_width)
    return _format_align(sign, intpart+fracpart, spec)
##### Useful Constants (internal use only) ################################
# Reusable defaults
_Infinity = Decimal('Inf')
_NegativeInfinity = Decimal('-Inf')
_NaN = Decimal('NaN')
_Zero = Decimal(0)
_One = Decimal(1)
_NegativeOne = Decimal(-1)
# _SignedInfinity[sign] is infinity w/ that sign
_SignedInfinity = (_Infinity, _NegativeInfinity)
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo _PyHASH_MODULUS
_PyHASH_MODULUS = sys.hash_info.modulus
# hash values to use for positive and negative infinities, and nans
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan
# _PyHASH_10INV is the inverse of 10 modulo the prime _PyHASH_MODULUS
# (computed via Fermat's little theorem: 10**(p-2) mod p).
_PyHASH_10INV = pow(10, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
# sys is only needed for the hash constants above; drop it from the
# module namespace.
del sys
try:
    import _decimal
except ImportError:
    # No C accelerator available: keep this pure-Python implementation.
    pass
else:
    # The C accelerator exists: delete every name this module defined
    # that _decimal does not provide, then shadow the rest with the
    # C versions via the star-import.
    s1 = set(dir())
    s2 = set(dir(_decimal))
    for name in s1 - s2:
        del globals()[name]
    del s1, s2, name
    from _decimal import *
if __name__ == '__main__':
    # Running the module directly executes its embedded doctests.
    import doctest, decimal
    doctest.testmod(decimal)
|
kenglishhi/gae-django-sandbox | refs/heads/master | django/core/cache/backends/locmem.py | 19 | "Thread-safe in-memory cache backend."
import time
try:
import cPickle as pickle
except ImportError:
import pickle
from django.core.cache.backends.base import BaseCache
from django.utils.synch import RWLock
class CacheClass(BaseCache):
    """Thread-safe in-memory cache backend.

    Values are stored pickled in a plain dict guarded by a
    readers/writer lock; absolute expiry timestamps live in a parallel
    dict keyed the same way.
    """
    def __init__(self, _, params):
        # The first positional argument (the backend "location") is
        # unused for a purely in-memory cache.
        BaseCache.__init__(self, params)
        self._cache = {}          # key -> pickled value
        self._expire_info = {}    # key -> absolute expiry time (epoch seconds)
        max_entries = params.get('max_entries', 300)
        try:
            self._max_entries = int(max_entries)
        except (ValueError, TypeError):
            self._max_entries = 300
        cull_frequency = params.get('cull_frequency', 3)
        try:
            self._cull_frequency = int(cull_frequency)
        except (ValueError, TypeError):
            self._cull_frequency = 3
        self._lock = RWLock()
    def add(self, key, value, timeout=None):
        # Store only if the key is absent or already expired; returns
        # True when the value was stored.
        self._lock.writer_enters()
        try:
            exp = self._expire_info.get(key)
            if exp is None or exp <= time.time():
                try:
                    self._set(key, pickle.dumps(value), timeout)
                    return True
                except pickle.PickleError:
                    # Unpicklable values are silently not cached.
                    pass
            return False
        finally:
            self._lock.writer_leaves()
    def get(self, key, default=None):
        self._lock.reader_enters()
        try:
            exp = self._expire_info.get(key)
            if exp is None:
                return default
            elif exp > time.time():
                try:
                    return pickle.loads(self._cache[key])
                except pickle.PickleError:
                    return default
        finally:
            self._lock.reader_leaves()
        # Fell through: the key exists but has expired.  The reader lock
        # was released by the finally above; re-enter as a writer to
        # purge the stale entry before returning the default.
        self._lock.writer_enters()
        try:
            try:
                del self._cache[key]
                del self._expire_info[key]
            except KeyError:
                pass
            return default
        finally:
            self._lock.writer_leaves()
    def _set(self, key, value, timeout=None):
        # Internal setter; the caller must already hold the writer lock.
        if len(self._cache) >= self._max_entries:
            self._cull()
        if timeout is None:
            timeout = self.default_timeout
        self._cache[key] = value
        self._expire_info[key] = time.time() + timeout
    def set(self, key, value, timeout=None):
        self._lock.writer_enters()
        # Python 2.4 doesn't allow combined try-except-finally blocks.
        try:
            try:
                self._set(key, pickle.dumps(value), timeout)
            except pickle.PickleError:
                # Unpicklable values are silently dropped.
                pass
        finally:
            self._lock.writer_leaves()
    def has_key(self, key):
        self._lock.reader_enters()
        try:
            exp = self._expire_info.get(key)
            if exp is None:
                return False
            elif exp > time.time():
                return True
        finally:
            self._lock.reader_leaves()
        # Expired entry: upgrade to the writer lock and purge it before
        # reporting the key as absent.
        self._lock.writer_enters()
        try:
            try:
                del self._cache[key]
                del self._expire_info[key]
            except KeyError:
                pass
            return False
        finally:
            self._lock.writer_leaves()
    def _cull(self):
        # Evict every _cull_frequency-th key; a frequency of 0 means
        # "drop everything".  Caller must hold the writer lock.
        if self._cull_frequency == 0:
            self.clear()
        else:
            doomed = [k for (i, k) in enumerate(self._cache) if i % self._cull_frequency == 0]
            for k in doomed:
                self._delete(k)
    def _delete(self, key):
        # Remove a key from both dicts, tolerating partial/missing entries.
        try:
            del self._cache[key]
        except KeyError:
            pass
        try:
            del self._expire_info[key]
        except KeyError:
            pass
    def delete(self, key):
        self._lock.writer_enters()
        try:
            self._delete(key)
        finally:
            self._lock.writer_leaves()
    def clear(self):
        # NOTE(review): unlike the other mutators this takes no lock --
        # presumably it relies on dict.clear() being atomic under the
        # GIL; confirm before relying on it during concurrent access.
        self._cache.clear()
        self._expire_info.clear()
|
ernestj/pitft | refs/heads/pitft | tools/perf/scripts/python/sctop.py | 11180 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Command-line handling: optional [comm] filter and/or refresh [interval].
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
    sys.exit(usage)
if len(sys.argv) > 2:
    # Two args: argv[1] is the comm filter, argv[2] the interval.
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    # One arg: an integer is treated as the interval, anything else as
    # the comm filter.
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval
# syscall id -> hit count (autodict comes from Perf-Trace-Util's Core).
syscalls = autodict()
def trace_begin():
    # Called once by perf before event processing starts: kick off the
    # background thread that periodically prints the totals (Python 2
    # 'thread' module).
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # Per-event handler invoked by perf for every sys_enter tracepoint:
    # count one hit per syscall id, optionally restricted to one comm.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        # First hit for this id: the autodict slot is not yet an int.
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Runs forever in the background thread: every 'interval' seconds,
    # clear the terminal, print the syscall histogram sorted by count
    # (descending), then reset the counters.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
                                 "----------"),
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                # syscall_name may fail for unknown ids; skip the row.
                pass
        syscalls.clear()
        time.sleep(interval)
|
sgerhart/ansible | refs/heads/maintenance_policy_module | lib/ansible/modules/packaging/os/swdepot.py | 64 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Raul Melo
# Written by Raul Melo <raulmelo@gmail.com>
# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: swdepot
short_description: Manage packages with swdepot package manager (HP-UX)
description:
- Will install, upgrade and remove packages with swdepot package manager (HP-UX)
version_added: "1.4"
notes: []
author: "Raul Melo (@melodous)"
options:
name:
description:
- package name.
required: true
version_added: 1.4
state:
description:
- whether to install (C(present), C(latest)), or remove (C(absent)) a package.
required: true
choices: [ 'present', 'latest', 'absent']
version_added: 1.4
depot:
description:
- The source repository from which install or upgrade a package.
version_added: 1.4
'''
# Usage examples rendered by ansible-doc.  Note: valid 'state' values are
# 'present', 'latest' and 'absent' (see DOCUMENTATION); the first example
# previously used the invalid value 'installed', which fails the module's
# argument_spec choices validation.
EXAMPLES = '''
- swdepot:
    name: unzip-6.0
    state: present
    depot: 'repository:/path'
- swdepot:
    name: unzip
    state: latest
    depot: 'repository:/path'
- swdepot:
    name: unzip
    state: absent
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import shlex_quote
def compare_package(version1, version2):
    """ Compare version packages.
        Return values:
        -1 first minor
        0 equal
        1 first greater """

    def normalize(v):
        # Strip trailing ".0" components so '1.0' == '1', then compare
        # the remaining dot-separated fields numerically.
        return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")]

    left = normalize(version1)
    right = normalize(version2)
    # Python list comparison is lexicographic over the numeric fields;
    # the subtraction of booleans yields the -1/0/1 contract directly.
    return (left > right) - (left < right)
def query_package(module, name, depot=None):
    """ Returns whether a package is installed or not and version. """

    cmd_list = '/usr/sbin/swlist -a revision -l product'
    safe_name = shlex_quote(name)
    # Query either the given depot or the local installed-software DB;
    # the grep keeps only the line for the requested product.
    if depot:
        command = "%s -s %s %s | grep %s" % (cmd_list, shlex_quote(depot), safe_name, safe_name)
    else:
        command = "%s %s | grep %s" % (cmd_list, safe_name, safe_name)
    rc, stdout, stderr = module.run_command(command, use_unsafe_shell=True)
    if rc == 0:
        # Collapse runs of whitespace, then take the second column
        # (the revision) of the matched swlist line.
        version = re.sub(r"\s\s+|\t", " ", stdout).strip().split()[1]
    else:
        version = None

    return rc, version
def remove_package(module, name):
    """ Uninstall package if installed. """

    cmd_remove = '/usr/sbin/swremove'
    rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name))
    # On success report stdout; on failure surface stderr instead.
    return (rc, stdout) if rc == 0 else (rc, stderr)
def install_package(module, depot, name):
    """ Install package if not already installed """

    cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false'
    rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name))
    if rc != 0:
        # Failure: surface stderr so the caller can report it.
        return rc, stderr
    return rc, stdout
def main():
    """Module entry point: dispatch on 'state' and report via exit_json."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(aliases=['pkg'], required=True),
            state=dict(choices=['present', 'absent', 'latest'], required=True),
            depot=dict(default=None, required=False)
        ),
        supports_check_mode=True
    )
    name = module.params['name']
    state = module.params['state']
    depot = module.params['depot']
    changed = False
    msg = "No changed"
    rc = 0
    # Installing or upgrading requires a source repository.
    if (state == 'present' or state == 'latest') and depot is None:
        output = "depot parameter is mandatory in present or latest task"
        module.fail_json(name=name, msg=output, rc=rc)
    # Check local version
    # (query_package returns rc 0 iff the package is installed locally)
    rc, version_installed = query_package(module, name)
    if not rc:
        installed = True
        msg = "Already installed"
    else:
        installed = False
    if (state == 'present' or state == 'latest') and installed is False:
        # Not installed yet: install from the depot.
        if module.check_mode:
            module.exit_json(changed=True)
        rc, output = install_package(module, depot, name)
        if not rc:
            changed = True
            msg = "Package installed"
        else:
            module.fail_json(name=name, msg=output, rc=rc)
    elif state == 'latest' and installed is True:
        # Check depot version
        rc, version_depot = query_package(module, name, depot)
        if not rc:
            # Upgrade only when the depot holds a strictly newer version.
            if compare_package(version_installed, version_depot) == -1:
                if module.check_mode:
                    module.exit_json(changed=True)
                # Install new version
                rc, output = install_package(module, depot, name)
                if not rc:
                    msg = "Package upgraded, Before " + version_installed + " Now " + version_depot
                    changed = True
                else:
                    module.fail_json(name=name, msg=output, rc=rc)
        else:
            output = "Software package not in repository " + depot
            module.fail_json(name=name, msg=output, rc=rc)
    elif state == 'absent' and installed is True:
        if module.check_mode:
            module.exit_json(changed=True)
        rc, output = remove_package(module, name)
        if not rc:
            changed = True
            msg = "Package removed"
        else:
            module.fail_json(name=name, msg=output, rc=rc)
    # Reached in check mode only when no branch above would have changed
    # anything.
    if module.check_mode:
        module.exit_json(changed=False)
    module.exit_json(changed=changed, name=name, state=state, msg=msg)
if __name__ == '__main__':
    main()
|
kevinmarks/mentiontech | refs/heads/master | requests/packages/urllib3/packages/ordered_dict.py | 2039 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.
    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three: [PREV, NEXT, KEY].
    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary. Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # Reinitializing an existing instance keeps its current order
            # structures; only a fresh instance builds them.
            self.__root
        except AttributeError:
            self.__root = root = [] # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)
    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)
    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev
    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Walk the linked list forward, yielding each link's key.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]
    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Walk the linked list backward via each link's PREV pointer.
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]
    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            # Break the link cycles explicitly so reference counting can
            # reclaim them promptly, then reset the sentinel.
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)
    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the tail node (most recently inserted key).
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # Unlink the head node (oldest key).
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value
    # -- the following methods do not depend on the internal structure --
    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)
    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]
    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]
    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)
    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]
    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])
    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.

        If E is a dict instance, does:           for k in E: od[k] = E[k]
        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
        In either case, this is followed by:     for k, v in F.items(): od[k] = v
        '''
        # self is extracted from *args (instead of being a named
        # parameter) so that a caller may pass a keyword argument
        # literally named "self".
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value
    __update = update # let subclasses override update without breaking __init__
    __marker = object()
    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        # The private sentinel distinguishes "no default given" from an
        # explicit default of None.
        if default is self.__marker:
            raise KeyError(key)
        return default
    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default
    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running guards against infinite recursion when an
        # OrderedDict (directly or indirectly) contains itself.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]
    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Drop the order-bookkeeping attributes; they are rebuilt on load.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)
    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)
    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).
        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)
    def __ne__(self, other):
        return not self == other
    # -- the following methods are only used in Python 2.7 --
    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)
    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)
    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
|
yhoshino11/pytest_example | refs/heads/master | .tox/py27/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euctwfreq.py | 3132 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http:#www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
# Typical Distribution Ratio for EUC-TW: roughly 25% of the ideal ratio
# computed in the header comment above; used as a confidence threshold by
# the character-distribution analyser.
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Char to FreqOrder table: EUCTW_TABLE_SIZE is the number of meaningful
# entries in the EUCTWCharToFreqOrder table below.
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
#Everything below is of no interest for detection purpose
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
|
dkubiak789/odoo | refs/heads/8.0 | addons/mrp_byproduct/mrp_byproduct.py | 108 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class mrp_subproduct(osv.osv):
    """A byproduct line attached to a bill of material.

    Each record describes a secondary product produced alongside the main
    product of a BoM, plus the rule ('fixed' or 'variable') used to compute
    its quantity on production orders.
    """
    _name = 'mrp.subproduct'
    _description = 'Byproduct'
    _columns={
        # Product produced as a byproduct.
        'product_id': fields.many2one('product.product', 'Product', required=True),
        # Quantity of byproduct per BoM (interpretation depends on subproduct_type).
        'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
        'subproduct_type': fields.selection([('fixed','Fixed'),('variable','Variable')], 'Quantity Type', required=True, help="Define how the quantity of byproducts will be set on the production orders using this BoM.\
'Fixed' depicts a situation where the quantity of created byproduct is always equal to the quantity set on the BoM, regardless of how many are created in the production order.\
By opposition, 'Variable' means that the quantity will be computed as\
'(quantity of byproduct set on the BoM / quantity of manufactured product set on the BoM * quantity of manufactured product in the production order.)'"),
        # Deleting the BoM deletes its byproduct lines.
        'bom_id': fields.many2one('mrp.bom', 'BoM', ondelete='cascade'),
    }
    _defaults={
        'subproduct_type': 'variable',
        'product_qty': lambda *a: 1.0,
    }

    def onchange_product_id(self, cr, uid, ids, product_id, context=None):
        """ Changes UoM if product_id changes.
        @param product_id: Changed product_id
        @return: Dictionary of changed values
        """
        if product_id:
            prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            # Default the UoM to the product's own unit of measure.
            v = {'product_uom': prod.uom_id.id}
            return {'value': v}
        return {}

    def onchange_uom(self, cr, uid, ids, product_id, product_uom, context=None):
        """Warn and reset the UoM when it belongs to a different category
        than the product's default unit of measure.

        @return: onchange dict with optional 'warning' and corrected 'value'.
        """
        res = {'value':{}}
        if not product_uom or not product_id:
            return res
        product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
        uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
        if uom.category_id.id != product.uom_id.category_id.id:
            # Mixing UoM categories would make quantity conversion meaningless.
            res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
            res['value'].update({'product_uom': product.uom_id.id})
        return res
class mrp_bom(osv.osv):
    """Extend mrp.bom with the list of byproduct lines."""
    _name = 'mrp.bom'
    _description = 'Bill of Material'
    _inherit='mrp.bom'
    _columns={
        # Byproduct lines are duplicated when the BoM is copied.
        'sub_products':fields.one2many('mrp.subproduct', 'bom_id', 'Byproducts', copy=True),
    }
class mrp_production(osv.osv):
    """Extend mrp.production to create stock moves for BoM byproducts."""
    _description = 'Production'
    _inherit= 'mrp.production'

    def action_confirm(self, cr, uid, ids, context=None):
        """ Confirms production order and calculates quantity based on subproduct_type.
        @return: Newly generated picking Id.
        """
        move_obj = self.pool.get('stock.move')
        # Let the base implementation confirm the order first.
        picking_id = super(mrp_production,self).action_confirm(cr, uid, ids, context=context)
        product_uom_obj = self.pool.get('product.uom')
        for production in self.browse(cr, uid, ids):
            # Byproducts come out of the virtual production location.
            source = production.product_id.property_stock_production.id
            if not production.bom_id:
                continue
            for sub_product in production.bom_id.sub_products:
                # Convert the produced quantity into the BoM's UoM before scaling.
                product_uom_factor = product_uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, production.bom_id.product_uom.id)
                qty1 = sub_product.product_qty
                qty2 = production.product_uos and production.product_uos_qty or False
                product_uos_factor = 0.0
                if qty2 and production.bom_id.product_uos.id:
                    product_uos_factor = product_uom_obj._compute_qty(cr, uid, production.product_uos.id, production.product_uos_qty, production.bom_id.product_uos.id)
                if sub_product.subproduct_type == 'variable':
                    # 'variable': scale the BoM byproduct qty by produced/BoM ratio.
                    if production.product_qty:
                        qty1 *= product_uom_factor / (production.bom_id.product_qty or 1.0)
                    if production.product_uos_qty:
                        qty2 *= product_uos_factor / (production.bom_id.product_uos_qty or 1.0)
                data = {
                    'name': 'PROD:'+production.name,
                    'date': production.date_planned,
                    'product_id': sub_product.product_id.id,
                    'product_uom_qty': qty1,
                    'product_uom': sub_product.product_uom.id,
                    'product_uos_qty': qty2,
                    'product_uos': production.product_uos and production.product_uos.id or False,
                    'location_id': source,
                    'location_dest_id': production.location_dest_id.id,
                    'move_dest_id': production.move_prod_id.id,
                    'production_id': production.id
                }
                move_id = move_obj.create(cr, uid, data, context=context)
                move_obj.action_confirm(cr, uid, [move_id], context=context)
        return picking_id

    def _get_subproduct_factor(self, cr, uid, production_id, move_id=None, context=None):
        """Compute the factor to compute the qty of products to produce for the given production_id. By default,
        it's always equal to the quantity encoded in the production order or the production wizard, but with
        the module mrp_byproduct installed it can differ for byproducts having type 'variable'.
        :param production_id: ID of the mrp.order
        :param move_id: ID of the stock move that needs to be produced. Identify the product to produce.
        :return: The factor to apply to the quantity that we should produce for the given production order and stock move.
        """
        sub_obj = self.pool.get('mrp.subproduct')
        move_obj = self.pool.get('stock.move')
        production_obj = self.pool.get('mrp.production')
        production_browse = production_obj.browse(cr, uid, production_id, context=context)
        move_browse = move_obj.browse(cr, uid, move_id, context=context)
        subproduct_factor = 1
        # Only 'variable' byproduct lines change the factor.
        sub_id = sub_obj.search(cr, uid,[('product_id', '=', move_browse.product_id.id),('bom_id', '=', production_browse.bom_id.id), ('subproduct_type', '=', 'variable')], context=context)
        if sub_id:
            subproduct_record = sub_obj.browse(cr ,uid, sub_id[0], context=context)
            if subproduct_record.bom_id.product_qty:
                subproduct_factor = subproduct_record.product_qty / subproduct_record.bom_id.product_qty
            return subproduct_factor
        # Not a variable byproduct: fall back to the default behaviour.
        return super(mrp_production, self)._get_subproduct_factor(cr, uid, production_id, move_id, context=context)
class change_production_qty(osv.osv_memory):
    """Extend the change-quantity wizard so byproduct moves are rescaled too."""
    _inherit = 'change.production.qty'

    def _update_product_to_produce(self, cr, uid, prod, qty, context=None):
        """Update the to-produce stock moves of `prod` to the new quantity `qty`.

        The main product's move is set to `qty` directly; byproduct moves are
        rescaled via _get_subproduct_factor ('variable') or kept at the BoM
        quantity ('fixed').
        """
        # Removed unused local: bom_obj = self.pool.get('mrp.bom') was never read.
        move_lines_obj = self.pool.get('stock.move')
        prod_obj = self.pool.get('mrp.production')
        for m in prod.move_created_ids:
            if m.product_id.id == prod.product_id.id:
                # Main product move: take the requested quantity as-is.
                move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': qty})
            else:
                for sub_product_line in prod.bom_id.sub_products:
                    if sub_product_line.product_id.id == m.product_id.id:
                        factor = prod_obj._get_subproduct_factor(cr, uid, prod.id, m.id, context=context)
                        # 'variable' byproducts scale with the new quantity;
                        # 'fixed' ones always keep the BoM quantity.
                        subproduct_qty = sub_product_line.subproduct_type == 'variable' and qty * factor or sub_product_line.product_qty
                        move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': subproduct_qty})
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
lucafavatella/intellij-community | refs/heads/cli-wip | python/lib/Lib/site-packages/django/test/utils.py | 185 | import sys
import time
import os
import warnings
from django.conf import settings
from django.core import mail
from django.core.mail.backends import locmem
from django.test import signals
from django.template import Template
from django.utils.translation import deactivate
__all__ = ('Approximate', 'ContextList', 'setup_test_environment',
'teardown_test_environment', 'get_runner')
class Approximate(object):
    """Wrapper that compares equal to any value within ``places`` decimal
    places of ``val``.

    Handy in tests for asserting floating point results without requiring
    exact equality.
    """

    def __init__(self, val, places=7):
        self.val = val
        self.places = places

    def __repr__(self):
        return repr(self.val)

    def __eq__(self, other):
        # Exact equality short-circuits; otherwise the difference must round
        # to zero at the requested precision.
        if self.val == other:
            return True
        difference = abs(self.val - other)
        return round(difference, self.places) == 0
class ContextList(list):
    """A wrapper that provides direct key access to context items contained
    in a list of context objects.
    """

    def __getitem__(self, key):
        # Non-string keys behave like a normal list index/slice.
        if not isinstance(key, basestring):
            return super(ContextList, self).__getitem__(key)
        # String keys: scan each subcontext in order, first match wins.
        for subcontext in self:
            if key in subcontext:
                return subcontext[key]
        raise KeyError(key)

    def __contains__(self, key):
        try:
            self[key]
        except KeyError:
            return False
        return True
def instrumented_test_render(self, context):
    """
    An instrumented Template render method, providing a signal
    that can be intercepted by the test system Client
    """
    # Announce the render first so listeners see template + context ...
    signals.template_rendered.send(sender=self, template=self, context=context)
    # ... then produce the actual output exactly as Template._render would.
    rendered = self.nodelist.render(context)
    return rendered
def setup_test_environment():
    """Perform any global pre-test setup. This involves:

        - Installing the instrumented test renderer
        - Set the email backend to the locmem email backend.
        - Setting the active locale to match the LANGUAGE_CODE setting.

    The originals are stashed on Template/mail so teardown_test_environment()
    can restore them.
    """
    # Swap in a render method that fires template_rendered so the test client
    # can capture templates and contexts.
    Template.original_render = Template._render
    Template._render = instrumented_test_render

    # Redirect all outgoing mail to the in-memory locmem backend.
    mail.original_SMTPConnection = mail.SMTPConnection
    mail.SMTPConnection = locmem.EmailBackend

    mail.original_email_backend = settings.EMAIL_BACKEND
    settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'

    # Fresh outbox for captured messages.
    mail.outbox = []

    # Deactivate any active translation so tests start from the default locale.
    deactivate()
def teardown_test_environment():
    """Perform any global post-test teardown. This involves:

        - Restoring the original test renderer
        - Restoring the email sending functions

    Undoes exactly what setup_test_environment() installed, deleting the
    stashed originals afterwards.
    """
    Template._render = Template.original_render
    del Template.original_render

    mail.SMTPConnection = mail.original_SMTPConnection
    del mail.original_SMTPConnection

    settings.EMAIL_BACKEND = mail.original_email_backend
    del mail.original_email_backend

    # Drop the captured-mail outbox created during setup.
    del mail.outbox
def get_warnings_state():
    """
    Returns an object containing the state of the warnings module
    """
    # warnings exposes no public API for snapshotting its configuration, so
    # take a shallow copy of the internal filter list. This implementation
    # (paired with restore_warnings_state) works on Python 2.4 to 2.7.
    return list(warnings.filters)
def restore_warnings_state(state):
    """
    Restores the state of the warnings module when passed an object that was
    returned by get_warnings_state()
    """
    # Replace the internal filter list wholesale with a copy of the snapshot,
    # so later mutations of `state` cannot leak into the warnings module.
    warnings.filters = list(state)
def get_runner(settings):
    """
    Import and return the test-runner class named by the dotted path in
    ``settings.TEST_RUNNER``.
    """
    path_parts = settings.TEST_RUNNER.split('.')
    # Allow for Python 2.5 relative paths: a bare name imports from '.'.
    module_path = '.'.join(path_parts[:-1]) if len(path_parts) > 1 else '.'
    runner_name = path_parts[-1]
    module = __import__(module_path, {}, {}, runner_name)
    return getattr(module, runner_name)
|
50wu/gpdb | refs/heads/master | gpMgmt/bin/gppylib/test/unit/test_unit_gplog.py | 7 | #!/usr/bin/env python3
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
import logging
import gplog
from test.unit.gp_unittest import GpTestCase, run_tests
class GplogTestCase(GpTestCase):
    """Unit tests for gplog's module-level logger helpers."""

    def test_basics(self):
        # The default logger must be available without any explicit setup.
        logger = gplog.get_default_logger()
        self.assertTrue(logger is not None)

    def test_set_loglevels(self):
        # Default effective level is INFO; enabling verbose logging lowers it
        # to DEBUG.
        logger = gplog.get_default_logger()
        self.assertTrue(logger.getEffectiveLevel() == logging.INFO)
        gplog.enable_verbose_logging()
        self.assertTrue(logger.getEffectiveLevel() == logging.DEBUG)

    def test_log_to_file_only_is_ok_when_not_initialized(self):
        # Clearing the module-level logger must not make log_to_file_only raise.
        gplog._LOGGER = None
        gplog.log_to_file_only("should not crash")

    def test_log_to_file_only_is_ok_when_stdout_not_initialized(self):
        # Same guarantee after only the unittest (file-only) logger exists.
        gplog._LOGGER = None
        gplog.get_unittest_logger()
        gplog.log_to_file_only("should not crash")
# Allow running this module directly as a test script.
if __name__ == '__main__':
    run_tests()
|
Team-T2-NMR/NMR-T2 | refs/heads/master | features_rdkit.py | 1 | from __future__ import print_function
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Descriptors
def example_mol():
    """Return a small example molecule parsed from the SMILES 'CCC(=O)C(O)=O'."""
    mol = Chem.MolFromSmiles('CCC(=O)C(O)=O')
    return mol
def hydrogen_from_carbon(mol, atom):
    """Return a proton bonded to `atom`.

    Scans both ends of every bond of `atom`; asserts that between one and
    three hydrogens are attached and returns the last one encountered.
    (`mol` is accepted for interface symmetry but not used.)
    """
    found_hydrogen = None
    hydrogen_count = 0
    for bond in atom.GetBonds():
        assert isinstance(bond, rdkit.Chem.rdchem.Bond)
        # A hydrogen can sit at either end of the bond.
        for bonded_atom in (bond.GetBeginAtom(), bond.GetEndAtom()):
            if bonded_atom.GetAtomicNum() == 1:
                found_hydrogen = bonded_atom
                hydrogen_count += 1
    assert found_hydrogen is not None
    assert 1 <= hydrogen_count <= 3
    return found_hydrogen
def get_sphere(atom, start, end=None):
    """
    Return all atoms in the start:end sphere starting from (centered at) atom.

    Examples:
        (atom, 1): 1st sphere ie. immediate neighbours only
        (atom, 5) or (atom, 5, 5): 5th sphere only
        (atom, 5, 6): 5th + 6th spheres

    Bug fix vs. the original: candidates discovered within the *same* sphere
    are now de-duplicated. Previously an atom reachable via two paths of
    equal length (e.g. across a ring) appeared twice in the result, inflating
    the downstream count/charge features.
    """
    if end is None:
        end = start
    end += 1

    # Atoms already assigned to some sphere (or the center), keyed by index.
    seen_indices = {atom.GetIdx()}

    def _next_sphere(frontier):
        # One BFS step: unseen neighbours of the frontier, de-duplicated so
        # ring closures cannot yield the same atom twice.
        nxt = []
        for a in frontier:
            for neighbour in a.GetNeighbors():
                idx = neighbour.GetIdx()
                if idx not in seen_indices:
                    seen_indices.add(idx)
                    nxt.append(neighbour)
        return nxt

    # Walk out to sphere start-1; those atoms are excluded from the result.
    last_sphere = [atom]
    for _ in range(start - 1):
        last_sphere = _next_sphere(last_sphere)

    # Collect spheres start .. end (inclusive of the original `end`).
    included = []
    for _ in range(start, end):
        last_sphere = _next_sphere(last_sphere)
        included += last_sphere
    return included
def gasteiger_charges(atoms):
    """Return the Gasteiger partial charges of `atoms` as floats.

    RDKit stores the charge as a string property, so each value is converted.
    """
    charges = []
    for atom in atoms:
        charges.append(float(atom.GetProp('_GasteigerCharge')))
    return charges
def min_max_avg_charge(atoms):
    """Return [min, max, mean] of the Gasteiger charges of `atoms`.

    Assumes `atoms` is non-empty (min/max/avg are undefined otherwise).
    """
    charges = gasteiger_charges(atoms)
    total = sum(charges)
    return [min(charges), max(charges), total / len(charges)]
def get_gasteiger_features(atom):
    """Return Gasteiger-charge features for `atom`.

    13 values: the atom's own Gasteiger charge, then [min, max, avg] charge
    for each of spheres 1-4 around it.

    Bug fix: GetProp('_GasteigerCharge') returns the charge as a *string*;
    it is now converted to float so the feature vector is numerically
    homogeneous (the sphere statistics from gasteiger_charges() are already
    floats).
    """
    features = [float(atom.GetProp('_GasteigerCharge'))]
    for sphere_num in [1, 2, 3, 4]:
        sphere = get_sphere(atom, sphere_num)
        features += min_max_avg_charge(sphere)
    return features
def num_C(atoms):
    """Count carbon atoms in `atoms`."""
    return sum(1 for atom in atoms if atom.GetAtomicNum() == 6)

def num_N(atoms):
    """Count nitrogen atoms in `atoms`."""
    return sum(1 for atom in atoms if atom.GetAtomicNum() == 7)

def num_O(atoms):
    """Count oxygen atoms in `atoms`."""
    return sum(1 for atom in atoms if atom.GetAtomicNum() == 8)

def num_F(atoms):
    """Count fluorine atoms in `atoms`."""
    return sum(1 for atom in atoms if atom.GetAtomicNum() == 9)

def num_S(atoms):
    """Count sulfur atoms in `atoms`."""
    return sum(1 for atom in atoms if atom.GetAtomicNum() == 16)

def num_Cl(atoms):
    """Count chlorine atoms in `atoms`."""
    return sum(1 for atom in atoms if atom.GetAtomicNum() == 17)

def num_Br(atoms):
    """Count bromine atoms in `atoms`."""
    return sum(1 for atom in atoms if atom.GetAtomicNum() == 35)

def num_I(atoms):
    """Count iodine atoms in `atoms`."""
    return sum(1 for atom in atoms if atom.GetAtomicNum() == 53)

def num_aromatic(atoms):
    """Count aromatic atoms in `atoms`."""
    return sum(1 for atom in atoms if atom.GetIsAromatic())

def num_in_ring(atoms):
    """Count atoms in `atoms` that are members of a ring."""
    return sum(1 for atom in atoms if atom.IsInRing())
def get_topological_features(atom):
    """Return element/aromaticity/ring counts for spheres 1-4 around `atom`.

    40 integers: for each sphere (1..4), the ten counters in order
    C, N, O, F, S, Cl, Br, I, aromatic, in-ring.
    """
    counters = (num_C, num_N, num_O, num_F, num_S, num_Cl, num_Br, num_I,
                num_aromatic, num_in_ring)
    features = []
    for sphere_num in (1, 2, 3, 4):
        sphere = get_sphere(atom, sphere_num)
        features.extend(counter(sphere) for counter in counters)
    return features
def features_for_atom(mol, atom_index):
    """Compute the feature vector for a proton attached to atom `atom_index`.

    Adds explicit hydrogens, computes Gasteiger charges, moves to a hydrogen
    bonded to the indexed (heavy) atom, and returns the concatenated
    Gasteiger-charge and topological features.

    Fix: the features were previously computed but discarded (only their
    count was printed); the vector is now returned so callers can use it.
    The return value is backward-compatible (callers that ignored the old
    None are unaffected).
    """
    mol = Chem.AddHs(mol)
    assert isinstance(mol, rdkit.Chem.rdchem.Mol)
    atom = mol.GetAtomWithIdx(atom_index)
    assert isinstance(atom, rdkit.Chem.rdchem.Atom)
    AllChem.ComputeGasteigerCharges(mol)
    atom = hydrogen_from_carbon(mol, atom)
    gasteiger_features = get_gasteiger_features(atom)
    topological_features = get_topological_features(atom)
    features = gasteiger_features + topological_features
    print(len(features), 'features so far')
    return features
# Smoke test when run as a script: featurize atom 1 of the example molecule.
if __name__ == '__main__':
    mol = example_mol()
    features_for_atom(mol,1)
|
hkchenhongyi/django | refs/heads/master | django/db/models/expressions.py | 33 | import copy
import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends import utils as backend_utils
from django.db.models import fields
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import Q, refs_aggregate
from django.utils import six, timezone
from django.utils.functional import cached_property
class Combinable(object):
    """
    Provides the ability to combine one or two objects with
    some connector. For example F('foo') + F('bar').

    Subclasses gain the full set of arithmetic operators; each operator
    builds a CombinedExpression tree node rather than computing a value.
    """

    # Arithmetic connectors
    ADD = '+'
    SUB = '-'
    MUL = '*'
    DIV = '/'
    POW = '^'
    # The following is a quoted % operator - it is quoted because it can be
    # used in strings that also have parameter substitution.
    MOD = '%%'

    # Bitwise operators - note that these are generated by .bitand()
    # and .bitor(), the '&' and '|' are reserved for boolean operator
    # usage.
    BITAND = '&'
    BITOR = '|'

    def _combine(self, other, connector, reversed, node=None):
        # Wrap plain Python values so both operands are resolvable
        # expressions; timedeltas get duration-aware wrapping.
        if not hasattr(other, 'resolve_expression'):
            # everything must be resolvable to an expression
            if isinstance(other, datetime.timedelta):
                other = DurationValue(other, output_field=fields.DurationField())
            else:
                other = Value(other)

        # `reversed` is True for the __r*__ reflected operators, where
        # `other` is the left-hand side.
        if reversed:
            return CombinedExpression(other, connector, self)
        return CombinedExpression(self, connector, other)

    #############
    # OPERATORS #
    #############

    def __add__(self, other):
        return self._combine(other, self.ADD, False)

    def __sub__(self, other):
        return self._combine(other, self.SUB, False)

    def __mul__(self, other):
        return self._combine(other, self.MUL, False)

    def __truediv__(self, other):
        return self._combine(other, self.DIV, False)

    def __div__(self, other):  # Python 2 compatibility
        return type(self).__truediv__(self, other)

    def __mod__(self, other):
        return self._combine(other, self.MOD, False)

    def __pow__(self, other):
        return self._combine(other, self.POW, False)

    def __and__(self, other):
        # '&' is reserved for Q-object boolean logic, not bitwise AND.
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )

    def bitand(self, other):
        return self._combine(other, self.BITAND, False)

    def __or__(self, other):
        # '|' is reserved for Q-object boolean logic, not bitwise OR.
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )

    def bitor(self, other):
        return self._combine(other, self.BITOR, False)

    # Reflected operators: used when the expression is the right-hand side.
    def __radd__(self, other):
        return self._combine(other, self.ADD, True)

    def __rsub__(self, other):
        return self._combine(other, self.SUB, True)

    def __rmul__(self, other):
        return self._combine(other, self.MUL, True)

    def __rtruediv__(self, other):
        return self._combine(other, self.DIV, True)

    def __rdiv__(self, other):  # Python 2 compatibility
        return type(self).__rtruediv__(self, other)

    def __rmod__(self, other):
        return self._combine(other, self.MOD, True)

    def __rpow__(self, other):
        return self._combine(other, self.POW, True)

    def __rand__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )

    def __ror__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )
class BaseExpression(object):
    """
    Base class for all query expressions.

    Subclasses implement as_sql() and expose their children through
    get_source_expressions()/set_source_expressions().
    """

    # aggregate specific fields: True when this expression is a terminal
    # aggregate clause (set during resolve_expression).
    is_summary = False

    def __init__(self, output_field=None):
        # Explicit output field, or None to infer it from the sources later.
        self._output_field = output_field

    def get_db_converters(self, connection):
        # This expression's own converter runs before the field's converters.
        return [self.convert_value] + self.output_field.get_db_converters(connection)

    def get_source_expressions(self):
        # Leaf expressions have no children.
        return []

    def set_source_expressions(self, exprs):
        assert len(exprs) == 0

    def _parse_expressions(self, *expressions):
        # Coerce each argument: expressions pass through, strings become
        # field references (F), everything else becomes a literal Value.
        return [
            arg if hasattr(arg, 'resolve_expression') else (
                F(arg) if isinstance(arg, six.string_types) else Value(arg)
            ) for arg in expressions
        ]

    def as_sql(self, compiler, connection):
        """
        Responsible for returning a (sql, [params]) tuple to be included
        in the current query.

        Different backends can provide their own implementation, by
        providing an `as_{vendor}` method and patching the Expression:

        ```
        def override_as_sql(self, compiler, connection):
            # custom logic
            return super(Expression, self).as_sql(compiler, connection)
        setattr(Expression, 'as_' + connection.vendor, override_as_sql)
        ```

        Arguments:
         * compiler: the query compiler responsible for generating the query.
           Must have a compile method, returning a (sql, [params]) tuple.
           Calling compiler(value) will return a quoted `value`.

         * connection: the database connection used for the current query.

        Returns: (sql, params)
          Where `sql` is a string containing ordered sql parameters to be
          replaced with the elements of the list `params`.
        """
        raise NotImplementedError("Subclasses must implement as_sql()")

    @cached_property
    def contains_aggregate(self):
        # True if any child (recursively) is or contains an aggregate.
        for expr in self.get_source_expressions():
            if expr and expr.contains_aggregate:
                return True
        return False

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        """
        Provides the chance to do any preprocessing or validation before being
        added to the query.

        Arguments:
         * query: the backend query implementation
         * allow_joins: boolean allowing or denying use of joins
           in this query
         * reuse: a set of reusable joins for multijoins
         * summarize: a terminal aggregate clause
         * for_save: whether this expression about to be used in a save or update

        Returns: an Expression to be added to the query.
        """
        # Work on a copy so the original expression can be reused.
        c = self.copy()
        c.is_summary = summarize
        c.set_source_expressions([
            expr.resolve_expression(query, allow_joins, reuse, summarize)
            for expr in c.get_source_expressions()
        ])
        return c

    def _prepare(self):
        """
        Hook used by Field.get_prep_lookup() to do custom preparation.
        """
        return self

    @property
    def field(self):
        # Alias kept for API compatibility with column references.
        return self.output_field

    @cached_property
    def output_field(self):
        """
        Returns the output type of this expression.

        Raises FieldError if the type cannot be determined.
        """
        if self._output_field_or_none is None:
            raise FieldError("Cannot resolve expression type, unknown output_field")
        return self._output_field_or_none

    @cached_property
    def _output_field_or_none(self):
        """
        Returns the output field of this expression, or None if no output type
        can be resolved. Note that the 'output_field' property will raise
        FieldError if no type can be resolved, but this attribute allows for
        None values.
        """
        if self._output_field is None:
            self._resolve_output_field()
        return self._output_field

    def _resolve_output_field(self):
        """
        Attempts to infer the output type of the expression. If the output
        fields of all source fields match then we can simply infer the same
        type here. This isn't always correct, but it makes sense most of the
        time.

        Consider the difference between `2 + 2` and `2 / 3`. Inferring
        the type here is a convenience for the common case. The user should
        supply their own output_field with more complex computations.

        If a source does not have an `_output_field` then we exclude it from
        this check. If all sources are `None`, then an error will be thrown
        higher up the stack in the `output_field` property.
        """
        if self._output_field is None:
            sources = self.get_source_fields()
            num_sources = len(sources)
            if num_sources == 0:
                self._output_field = None
            else:
                # Take the first source's field; every other non-None source
                # must be an instance of a compatible class.
                for source in sources:
                    if self._output_field is None:
                        self._output_field = source
                    if source is not None and not isinstance(self._output_field, source.__class__):
                        raise FieldError(
                            "Expression contains mixed types. You must set output_field")

    def convert_value(self, value, expression, connection, context):
        """
        Expressions provide their own converters because users have the option
        of manually specifying the output_field which may be a different type
        from the one the database returns.
        """
        field = self.output_field
        internal_type = field.get_internal_type()
        if value is None:
            return value
        elif internal_type == 'FloatField':
            return float(value)
        elif internal_type.endswith('IntegerField'):
            return int(value)
        elif internal_type == 'DecimalField':
            return backend_utils.typecast_decimal(value)
        return value

    def get_lookup(self, lookup):
        # Lookups and transforms are delegated to the output field's registry.
        return self.output_field.get_lookup(lookup)

    def get_transform(self, name):
        return self.output_field.get_transform(name)

    def relabeled_clone(self, change_map):
        # Return a copy with table aliases renamed throughout the subtree.
        clone = self.copy()
        clone.set_source_expressions(
            [e.relabeled_clone(change_map) for e in self.get_source_expressions()])
        return clone

    def copy(self):
        c = copy.copy(self)
        c.copied = True
        return c

    def refs_aggregate(self, existing_aggregates):
        """
        Does this expression contain a reference to some of the
        existing aggregates? If so, returns the aggregate and also
        the lookup parts that *weren't* found. So, if

            existing_aggregates = {'max_id': Max('id')}
            self.name = 'max_id'
            queryset.filter(max_id__range=[10,100])

        then this method will return Max('id') and those parts of the
        name that weren't found. In this case `max_id` is found and the range
        portion is returned as ('range',).
        """
        for node in self.get_source_expressions():
            agg, lookup = node.refs_aggregate(existing_aggregates)
            if agg:
                return agg, lookup
        return False, ()

    def get_group_by_cols(self):
        # Non-aggregate expressions group by themselves; aggregates group by
        # the columns of their (non-aggregate) sources.
        if not self.contains_aggregate:
            return [self]
        cols = []
        for source in self.get_source_expressions():
            cols.extend(source.get_group_by_cols())
        return cols

    def get_source_fields(self):
        """
        Returns the underlying field types used by this
        aggregate.
        """
        return [e._output_field_or_none for e in self.get_source_expressions()]

    def asc(self):
        return OrderBy(self)

    def desc(self):
        return OrderBy(self, descending=True)

    def reverse_ordering(self):
        # Expressions carry no direction of their own; OrderBy overrides this.
        return self
class Expression(BaseExpression, Combinable):
    """
    An expression that can be combined with other expressions.

    Concrete convenience base: BaseExpression's query machinery plus
    Combinable's operator overloads.
    """
    pass
class CombinedExpression(Expression):
    """Two expressions joined by a connector, e.g. F('a') + F('b')."""

    def __init__(self, lhs, connector, rhs, output_field=None):
        super(CombinedExpression, self).__init__(output_field=output_field)
        self.connector = connector
        self.lhs = lhs
        self.rhs = rhs

    def __repr__(self):
        return "<{}: {}>".format(self.__class__.__name__, self)

    def __str__(self):
        return "{} {} {}".format(self.lhs, self.connector, self.rhs)

    def get_source_expressions(self):
        return [self.lhs, self.rhs]

    def set_source_expressions(self, exprs):
        self.lhs, self.rhs = exprs

    def as_sql(self, compiler, connection):
        # Probe both sides' output fields; either may be unresolvable, which
        # is fine here — we only need to know whether a DurationField is
        # involved.
        try:
            lhs_output = self.lhs.output_field
        except FieldError:
            lhs_output = None
        try:
            rhs_output = self.rhs.output_field
        except FieldError:
            rhs_output = None
        # Backends without a native duration type need special arithmetic.
        if (not connection.features.has_native_duration_field and
                ((lhs_output and lhs_output.get_internal_type() == 'DurationField')
                or (rhs_output and rhs_output.get_internal_type() == 'DurationField'))):
            return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
        expressions = []
        expression_params = []
        sql, params = compiler.compile(self.lhs)
        expressions.append(sql)
        expression_params.extend(params)
        sql, params = compiler.compile(self.rhs)
        expressions.append(sql)
        expression_params.extend(params)
        # order of precedence: parenthesize so nested combinations keep
        # their intended grouping.
        expression_wrapper = '(%s)'
        sql = connection.ops.combine_expression(self.connector, expressions)
        return expression_wrapper % sql, expression_params

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c
class DurationExpression(CombinedExpression):
    """Combined expression doing duration arithmetic on backends that lack a
    native duration column type."""

    def compile(self, side, compiler, connection):
        # Duration-typed sides (other than literal DurationValues) need the
        # backend-specific arithmetic wrapper around their SQL.
        if not isinstance(side, DurationValue):
            try:
                output = side.output_field
            except FieldError:
                pass
            else:
                if output.get_internal_type() == 'DurationField':
                    sql, params = compiler.compile(side)
                    return connection.ops.format_for_duration_arithmetic(sql), params
        return compiler.compile(side)

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        expressions = []
        expression_params = []
        sql, params = self.compile(self.lhs, compiler, connection)
        expressions.append(sql)
        expression_params.extend(params)
        sql, params = self.compile(self.rhs, compiler, connection)
        expressions.append(sql)
        expression_params.extend(params)
        # order of precedence: keep the combination parenthesized.
        expression_wrapper = '(%s)'
        sql = connection.ops.combine_duration_expression(self.connector, expressions)
        return expression_wrapper % sql, expression_params
class F(Combinable):
    """
    An object capable of resolving references to existing query objects.
    """

    def __init__(self, name):
        """
        Arguments:
         * name: the name of the field this expression references
        """
        self.name = name

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self.name)

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # The query object knows how to turn "field__path" into a column ref.
        return query.resolve_ref(self.name, allow_joins, reuse, summarize)

    def refs_aggregate(self, existing_aggregates):
        name_parts = self.name.split(LOOKUP_SEP)
        return refs_aggregate(name_parts, existing_aggregates)

    def asc(self):
        return OrderBy(self)

    def desc(self):
        return OrderBy(self, descending=True)
class Func(Expression):
    """
    A SQL function call.

    Subclasses set ``function`` (the SQL function name) and may override
    ``template``/``arg_joiner``. Extra keyword arguments are stored in
    ``self.extra`` and made available to the template.
    """
    function = None
    template = '%(function)s(%(expressions)s)'
    arg_joiner = ', '

    def __init__(self, *expressions, **extra):
        output_field = extra.pop('output_field', None)
        super(Func, self).__init__(output_field=output_field)
        self.source_expressions = self._parse_expressions(*expressions)
        self.extra = extra

    def __repr__(self):
        args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
        extra = ', '.join(str(key) + '=' + str(val) for key, val in self.extra.items())
        if extra:
            return "{}({}, {})".format(self.__class__.__name__, args, extra)
        return "{}({})".format(self.__class__.__name__, args)

    def get_source_expressions(self):
        return self.source_expressions

    def set_source_expressions(self, exprs):
        self.source_expressions = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        for pos, arg in enumerate(c.source_expressions):
            c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c

    def as_sql(self, compiler, connection, function=None, template=None):
        """Compile the function call.

        ``function``/``template`` allow backend-specific overrides
        (e.g. from as_<vendor> methods).
        """
        connection.ops.check_expression_support(self)
        sql_parts = []
        params = []
        for arg in self.source_expressions:
            arg_sql, arg_params = compiler.compile(arg)
            sql_parts.append(arg_sql)
            params.extend(arg_params)
        # Work on a copy: the original code wrote the 'function',
        # 'expressions' and 'field' keys into self.extra, so a
        # backend-specific override passed for one compilation leaked
        # into every later compilation of the same node.
        data = self.extra.copy()
        if function is not None:
            data['function'] = function
        else:
            data.setdefault('function', self.function)
        data['expressions'] = data['field'] = self.arg_joiner.join(sql_parts)
        template = template or data.get('template', self.template)
        return template % data, params

    def as_sqlite(self, *args, **kwargs):
        # SQLite needs an explicit cast to get proper decimal semantics.
        sql, params = self.as_sql(*args, **kwargs)
        try:
            if self.output_field.get_internal_type() == 'DecimalField':
                sql = 'CAST(%s AS NUMERIC)' % sql
        except FieldError:
            pass
        return sql, params

    def copy(self):
        copy = super(Func, self).copy()
        copy.source_expressions = self.source_expressions[:]
        copy.extra = self.extra.copy()
        return copy
class Value(Expression):
    """
    Represents a wrapped value as a node within an expression
    """

    def __init__(self, value, output_field=None):
        """
        Arguments:
         * value: the value this expression represents. The value will be
           added into the sql parameter list and properly quoted.
         * output_field: an instance of the model field type that this
           expression will return, such as IntegerField() or CharField().
        """
        super(Value, self).__init__(output_field=output_field)
        self.value = value

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.value)

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        val = self.value
        # Read _output_field directly so a missing field does not raise.
        output_field = self._output_field
        if output_field is not None:
            if self.for_save:
                val = output_field.get_db_prep_save(val, connection=connection)
            else:
                val = output_field.get_db_prep_value(val, connection=connection)
        if val is None:
            # cx_Oracle does not always convert None to the appropriate
            # NULL type (like in case expressions using numbers), so we
            # use a literal SQL NULL
            return 'NULL', []
        return '%s', [val]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        clone = super(Value, self).resolve_expression(
            query, allow_joins, reuse, summarize, for_save)
        clone.for_save = for_save
        return clone

    def get_group_by_cols(self):
        # A literal never contributes to GROUP BY.
        return []
class DurationValue(Value):
    """A Value wrapping a duration (timedelta) literal."""

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        native = (connection.features.has_native_duration_field and
                  connection.features.driver_supports_timedelta_args)
        if native:
            # The driver accepts the duration as a plain parameter.
            return super(DurationValue, self).as_sql(compiler, connection)
        return connection.ops.date_interval_sql(self.value)
class RawSQL(Expression):
    """Wrap a raw SQL fragment plus its parameters as an expression node."""

    def __init__(self, sql, params, output_field=None):
        if output_field is None:
            output_field = fields.Field()
        self.sql = sql
        self.params = params
        super(RawSQL, self).__init__(output_field=output_field)

    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)

    def as_sql(self, compiler, connection):
        # Parenthesize so the fragment composes safely with its context.
        return '(%s)' % self.sql, self.params

    def get_group_by_cols(self):
        return [self]
class Star(Expression):
    """The SQL ``*`` selector (e.g. in COUNT(*))."""

    def __repr__(self):
        return "'*'"

    def as_sql(self, compiler, connection):
        return '*', []
class Random(Expression):
    """The backend's SQL random() function; yields a float."""

    def __init__(self):
        super(Random, self).__init__(output_field=fields.FloatField())

    def __repr__(self):
        return "Random()"

    def as_sql(self, compiler, connection):
        return connection.ops.random_function_sql(), []
class Col(Expression):
    """Reference to a concrete ``alias.column`` of the query."""

    def __init__(self, alias, target, output_field=None):
        # The target field doubles as the output field unless overridden.
        if output_field is None:
            output_field = target
        super(Col, self).__init__(output_field=output_field)
        self.alias = alias
        self.target = target

    def __repr__(self):
        return "{}({}, {})".format(
            self.__class__.__name__, self.alias, self.target)

    def as_sql(self, compiler, connection):
        quote = compiler.quote_name_unless_alias
        return "%s.%s" % (quote(self.alias), quote(self.target.column)), []

    def relabeled_clone(self, relabels):
        new_alias = relabels.get(self.alias, self.alias)
        return self.__class__(new_alias, self.target, self.output_field)

    def get_group_by_cols(self):
        return [self]

    def get_db_converters(self, connection):
        converters = self.output_field.get_db_converters(connection)
        if self.target == self.output_field:
            return converters
        # Distinct target/output fields: apply both sets of converters.
        return converters + self.target.get_db_converters(connection)
class Ref(Expression):
    """
    Reference to column alias of the query. For example, Ref('sum_cost') in
    qs.annotate(sum_cost=Sum('cost')) query.
    """

    def __init__(self, refs, source):
        super(Ref, self).__init__()
        self.refs = refs
        self.source = source

    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)

    def get_source_expressions(self):
        return [self.source]

    def set_source_expressions(self, exprs):
        self.source, = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # The sub-expression `source` has already been resolved, as this is
        # just a reference to the name of `source`.
        return self

    def relabeled_clone(self, relabels):
        # An alias reference is unaffected by table relabeling.
        return self

    def as_sql(self, compiler, connection):
        return connection.ops.quote_name(self.refs), []

    def get_group_by_cols(self):
        return [self]
class ExpressionWrapper(Expression):
    """
    An expression that can wrap another expression so that it can provide
    extra context to the inner expression, such as the output_field.
    """

    def __init__(self, expression, output_field):
        super(ExpressionWrapper, self).__init__(output_field=output_field)
        self.expression = expression

    def set_source_expressions(self, exprs):
        self.expression = exprs[0]

    def get_source_expressions(self):
        return [self.expression]

    def as_sql(self, compiler, connection):
        # SQL generation is delegated entirely to the wrapped expression.
        return self.expression.as_sql(compiler, connection)

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.expression)
class When(Expression):
    """One ``WHEN <condition> THEN <result>`` branch of a searched CASE."""
    template = 'WHEN %(condition)s THEN %(result)s'

    def __init__(self, condition=None, then=None, **lookups):
        # Either a Q object is given, or keyword lookups that build one.
        if lookups and condition is None:
            condition, lookups = Q(**lookups), None
        if condition is None or not isinstance(condition, Q) or lookups:
            raise TypeError("__init__() takes either a Q object or lookups as keyword arguments")
        super(When, self).__init__(output_field=None)
        self.condition = condition
        self.result = self._parse_expressions(then)[0]

    def __str__(self):
        return "WHEN %r THEN %r" % (self.condition, self.result)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    def get_source_expressions(self):
        return [self.condition, self.result]

    def set_source_expressions(self, exprs):
        self.condition, self.result = exprs

    def get_source_fields(self):
        # We're only interested in the fields of the result expressions.
        return [self.result._output_field_or_none]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        clone = self.copy()
        clone.is_summary = summarize
        # The condition is never saved, so for_save is forced to False.
        clone.condition = clone.condition.resolve_expression(
            query, allow_joins, reuse, summarize, False)
        clone.result = clone.result.resolve_expression(
            query, allow_joins, reuse, summarize, for_save)
        return clone

    def as_sql(self, compiler, connection, template=None):
        connection.ops.check_expression_support(self)
        condition_sql, condition_params = compiler.compile(self.condition)
        result_sql, result_params = compiler.compile(self.result)
        template_params = {
            'condition': condition_sql,
            'result': result_sql,
        }
        sql_params = []
        sql_params.extend(condition_params)
        sql_params.extend(result_params)
        return (template or self.template) % template_params, sql_params

    def get_group_by_cols(self):
        # This is not a complete expression and cannot be used in GROUP BY.
        return [col for source in self.get_source_expressions()
                for col in source.get_group_by_cols()]
class Case(Expression):
    """
    An SQL searched CASE expression:

        CASE
            WHEN n > 0
                THEN 'positive'
            WHEN n < 0
                THEN 'negative'
            ELSE 'zero'
        END

    Built from When() branches plus an optional ``default``.
    """
    template = 'CASE %(cases)s ELSE %(default)s END'
    case_joiner = ' '

    def __init__(self, *cases, **extra):
        if not all(isinstance(case, When) for case in cases):
            raise TypeError("Positional arguments must all be When objects.")
        default = extra.pop('default', None)
        output_field = extra.pop('output_field', None)
        super(Case, self).__init__(output_field)
        self.cases = list(cases)
        self.default = self._parse_expressions(default)[0]

    def __str__(self):
        return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    def get_source_expressions(self):
        return self.cases + [self.default]

    def set_source_expressions(self, exprs):
        # Last element is the default, the rest are the When branches.
        self.cases = exprs[:-1]
        self.default = exprs[-1]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        clone = self.copy()
        clone.is_summary = summarize
        for pos, case in enumerate(clone.cases):
            clone.cases[pos] = case.resolve_expression(
                query, allow_joins, reuse, summarize, for_save)
        clone.default = clone.default.resolve_expression(
            query, allow_joins, reuse, summarize, for_save)
        return clone

    def copy(self):
        clone = super(Case, self).copy()
        clone.cases = clone.cases[:]
        return clone

    def as_sql(self, compiler, connection, template=None, extra=None):
        connection.ops.check_expression_support(self)
        if not self.cases:
            # No WHEN branches: the CASE collapses to its default.
            return compiler.compile(self.default)
        template_params = dict(extra) if extra else {}
        case_parts = []
        sql_params = []
        for case in self.cases:
            case_sql, case_params = compiler.compile(case)
            case_parts.append(case_sql)
            sql_params.extend(case_params)
        default_sql, default_params = compiler.compile(self.default)
        sql_params.extend(default_params)
        template_params['cases'] = self.case_joiner.join(case_parts)
        template_params['default'] = default_sql
        sql = (template or self.template) % template_params
        if self._output_field_or_none is not None:
            # Cast so all branches agree on one database type.
            sql = connection.ops.unification_cast_sql(self.output_field) % sql
        return sql, sql_params
class Date(Expression):
    """
    Add a date selection column.
    """

    def __init__(self, lookup, lookup_type):
        super(Date, self).__init__(output_field=fields.DateField())
        self.lookup = lookup
        self.col = None
        self.lookup_type = lookup_type

    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.lookup, self.lookup_type)

    def get_source_expressions(self):
        return [self.col]

    def set_source_expressions(self, exprs):
        self.col, = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        clone = self.copy()
        clone.col = query.resolve_ref(self.lookup, allow_joins, reuse, summarize)
        field = clone.col.output_field
        assert isinstance(field, fields.DateField), "%r isn't a DateField." % field.name
        if settings.USE_TZ:
            # DateTimeField subclasses DateField, so exclude it explicitly
            # when time zone support is active.
            assert not isinstance(field, fields.DateTimeField), (
                "%r is a DateTimeField, not a DateField." % field.name
            )
        return clone

    def as_sql(self, compiler, connection):
        sql, params = self.col.as_sql(compiler, connection)
        assert not params
        return connection.ops.date_trunc_sql(self.lookup_type, sql), []

    def copy(self):
        clone = super(Date, self).copy()
        clone.lookup = self.lookup
        clone.lookup_type = self.lookup_type
        return clone

    def convert_value(self, value, expression, connection, context):
        # Some backends hand back a datetime; trim it to a date.
        if isinstance(value, datetime.datetime):
            value = value.date()
        return value
class DateTime(Expression):
    """
    Add a datetime selection column.
    """

    def __init__(self, lookup, lookup_type, tzinfo):
        super(DateTime, self).__init__(output_field=fields.DateTimeField())
        self.lookup = lookup
        self.col = None
        self.lookup_type = lookup_type
        # Cache the timezone name: the backend's truncation SQL wants it.
        if tzinfo is None:
            self.tzname = None
        else:
            self.tzname = timezone._get_timezone_name(tzinfo)
        self.tzinfo = tzinfo

    def __repr__(self):
        return "{}({}, {}, {})".format(
            self.__class__.__name__, self.lookup, self.lookup_type, self.tzinfo)

    def get_source_expressions(self):
        return [self.col]

    def set_source_expressions(self, exprs):
        self.col, = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        clone = self.copy()
        clone.col = query.resolve_ref(self.lookup, allow_joins, reuse, summarize)
        field = clone.col.output_field
        assert isinstance(field, fields.DateTimeField), (
            "%r isn't a DateTimeField." % field.name
        )
        return clone

    def as_sql(self, compiler, connection):
        sql, params = self.col.as_sql(compiler, connection)
        assert not params
        return connection.ops.datetime_trunc_sql(self.lookup_type, sql, self.tzname)

    def copy(self):
        clone = super(DateTime, self).copy()
        clone.lookup = self.lookup
        clone.lookup_type = self.lookup_type
        clone.tzname = self.tzname
        return clone

    def convert_value(self, value, expression, connection, context):
        if settings.USE_TZ:
            if value is None:
                raise ValueError(
                    "Database returned an invalid value in QuerySet.datetimes(). "
                    "Are time zone definitions for your database and pytz installed?"
                )
            # Reinterpret the naive database value in self.tzinfo.
            value = value.replace(tzinfo=None)
            value = timezone.make_aware(value, self.tzinfo)
        return value
class OrderBy(BaseExpression):
    """Wrap an expression with an ORDER BY direction (ASC/DESC)."""
    template = '%(expression)s %(ordering)s'

    def __init__(self, expression, descending=False):
        """
        Arguments:
         * expression: the wrapped expression; must support the expression
           API (i.e. provide resolve_expression).
         * descending: True for DESC ordering, False (default) for ASC.
        """
        self.descending = descending
        if not hasattr(expression, 'resolve_expression'):
            raise ValueError('expression must be an expression type')
        self.expression = expression

    def __repr__(self):
        return "{}({}, descending={})".format(
            self.__class__.__name__, self.expression, self.descending)

    def set_source_expressions(self, exprs):
        self.expression = exprs[0]

    def get_source_expressions(self):
        return [self.expression]

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        expression_sql, params = compiler.compile(self.expression)
        placeholders = {
            'expression': expression_sql,
            'ordering': 'DESC' if self.descending else 'ASC',
        }
        return (self.template % placeholders).rstrip(), params

    def get_group_by_cols(self):
        cols = []
        for source in self.get_source_expressions():
            cols.extend(source.get_group_by_cols())
        return cols

    def reverse_ordering(self):
        """Flip the direction in place; returns self for chaining."""
        self.descending = not self.descending
        return self

    def asc(self):
        """Switch to ascending order in place.

        Returns self for chaining, consistent with reverse_ordering()
        (previously returned None, which broke ``expr = expr.asc()``).
        """
        self.descending = False
        return self

    def desc(self):
        """Switch to descending order in place; returns self for chaining."""
        self.descending = True
        return self
|
claneys/shinken | refs/heads/master | shinken/objects/service.py | 2 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
""" This Class is the service one, s it manage all service specific thing.
If you look at the scheduling part, look at the scheduling item class"""
import time
import re
import itertools
try:
from ClusterShell.NodeSet import NodeSet, NodeSetParseRangeError
except ImportError:
NodeSet = None
from shinken.objects.item import Items
from shinken.objects.schedulingitem import SchedulingItem
from shinken.autoslots import AutoSlots
from shinken.util import strip_and_uniq, format_t_into_dhms_format, to_svc_hst_distinct_lists, \
get_key_value_sequence, GET_KEY_VALUE_SEQUENCE_ERROR_SYNTAX, GET_KEY_VALUE_SEQUENCE_ERROR_NODEFAULT, \
GET_KEY_VALUE_SEQUENCE_ERROR_NODE, to_list_string_of_names, to_list_of_names, to_name_if_possible, \
is_complex_expr
from shinken.property import BoolProp, IntegerProp, FloatProp,\
CharProp, StringProp, ListProp, DictProp
from shinken.macroresolver import MacroResolver
from shinken.eventhandler import EventHandler
from shinken.log import logger, naglog_result
from shinken.util import filter_service_by_regex_name
from shinken.util import filter_service_by_host_name
class Service(SchedulingItem):
# AutoSlots create the __slots__ with properties and
# running_properties names
__metaclass__ = AutoSlots
# Every service have a unique ID, and 0 is always special in
# database and co...
id = 1
# The host and service do not have the same 0 value, now yes :)
ok_up = 'OK'
# used by item class for format specific value like for Broks
my_type = 'service'
# properties defined by configuration
# required: is required in conf
# default: default value if no set in conf
# pythonize: function to call when transforming string to python object
# fill_brok: if set, send to broker. there are two categories:
# full_status for initial and update status, check_result for check results
# no_slots: do not take this property for __slots__
properties = SchedulingItem.properties.copy()
properties.update({
'host_name':
StringProp(fill_brok=['full_status', 'check_result', 'next_schedule']),
'hostgroup_name':
StringProp(default='', fill_brok=['full_status'], merging='join'),
'service_description':
StringProp(fill_brok=['full_status', 'check_result', 'next_schedule']),
'display_name':
StringProp(default='', fill_brok=['full_status']),
'servicegroups':
ListProp(default=[], fill_brok=['full_status'],
brok_transformation=to_list_string_of_names, merging='join'),
'is_volatile':
BoolProp(default=False, fill_brok=['full_status']),
'check_command':
StringProp(fill_brok=['full_status']),
'initial_state':
CharProp(default='', fill_brok=['full_status']),
'initial_output':
StringProp(default='', fill_brok=['full_status']),
'max_check_attempts':
IntegerProp(default=1, fill_brok=['full_status']),
'check_interval':
IntegerProp(fill_brok=['full_status', 'check_result']),
'retry_interval':
IntegerProp(fill_brok=['full_status', 'check_result']),
'active_checks_enabled':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'passive_checks_enabled':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'check_period':
StringProp(brok_transformation=to_name_if_possible, fill_brok=['full_status']),
'obsess_over_service':
BoolProp(default=False, fill_brok=['full_status'], retention=True),
'check_freshness':
BoolProp(default=False, fill_brok=['full_status']),
'freshness_threshold':
IntegerProp(default=0, fill_brok=['full_status']),
'event_handler':
StringProp(default='', fill_brok=['full_status']),
'event_handler_enabled':
BoolProp(default=False, fill_brok=['full_status'], retention=True),
'low_flap_threshold':
IntegerProp(default=-1, fill_brok=['full_status']),
'high_flap_threshold':
IntegerProp(default=-1, fill_brok=['full_status']),
'flap_detection_enabled':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'flap_detection_options':
ListProp(default=['o', 'w', 'c', 'u'], fill_brok=['full_status'], split_on_coma=True),
'process_perf_data':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'retain_status_information':
BoolProp(default=True, fill_brok=['full_status']),
'retain_nonstatus_information':
BoolProp(default=True, fill_brok=['full_status']),
'notification_interval':
IntegerProp(default=60, fill_brok=['full_status']),
'first_notification_delay':
IntegerProp(default=0, fill_brok=['full_status']),
'notification_period':
StringProp(brok_transformation=to_name_if_possible, fill_brok=['full_status']),
'notification_options':
ListProp(default=['w', 'u', 'c', 'r', 'f', 's'],
fill_brok=['full_status'], split_on_coma=True),
'notifications_enabled':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'contacts':
ListProp(default=[], brok_transformation=to_list_of_names,
fill_brok=['full_status'], merging='join'),
'contact_groups':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
'stalking_options':
ListProp(default=[''], fill_brok=['full_status'], merging='join'),
'notes':
StringProp(default='', fill_brok=['full_status']),
'notes_url':
StringProp(default='', fill_brok=['full_status']),
'action_url':
StringProp(default='', fill_brok=['full_status']),
'icon_image':
StringProp(default='', fill_brok=['full_status']),
'icon_image_alt':
StringProp(default='', fill_brok=['full_status']),
'icon_set':
StringProp(default='', fill_brok=['full_status']),
'failure_prediction_enabled':
BoolProp(default=False, fill_brok=['full_status']),
'parallelize_check':
BoolProp(default=True, fill_brok=['full_status']),
# Shinken specific
'poller_tag':
StringProp(default='None'),
'reactionner_tag':
StringProp(default='None'),
'resultmodulations':
ListProp(default=[], merging='join'),
'business_impact_modulations':
ListProp(default=[], merging='join'),
'escalations':
ListProp(default=[], fill_brok=['full_status'], merging='join', split_on_coma=True),
'maintenance_period':
StringProp(default='',
brok_transformation=to_name_if_possible, fill_brok=['full_status']),
'time_to_orphanage':
IntegerProp(default=300, fill_brok=['full_status']),
'merge_host_contacts':
BoolProp(default=False, fill_brok=['full_status']),
'labels':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
'host_dependency_enabled':
BoolProp(default=True, fill_brok=['full_status']),
# BUSINESS CORRELATOR PART
# Business rules output format template
'business_rule_output_template':
StringProp(default='', fill_brok=['full_status']),
# Business rules notifications mode
'business_rule_smart_notifications':
BoolProp(default=False, fill_brok=['full_status']),
# Treat downtimes as acknowledgements in smart notifications
'business_rule_downtime_as_ack':
BoolProp(default=False, fill_brok=['full_status']),
# Enforces child nodes notification options
'business_rule_host_notification_options':
ListProp(default=[], fill_brok=['full_status'], split_on_coma=True),
'business_rule_service_notification_options':
ListProp(default=[], fill_brok=['full_status'], split_on_coma=True),
# Easy Service dep definition
'service_dependencies': # TODO: find a way to brok it?
ListProp(default=None, merging='join', split_on_coma=True),
# service generator
'duplicate_foreach':
StringProp(default=''),
'default_value':
StringProp(default=''),
# Business_Impact value
'business_impact':
IntegerProp(default=2, fill_brok=['full_status']),
# Load some triggers
'trigger':
StringProp(default=''),
'trigger_name':
StringProp(default=''),
'trigger_broker_raise_enabled':
BoolProp(default=False),
# Trending
'trending_policies':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
        # Our check ways. By default empty, but will be filled by an inner one if needed
'checkmodulations':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
'macromodulations':
ListProp(default=[], merging='join'),
# Custom views
'custom_views':
ListProp(default=[], fill_brok=['full_status'], merging='join'),
# UI aggregation
'aggregation':
StringProp(default='', fill_brok=['full_status']),
# Snapshot part
'snapshot_enabled':
BoolProp(default=False),
'snapshot_command':
StringProp(default=''),
'snapshot_period':
StringProp(default=''),
'snapshot_criteria':
ListProp(default=['w', 'c', 'u'], fill_brok=['full_status'], merging='join'),
'snapshot_interval':
IntegerProp(default=5),
})
# properties used in the running state
running_properties = SchedulingItem.running_properties.copy()
running_properties.update({
'modified_attributes':
IntegerProp(default=0L, fill_brok=['full_status'], retention=True),
'last_chk':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'next_chk':
IntegerProp(default=0, fill_brok=['full_status', 'next_schedule'], retention=True),
'in_checking':
BoolProp(default=False,
fill_brok=['full_status', 'check_result', 'next_schedule'], retention=True),
'in_maintenance':
IntegerProp(default=None, fill_brok=['full_status'], retention=True),
'latency':
FloatProp(default=0, fill_brok=['full_status', 'check_result'], retention=True,),
'attempt':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'state':
StringProp(default='PENDING',
fill_brok=['full_status', 'check_result'], retention=True),
'state_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'current_event_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_event_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_state':
StringProp(default='PENDING',
fill_brok=['full_status', 'check_result'], retention=True),
'last_state_type':
StringProp(default='HARD', fill_brok=['full_status', 'check_result'], retention=True),
'last_state_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_state_change':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
'last_hard_state_change':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
'last_hard_state':
StringProp(default='PENDING', fill_brok=['full_status'], retention=True),
'last_hard_state_id':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'last_time_ok':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_time_warning':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_time_critical':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'last_time_unknown':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'duration_sec':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'state_type':
StringProp(default='HARD', fill_brok=['full_status', 'check_result'], retention=True),
'state_type_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'output':
StringProp(default='', fill_brok=['full_status', 'check_result'], retention=True),
'long_output':
StringProp(default='', fill_brok=['full_status', 'check_result'], retention=True),
'is_flapping':
BoolProp(default=False, fill_brok=['full_status'], retention=True),
# dependencies for actions like notif of event handler,
# so AFTER check return
'act_depend_of':
ListProp(default=[]),
# dependencies for checks raise, so BEFORE checks
'chk_depend_of':
ListProp(default=[]),
# elements that depend of me, so the reverse than just upper
'act_depend_of_me':
ListProp(default=[]),
# elements that depend of me
'chk_depend_of_me':
ListProp(default=[]),
'last_state_update':
FloatProp(default=0.0, fill_brok=['full_status'], retention=True),
# no brok because checks are too linked
'checks_in_progress':
ListProp(default=[]),
# no broks because notifications are too linked
'notifications_in_progress': DictProp(default={}, retention=True),
'downtimes':
ListProp(default=[], fill_brok=['full_status'], retention=True),
'comments':
ListProp(default=[], fill_brok=['full_status'], retention=True),
'flapping_changes':
ListProp(default=[], fill_brok=['full_status'], retention=True),
'flapping_comment_id':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'percent_state_change':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
'problem_has_been_acknowledged':
BoolProp(default=False, fill_brok=['full_status', 'check_result'], retention=True),
'acknowledgement':
StringProp(default=None, retention=True),
'acknowledgement_type':
IntegerProp(default=1, fill_brok=['full_status', 'check_result'], retention=True),
'check_type':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'has_been_checked':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'should_be_scheduled':
IntegerProp(default=1, fill_brok=['full_status'], retention=True),
'last_problem_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'current_problem_id':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'execution_time':
FloatProp(default=0.0, fill_brok=['full_status', 'check_result'], retention=True),
'u_time':
FloatProp(default=0.0),
's_time':
FloatProp(default=0.0),
'last_notification':
FloatProp(default=0.0, fill_brok=['full_status'], retention=True),
'current_notification_number':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'current_notification_id':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'check_flapping_recovery_notification':
BoolProp(default=True, fill_brok=['full_status'], retention=True),
'scheduled_downtime_depth':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'pending_flex_downtime':
IntegerProp(default=0, fill_brok=['full_status'], retention=True),
'timeout':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'start_time':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'end_time':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'early_timeout':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'return_code':
IntegerProp(default=0, fill_brok=['full_status', 'check_result'], retention=True),
'perf_data':
StringProp(default='', fill_brok=['full_status', 'check_result'], retention=True),
'last_perf_data':
StringProp(default='', retention=True),
'host':
StringProp(default=None),
'customs':
DictProp(default={}, fill_brok=['full_status']),
# Warning: for the notified_contacts retention save,
# we save only the names of the contacts, and we should RELINK
# them when we load it.
# use for having all contacts we have notified
'notified_contacts': ListProp(default=set(),
retention=True,
retention_preparation=to_list_of_names),
'in_scheduled_downtime': BoolProp(
default=False, fill_brok=['full_status', 'check_result'], retention=True),
'in_scheduled_downtime_during_last_check': BoolProp(default=False, retention=True),
'actions': ListProp(default=[]), # put here checks and notif raised
'broks': ListProp(default=[]), # and here broks raised
# Problem/impact part
'is_problem': BoolProp(default=False, fill_brok=['full_status']),
'is_impact': BoolProp(default=False, fill_brok=['full_status']),
# the save value of our business_impact for "problems"
'my_own_business_impact': IntegerProp(default=-1, fill_brok=['full_status']),
# list of problems that make us an impact
'source_problems': ListProp(default=[],
fill_brok=['full_status'],
brok_transformation=to_svc_hst_distinct_lists),
# list of the impact I'm the cause of
'impacts': ListProp(default=[],
fill_brok=['full_status'],
brok_transformation=to_svc_hst_distinct_lists),
# keep a trace of the old state before being an impact
'state_before_impact': StringProp(default='PENDING'),
# keep a trace of the old state id before being an impact
'state_id_before_impact': IntegerProp(default=0),
# if the state change, we know so we do not revert it
'state_changed_since_impact': BoolProp(default=False),
# BUSINESS CORRELATOR PART
# Say if we are business based rule or not
'got_business_rule': BoolProp(default=False, fill_brok=['full_status']),
# Previously processed business rule (with macro expanded)
'processed_business_rule': StringProp(default="", fill_brok=['full_status']),
# Our Dependency node for the business rule
'business_rule': StringProp(default=None),
# Here it's the elements we are depending on
# so our parents as network relation, or a host
# we are depending in a hostdependency
# or even if we are business based.
'parent_dependencies': StringProp(default=set(),
brok_transformation=to_svc_hst_distinct_lists,
fill_brok=['full_status']),
# Here it's the guys that depend on us. So it's the total
# opposite of the parent_dependencies
'child_dependencies': StringProp(brok_transformation=to_svc_hst_distinct_lists,
default=set(), fill_brok=['full_status']),
# Manage the unknown/unreach during hard state
'in_hard_unknown_reach_phase': BoolProp(default=False, retention=True),
'was_in_hard_unknown_reach_phase': BoolProp(default=False, retention=True),
'state_before_hard_unknown_reach_phase': StringProp(default='OK', retention=True),
# Set if the element just change its father/son topology
'topology_change': BoolProp(default=False, fill_brok=['full_status']),
# Trigger list
'triggers': ListProp(default=[]),
# snapshots part
'last_snapshot': IntegerProp(default=0, fill_brok=['full_status'], retention=True),
# Keep the string of the last command launched for this element
'last_check_command': StringProp(default=''),
})
# Mapping between Macros and properties (can be prop or a function)
macros = {
'SERVICEDESC': 'service_description',
'SERVICEDISPLAYNAME': 'display_name',
'SERVICESTATE': 'state',
'SERVICESTATEID': 'state_id',
'LASTSERVICESTATE': 'last_state',
'LASTSERVICESTATEID': 'last_state_id',
'SERVICESTATETYPE': 'state_type',
'SERVICEATTEMPT': 'attempt',
'MAXSERVICEATTEMPTS': 'max_check_attempts',
'SERVICEISVOLATILE': 'is_volatile',
'SERVICEEVENTID': 'current_event_id',
'LASTSERVICEEVENTID': 'last_event_id',
'SERVICEPROBLEMID': 'current_problem_id',
'LASTSERVICEPROBLEMID': 'last_problem_id',
'SERVICELATENCY': 'latency',
'SERVICEEXECUTIONTIME': 'execution_time',
'SERVICEDURATION': 'get_duration',
'SERVICEDURATIONSEC': 'get_duration_sec',
'SERVICEDOWNTIME': 'get_downtime',
'SERVICEPERCENTCHANGE': 'percent_state_change',
'SERVICEGROUPNAME': 'get_groupname',
'SERVICEGROUPNAMES': 'get_groupnames',
'LASTSERVICECHECK': 'last_chk',
'LASTSERVICESTATECHANGE': 'last_state_change',
'LASTSERVICEOK': 'last_time_ok',
'LASTSERVICEWARNING': 'last_time_warning',
'LASTSERVICEUNKNOWN': 'last_time_unknown',
'LASTSERVICECRITICAL': 'last_time_critical',
'SERVICEOUTPUT': 'output',
'LONGSERVICEOUTPUT': 'long_output',
'SERVICEPERFDATA': 'perf_data',
'LASTSERVICEPERFDATA': 'last_perf_data',
'SERVICECHECKCOMMAND': 'get_check_command',
'SERVICEACKAUTHOR': 'get_ack_author_name',
'SERVICEACKAUTHORNAME': 'get_ack_author_name',
'SERVICEACKAUTHORALIAS': 'get_ack_author_name',
'SERVICEACKCOMMENT': 'get_ack_comment',
'SERVICEACTIONURL': 'action_url',
'SERVICENOTESURL': 'notes_url',
'SERVICENOTES': 'notes',
'SERVICEBUSINESSIMPACT': 'business_impact',
# Business rules output formatting related macros
'STATUS': 'get_status',
'SHORTSTATUS': 'get_short_status',
'FULLNAME': 'get_full_name',
}
# This tab is used to transform old parameters name into new ones
# so from Nagios2 format, to Nagios3 ones.
# Or Shinken deprecated names like criticity
old_properties = {
'normal_check_interval': 'check_interval',
'retry_check_interval': 'retry_interval',
'criticity': 'business_impact',
'hostgroup': 'hostgroup_name',
'hostgroups': 'hostgroup_name',
# 'criticitymodulations': 'business_impact_modulations',
}
#######
# __ _ _ _
# / _(_) | | (_)
# ___ ___ _ __ | |_ _ __ _ _ _ _ __ __ _| |_ _ ___ _ __
# / __/ _ \| '_ \| _| |/ _` | | | | '__/ _` | __| |/ _ \| '_ \
# | (_| (_) | | | | | | | (_| | |_| | | | (_| | |_| | (_) | | | |
# \___\___/|_| |_|_| |_|\__, |\__,_|_| \__,_|\__|_|\___/|_| |_|
# __/ |
# |___/
######
def __repr__(self):
return '<Service host_name=%r desc=%r name=%r use=%r />' % (
getattr(self, 'host_name', None),
getattr(self, 'service_description', None),
getattr(self, 'name', None),
getattr(self, 'use', None)
)
__str__ = __repr__
@property
def unique_key(self): # actually only used for (un)indexitem() via name_property..
return (self.host_name, self.service_description)
@property
def display_name(self):
display_name = getattr(self, '_display_name', None)
if not display_name:
return self.service_description
return display_name
@display_name.setter
def display_name(self, display_name):
self._display_name = display_name
# Give a nice name output
def get_name(self):
if hasattr(self, 'service_description'):
return self.service_description
if hasattr(self, 'name'):
return self.name
return 'SERVICE-DESCRIPTION-MISSING'
# Get the servicegroups names
def get_groupnames(self):
return ','.join([sg.get_name() for sg in self.servicegroups])
# Need the whole name for debugging purpose
def get_dbg_name(self):
return "%s/%s" % (self.host.host_name, self.service_description)
def get_full_name(self):
if self.host and hasattr(self.host, 'host_name') and hasattr(self, 'service_description'):
return "%s/%s" % (self.host.host_name, self.service_description)
return 'UNKNOWN-SERVICE'
# Get our realm, so in fact our host one
def get_realm(self):
if self.host is None:
return None
return self.host.get_realm()
    def get_hostgroups(self):
        """Hostgroups are carried by the linked host, not the service."""
        return self.host.hostgroups
    def get_host_tags(self):
        """Template tags of the linked host."""
        return self.host.tags
    def get_service_tags(self):
        """Template tags of this service itself."""
        return self.tags
def is_duplicate(self):
"""
Indicates if a service holds a duplicate_foreach statement
"""
if getattr(self, "duplicate_foreach", None):
return True
else:
return False
def set_initial_state(self):
mapping = {
"o": {
"state": "OK",
"state_id": 0
},
"w": {
"state": "WARNING",
"state_id": 1
},
"c": {
"state": "CRITICAL",
"state_id": 2
},
"u": {
"state": "UNKNOWN",
"state_id": 3
},
}
SchedulingItem.set_initial_state(self, mapping)
    # Check is required prop are set:
    # template are always correct
    # contacts OR contactgroups is need
    def is_correct(self):
        """Validate this service's configuration.

        Logs every missing required property, accumulated configuration
        warning/error, invalid command/business rule, missing host link and
        illegal character in the description.  Also applies a few defaults
        (notification_period, check_period, display_name) as a side effect.
        Returns True only when the service is usable.
        """
        state = True
        cls = self.__class__
        source = getattr(self, 'imported_from', 'unknown')
        desc = getattr(self, 'service_description', 'unnamed')
        hname = getattr(self, 'host_name', 'unnamed')
        # These are validated (or defaulted) separately below
        special_properties = ('check_period', 'notification_interval', 'host_name',
                              'hostgroup_name', 'notification_period')
        for prop, entry in cls.properties.items():
            if prop not in special_properties:
                if not hasattr(self, prop) and entry.required:
                    logger.error("The service %s on host '%s' does not have %s", desc, hname, prop)
                    state = False  # Bad boy...
        # Then look if we have some errors in the conf
        # Warnings are just printed; errors flip the return state
        for err in self.configuration_warnings:
            logger.warning("[service::%s] %s", desc, err)
        # Raise all previously seen errors, like unknown contacts and co
        if self.configuration_errors != []:
            state = False
            for err in self.configuration_errors:
                logger.error("[service::%s] %s", self.get_full_name(), err)
        # If no notif period, set it to None, mean 24x7
        if not hasattr(self, 'notification_period'):
            self.notification_period = None
        # Ok now we manage special cases...
        if self.notifications_enabled and self.contacts == []:
            logger.warning("The service '%s' in the host '%s' does not have "
                           "contacts nor contact_groups in '%s'", desc, hname, source)
        # Set display_name if need
        if getattr(self, 'display_name', '') == '':
            self.display_name = getattr(self, 'service_description', '')
        # If we got an event handler, it should be valid
        if getattr(self, 'event_handler', None) and not self.event_handler.is_valid():
            logger.error("%s: my event_handler %s is invalid",
                         self.get_name(), self.event_handler.command)
            state = False
        if not hasattr(self, 'check_command'):
            logger.error("%s: I've got no check_command", self.get_name())
            state = False
        # Ok got a command, but maybe it's invalid
        else:
            if not self.check_command.is_valid():
                logger.error("%s: my check_command %s is invalid",
                             self.get_name(), self.check_command.command)
                state = False
            if self.got_business_rule:
                if not self.business_rule.is_valid():
                    logger.error("%s: my business rule is invalid", self.get_name(),)
                    for bperror in self.business_rule.configuration_errors:
                        logger.error("%s: %s", self.get_name(), bperror)
                    state = False
        if not hasattr(self, 'notification_interval') \
                and self.notifications_enabled is True:
            logger.error("%s: I've got no notification_interval but "
                         "I've got notifications enabled", self.get_name())
            state = False
        if not self.host_name:
            logger.error("The service '%s' is not bound do any host.", desc)
            state = False
        elif self.host is None:
            logger.error("The service '%s' got an unknown host_name '%s'.", desc, self.host_name)
            state = False
        if not hasattr(self, 'check_period'):
            self.check_period = None
        if hasattr(self, 'service_description'):
            for c in cls.illegal_object_name_chars:
                if c in self.service_description:
                    logger.error("%s: My service_description got the "
                                 "character %s that is not allowed.", self.get_name(), c)
                    state = False
        return state
# The service is dependent of his father dep
# Must be AFTER linkify
# TODO: implement "not host dependent" feature.
def fill_daddy_dependency(self):
# Depend of host, all status, is a networkdep
# and do not have timeperiod, and follow parents dep
if self.host is not None and self.host_dependency_enabled:
# I add the dep in MY list
self.act_depend_of.append(
(self.host, ['d', 'u', 's', 'f'], 'network_dep', None, True)
)
# I add the dep in Daddy list
self.host.act_depend_of_me.append(
(self, ['d', 'u', 's', 'f'], 'network_dep', None, True)
)
# And the parent/child dep lists too
self.host.register_son_in_parent_child_dependencies(self)
# Register the dependency between 2 service for action (notification etc)
def add_service_act_dependency(self, srv, status, timeperiod, inherits_parent):
# first I add the other the I depend on in MY list
self.act_depend_of.append((srv, status, 'logic_dep', timeperiod, inherits_parent))
# then I register myself in the other service dep list
srv.act_depend_of_me.append((self, status, 'logic_dep', timeperiod, inherits_parent))
# And the parent/child dep lists too
srv.register_son_in_parent_child_dependencies(self)
# Register the dependency between 2 service for action (notification etc)
# but based on a BUSINESS rule, so on fact:
# ERP depend on database, so we fill just database.act_depend_of_me
# because we will want ERP mails to go on! So call this
# on the database service with the srv=ERP service
def add_business_rule_act_dependency(self, srv, status, timeperiod, inherits_parent):
# I only register so he know that I WILL be a impact
self.act_depend_of_me.append((srv, status, 'business_dep',
timeperiod, inherits_parent))
# And the parent/child dep lists too
self.register_son_in_parent_child_dependencies(srv)
# Register the dependency between 2 service for checks
def add_service_chk_dependency(self, srv, status, timeperiod, inherits_parent):
# first I add the other the I depend on in MY list
self.chk_depend_of.append((srv, status, 'logic_dep', timeperiod, inherits_parent))
# then I register myself in the other service dep list
srv.chk_depend_of_me.append(
(self, status, 'logic_dep', timeperiod, inherits_parent)
)
# And the parent/child dep lists too
srv.register_son_in_parent_child_dependencies(self)
    def duplicate(self, host):
        ''' For a given host, build every service copy requested by this
        template's duplicate_foreach property.

        The host custom (e.g. _disks) is parsed as a KEY/VALUE generator;
        one service is created per key, with $KEY$/$VALUE$ macros expanded
        in the description and a few other properties.
        :type host: shinken.objects.host.Host
        :return Service list (empty when the custom is absent or invalid)
        '''
        # In macro, it's all in UPPER case
        prop = self.duplicate_foreach.strip().upper()
        if prop not in host.customs:  # If I do not have the property, we bail out
            return []
        duplicates = []
        # Get the list entry, and the not one if there is one
        entry = host.customs[prop]
        # Look at the list of the key we do NOT want maybe,
        # for _disks it will be _!disks
        not_entry = host.customs.get('_' + '!' + prop[1:], '').split(',')
        not_keys = strip_and_uniq(not_entry)
        default_value = getattr(self, 'default_value', '')
        # Transform the generator string to a list
        # Missing values are filled with the default value
        (key_values, errcode) = get_key_value_sequence(entry, default_value)
        if key_values:
            for key_value in key_values:
                key = key_value['KEY']
                # Maybe this key is in the NOT list, if so, skip it
                if key in not_keys:
                    continue
                value = key_value['VALUE']
                new_s = self.copy()
                new_s.host_name = host.get_name()
                if self.is_tpl():  # if template, the new one is not
                    new_s.register = 1
                for key in key_value:
                    if key == 'KEY':
                        if hasattr(self, 'service_description'):
                            # We want to change all illegal chars to a _ sign.
                            # We can't use class.illegal_obj_char
                            # because in the "explode" phase, we do not have access to this data! :(
                            safe_key_value = re.sub(r'[' + "`~!$%^&*\"|'<>?,()=" + ']+', '_',
                                                    key_value[key])
                            new_s.service_description = self.service_description.replace(
                                '$' + key + '$', safe_key_value
                            )
                    # Here is a list of property where we will expand the $KEY$ by the value
                    _the_expandables = ['check_command',
                                        'display_name',
                                        'aggregation',
                                        'event_handler']
                    for prop in _the_expandables:
                        if hasattr(self, prop):
                            # here we can replace VALUE, VALUE1, VALUE2,...
                            setattr(new_s, prop, getattr(new_s, prop).replace('$' + key + '$',
                                                                              key_value[key]))
                    if hasattr(self, 'service_dependencies'):
                        for i, sd in enumerate(new_s.service_dependencies):
                            new_s.service_dependencies[i] = sd.replace(
                                '$' + key + '$', key_value[key]
                            )
                # And then add in our list this new service
                duplicates.append(new_s)
        else:
            # If error, we should link the error to the host, because self is
            # a template, and so won't be checked nor printed!
            if errcode == GET_KEY_VALUE_SEQUENCE_ERROR_SYNTAX:
                err = "The custom property '%s' of the host '%s' is not a valid entry %s for a service generator" % \
                    (self.duplicate_foreach.strip(), host.get_name(), entry)
                logger.warning(err)
                host.configuration_errors.append(err)
            elif errcode == GET_KEY_VALUE_SEQUENCE_ERROR_NODEFAULT:
                err = "The custom property '%s 'of the host '%s' has empty " \
                      "values %s but the service %s has no default_value" % \
                      (self.duplicate_foreach.strip(),
                       host.get_name(), entry, self.service_description)
                logger.warning(err)
                host.configuration_errors.append(err)
            elif errcode == GET_KEY_VALUE_SEQUENCE_ERROR_NODE:
                err = "The custom property '%s' of the host '%s' has an invalid node range %s" % \
                    (self.duplicate_foreach.strip(), host.get_name(), entry)
                logger.warning(err)
                host.configuration_errors.append(err)
        return duplicates
#####
# _
# (_)
# _ __ _ _ _ __ _ __ _ _ __ __ _
# | '__| | | | '_ \| '_ \| | '_ \ / _` |
# | | | |_| | | | | | | | | | | | (_| |
# |_| \__,_|_| |_|_| |_|_|_| |_|\__, |
# __/ |
# |___/
####
    # Set unreachable: our host is DOWN, but it mean nothing for a service
    def set_unreachable(self):
        """A DOWN host does not map to a specific service state: no-op."""
        pass
# We just go an impact, so we go unreachable
# but only if it's enable in the configuration
def set_impact_state(self):
cls = self.__class__
if cls.enable_problem_impacts_states_change:
# Keep a trace of the old state (problem came back before
# a new checks)
self.state_before_impact = self.state
self.state_id_before_impact = self.state_id
# this flag will know if we override the impact state
self.state_changed_since_impact = False
self.state = 'UNKNOWN' # exit code UNDETERMINED
self.state_id = 3
# Ok, we are no more an impact, if no news checks
# override the impact state, we came back to old
# states
# And only if we enable the state change for impacts
def unset_impact_state(self):
cls = self.__class__
if cls.enable_problem_impacts_states_change and not self.state_changed_since_impact:
self.state = self.state_before_impact
self.state_id = self.state_id_before_impact
# Set state with status return by the check
# and update flapping state
def set_state_from_exit_status(self, status):
now = time.time()
self.last_state_update = now
# we should put in last_state the good last state:
# if not just change the state by an problem/impact
# we can take current state. But if it's the case, the
# real old state is self.state_before_impact (it's the TRUE
# state in fact)
# but only if the global conf have enable the impact state change
cls = self.__class__
if cls.enable_problem_impacts_states_change \
and self.is_impact \
and not self.state_changed_since_impact:
self.last_state = self.state_before_impact
else: # standard case
self.last_state = self.state
if status == 0:
self.state = 'OK'
self.state_id = 0
self.last_time_ok = int(self.last_state_update)
state_code = 'o'
elif status == 1:
self.state = 'WARNING'
self.state_id = 1
self.last_time_warning = int(self.last_state_update)
state_code = 'w'
elif status == 2:
self.state = 'CRITICAL'
self.state_id = 2
self.last_time_critical = int(self.last_state_update)
state_code = 'c'
elif status == 3:
self.state = 'UNKNOWN'
self.state_id = 3
self.last_time_unknown = int(self.last_state_update)
state_code = 'u'
else:
self.state = 'CRITICAL' # exit code UNDETERMINED
self.state_id = 2
self.last_time_critical = int(self.last_state_update)
state_code = 'c'
if state_code in self.flap_detection_options:
self.add_flapping_change(self.state != self.last_state)
if self.state != self.last_state:
self.last_state_change = self.last_state_update
self.duration_sec = now - self.last_state_change
# Return True if status is the state (like OK) or small form like 'o'
def is_state(self, status):
if status == self.state:
return True
# Now low status
elif status == 'o' and self.state == 'OK':
return True
elif status == 'c' and self.state == 'CRITICAL':
return True
elif status == 'w' and self.state == 'WARNING':
return True
elif status == 'u' and self.state == 'UNKNOWN':
return True
return False
# The last time when the state was not OK
def last_time_non_ok_or_up(self):
non_ok_times = filter(lambda x: x > self.last_time_ok, [self.last_time_warning,
self.last_time_critical,
self.last_time_unknown])
if len(non_ok_times) == 0:
last_time_non_ok = 0 # program_start would be better
else:
last_time_non_ok = min(non_ok_times)
return last_time_non_ok
    # Add a log entry with a SERVICE ALERT like:
    # SERVICE ALERT: server;Load;UNKNOWN;HARD;1;I don't know what to say...
    def raise_alert_log_entry(self):
        """Log the Nagios-format SERVICE ALERT line for the current state."""
        naglog_result('critical', 'SERVICE ALERT: %s;%s;%s;%s;%d;%s'
                      % (self.host.get_name(), self.get_name(),
                         self.state, self.state_type,
                         self.attempt, self.output))
    # If the configuration allow it, raise an initial log like
    # CURRENT SERVICE STATE: server;Load;UNKNOWN;HARD;1;I don't know what to say...
    def raise_initial_state(self):
        """Log the CURRENT SERVICE STATE line when initial states are logged."""
        if self.__class__.log_initial_states:
            naglog_result('info', 'CURRENT SERVICE STATE: %s;%s;%s;%s;%d;%s'
                          % (self.host.get_name(), self.get_name(),
                             self.state, self.state_type, self.attempt, self.output))
    # Add a log entry with a Freshness alert like:
    # Warning: The results of host 'Server' are stale by 0d 0h 0m 58s (threshold=0d 1h 0m 0s).
    # I'm forcing an immediate check of the host.
    def raise_freshness_log_entry(self, t_stale_by, t_threshold):
        """Warn that check results are stale and a forced check is coming."""
        logger.warning("The results of service '%s' on host '%s' are stale "
                       "by %s (threshold=%s). I'm forcing an immediate check "
                       "of the service.",
                       self.get_name(), self.host.get_name(),
                       format_t_into_dhms_format(t_stale_by),
                       format_t_into_dhms_format(t_threshold))
    # Raise a log entry with a Notification alert like
    # SERVICE NOTIFICATION: superadmin;server;Load;OK;notify-by-rss;no output
    def raise_notification_log_entry(self, n):
        """Log the SERVICE NOTIFICATION line when notification logging is on."""
        contact = n.contact
        command = n.command_call
        # Non-state notification types carry the type plus current state
        if n.type in ('DOWNTIMESTART', 'DOWNTIMEEND', 'DOWNTIMECANCELLED',
                      'CUSTOM', 'ACKNOWLEDGEMENT', 'FLAPPINGSTART',
                      'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
            state = '%s (%s)' % (n.type, self.state)
        else:
            state = self.state
        if self.__class__.log_notifications:
            naglog_result('critical', "SERVICE NOTIFICATION: %s;%s;%s;%s;%s;%s"
                          % (contact.get_name(),
                             self.host.get_name(), self.get_name(), state,
                             command.get_name(), self.output))
    # Raise a log entry with an Eventhandler alert like
    # SERVICE EVENT HANDLER: test_host_0;test_ok_0;OK;SOFT;4;eventhandler
    def raise_event_handler_log_entry(self, command):
        """Log the SERVICE EVENT HANDLER line when handler logging is on."""
        if self.__class__.log_event_handlers:
            naglog_result('critical', "SERVICE EVENT HANDLER: %s;%s;%s;%s;%s;%s"
                          % (self.host.get_name(), self.get_name(),
                             self.state, self.state_type,
                             self.attempt, command.get_name()))
    # Raise a log entry with a Snapshot alert like
    # SERVICE SNAPSHOT: test_host_0;test_ok_0;OK;SOFT;4;eventhandler
    def raise_snapshot_log_entry(self, command):
        """Log the SERVICE SNAPSHOT line when handler logging is on."""
        if self.__class__.log_event_handlers:
            naglog_result('critical', "SERVICE SNAPSHOT: %s;%s;%s;%s;%s;%s"
                          % (self.host.get_name(), self.get_name(),
                             self.state, self.state_type, self.attempt, command.get_name()))
    # Raise a log entry with FLAPPING START alert like
    # SERVICE FLAPPING ALERT: server;LOAD;STARTED;
    # Service appears to have started flapping (50.6% change >= 50.0% threshold)
    def raise_flapping_start_log_entry(self, change_ratio, threshold):
        """Log that flapping detection has triggered for this service."""
        naglog_result('critical', "SERVICE FLAPPING ALERT: %s;%s;STARTED; "
                      "Service appears to have started flapping "
                      "(%.1f%% change >= %.1f%% threshold)"
                      % (self.host.get_name(), self.get_name(),
                         change_ratio, threshold))
    # Raise a log entry with FLAPPING STOP alert like
    # SERVICE FLAPPING ALERT: server;LOAD;STOPPED;
    # Service appears to have stopped flapping (23.0% change < 25.0% threshold)
    def raise_flapping_stop_log_entry(self, change_ratio, threshold):
        """Log that the service is no longer considered flapping."""
        naglog_result('critical', "SERVICE FLAPPING ALERT: %s;%s;STOPPED; "
                      "Service appears to have stopped flapping "
                      "(%.1f%% change < %.1f%% threshold)"
                      % (self.host.get_name(), self.get_name(),
                         change_ratio, threshold))
    # If there is no valid time for next check, raise a log entry
    def raise_no_next_check_log_entry(self):
        """Warn that the next check cannot be scheduled (no valid time)."""
        logger.warning("I cannot schedule the check for the service '%s' on "
                       "host '%s' because there is not future valid time",
                       self.get_name(), self.host.get_name())
    # Raise a log entry when a downtime begins
    # SERVICE DOWNTIME ALERT: test_host_0;test_ok_0;STARTED;
    # Service has entered a period of scheduled downtime
    def raise_enter_downtime_log_entry(self):
        """Log the start of a scheduled downtime period."""
        naglog_result('critical', "SERVICE DOWNTIME ALERT: %s;%s;STARTED; "
                      "Service has entered a period of scheduled "
                      "downtime" % (self.host.get_name(), self.get_name()))
    # Raise a log entry when a downtime has finished
    # SERVICE DOWNTIME ALERT: test_host_0;test_ok_0;STOPPED;
    # Service has exited from a period of scheduled downtime
    def raise_exit_downtime_log_entry(self):
        """Log the natural end of a scheduled downtime period."""
        naglog_result('critical', "SERVICE DOWNTIME ALERT: %s;%s;STOPPED; Service "
                      "has exited from a period of scheduled downtime"
                      % (self.host.get_name(), self.get_name()))
    # Raise a log entry when a downtime prematurely ends
    # SERVICE DOWNTIME ALERT: test_host_0;test_ok_0;CANCELLED;
    # Scheduled downtime for service has been cancelled
    def raise_cancel_downtime_log_entry(self):
        """Log the early cancellation of a scheduled downtime period."""
        naglog_result(
            'critical', "SERVICE DOWNTIME ALERT: %s;%s;CANCELLED; "
            "Scheduled downtime for service has been cancelled."
            % (self.host.get_name(), self.get_name()))
# Is stalking?
# Launch if check is waitconsume==first time
# and if c.status is in self.stalking_options
def manage_stalking(self, c):
need_stalk = False
if c.status == 'waitconsume':
if c.exit_status == 0 and 'o' in self.stalking_options:
need_stalk = True
elif c.exit_status == 1 and 'w' in self.stalking_options:
need_stalk = True
elif c.exit_status == 2 and 'c' in self.stalking_options:
need_stalk = True
elif c.exit_status == 3 and 'u' in self.stalking_options:
need_stalk = True
if c.output == self.output:
need_stalk = False
if need_stalk:
logger.info("Stalking %s: %s", self.get_name(), c.output)
    # Give data for checks's macros
    def get_data_for_checks(self):
        """Elements whose macros can be resolved in a check command."""
        return [self.host, self]
    # Give data for event handlers's macros
    def get_data_for_event_handler(self):
        """Elements whose macros can be resolved in an event handler."""
        return [self.host, self]
    # Give data for notifications' macros
    def get_data_for_notifications(self, contact, n):
        """Elements whose macros can be resolved in a notification command."""
        return [self.host, self, contact, n]
# See if the notification is launchable (time is OK and contact is OK too)
def notification_is_blocked_by_contact(self, n, contact):
return not contact.want_service_notification(self.last_chk, self.state,
n.type, self.business_impact, n.command_call)
def get_duration_sec(self):
return str(int(self.duration_sec))
def get_duration(self):
m, s = divmod(self.duration_sec, 60)
h, m = divmod(m, 60)
return "%02dh %02dm %02ds" % (h, m, s)
def get_ack_author_name(self):
if self.acknowledgement is None:
return ''
return self.acknowledgement.author
def get_ack_comment(self):
if self.acknowledgement is None:
return ''
return self.acknowledgement.comment
def get_check_command(self):
return self.check_command.get_name()
    # Check if a notification for this service is suppressed at this time
    def notification_is_blocked_by_item(self, type, t_wished=None):
        """Return True when a notification of *type* must not be sent now.

        The rules are applied in order: global/local enable flags, the
        notification period, notification_options filtering, downtime
        rules, SOFT state, existing acknowledgement, flapping, host state,
        and finally smart business-rule blocking.
        """
        if t_wished is None:
            t_wished = time.time()
        # TODO
        # forced notification
        # pass if this is a custom notification
        # Block if notifications are program-wide disabled
        if not self.enable_notifications:
            return True
        # Does the notification period allow sending out this notification?
        if self.notification_period is not None \
                and not self.notification_period.is_time_valid(t_wished):
            return True
        # Block if notifications are disabled for this service
        if not self.notifications_enabled:
            return True
        # Block if the current status is in the notification_options w,u,c,r,f,s
        if 'n' in self.notification_options:
            return True
        if type in ('PROBLEM', 'RECOVERY'):
            if self.state == 'UNKNOWN' and 'u' not in self.notification_options:
                return True
            if self.state == 'WARNING' and 'w' not in self.notification_options:
                return True
            if self.state == 'CRITICAL' and 'c' not in self.notification_options:
                return True
            if self.state == 'OK' and 'r' not in self.notification_options:
                return True
        if (type in ('FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED')
                and 'f' not in self.notification_options):
            return True
        if (type in ('DOWNTIMESTART', 'DOWNTIMEEND', 'DOWNTIMECANCELLED')
                and 's' not in self.notification_options):
            return True
        # Acknowledgements make no sense when the status is ok/up
        if type == 'ACKNOWLEDGEMENT':
            if self.state == self.ok_up:
                return True
        # When in downtime, only allow end-of-downtime notifications
        if self.scheduled_downtime_depth > 1 and type not in ('DOWNTIMEEND', 'DOWNTIMECANCELLED'):
            return True
        # Block if host is in a scheduled downtime
        if self.host.scheduled_downtime_depth > 0:
            return True
        # Block if in a scheduled downtime and a problem arises, or flapping event
        if self.scheduled_downtime_depth > 0 and type in \
                ('PROBLEM', 'RECOVERY', 'FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
            return True
        # Block if the status is SOFT
        if self.state_type == 'SOFT' and type == 'PROBLEM':
            return True
        # Block if the problem has already been acknowledged
        if self.problem_has_been_acknowledged and type != 'ACKNOWLEDGEMENT':
            return True
        # Block if flapping
        if self.is_flapping and type not in ('FLAPPINGSTART', 'FLAPPINGSTOP', 'FLAPPINGDISABLED'):
            return True
        # Block if host is down
        if self.host.state != self.host.ok_up:
            return True
        # Block if business rule smart notifications is enabled and all its
        # children have been acknowledged or are under downtime.
        if self.got_business_rule is True \
                and self.business_rule_smart_notifications is True \
                and self.business_rule_notification_is_blocked() is True \
                and type == 'PROBLEM':
            return True
        return False
# Get a oc*p command if item has obsess_over_*
# command. It must be enabled locally and globally
def get_obsessive_compulsive_processor_command(self):
cls = self.__class__
if not cls.obsess_over or not self.obsess_over_service:
return
m = MacroResolver()
data = self.get_data_for_event_handler()
cmd = m.resolve_command(cls.ocsp_command, data)
e = EventHandler(cmd, timeout=cls.ocsp_timeout)
# ok we can put it in our temp action queue
self.actions.append(e)
def get_short_status(self):
mapping = {
0: "O",
1: "W",
2: "C",
3: "U",
}
if self.got_business_rule:
return mapping.get(self.business_rule.get_state(), "n/a")
else:
return mapping.get(self.state_id, "n/a")
def get_status(self):
if self.got_business_rule:
mapping = {
0: "OK",
1: "WARNING",
2: "CRITICAL",
3: "UNKNOWN",
}
return mapping.get(self.business_rule.get_state(), "n/a")
else:
return self.state
def get_downtime(self):
return str(self.scheduled_downtime_depth)
# Class for a list of services. It's mainly used for the configuration part.
class Services(Items):
    name_property = 'unique_key'  # only used by (un)indexitem (via 'name_property')
    inner_class = Service  # tells the generic Items machinery what we contain
    def add_template(self, tpl):
        """
        Adds and index a template into the `templates` container.
        This implementation takes into account that a service has two naming
        attributes: `host_name` and `service_description`.
        A template with only a host_name (no name) is stored but not indexed.
        :param tpl: The template to add
        """
        objcls = self.inner_class.my_type
        name = getattr(tpl, 'name', '')
        hname = getattr(tpl, 'host_name', '')
        if not name and not hname:
            mesg = "a %s template has been defined without name nor " \
                   "host_name%s" % (objcls, self.get_source(tpl))
            tpl.configuration_errors.append(mesg)
        elif name:
            # Indexing may rename/flag the template on name collision
            tpl = self.index_template(tpl)
        self.templates[tpl.id] = tpl
    def add_item(self, item, index=True):
        """
        Adds and index an item into the `items` container.
        This implementation takes into account that a service has two naming
        attributes: `host_name` and `service_description`.
        An indexed item lacking either name is dropped with an error.
        :param item: The item to add
        :param index: Flag indicating if the item should be indexed
        """
        objcls = self.inner_class.my_type
        hname = getattr(item, 'host_name', '')
        hgname = getattr(item, 'hostgroup_name', '')
        sdesc = getattr(item, 'service_description', '')
        source = getattr(item, 'imported_from', 'unknown')
        if source:
            in_file = " in %s" % source
        else:
            in_file = ""
        if not hname and not hgname:
            mesg = "a %s has been defined without host_name nor " \
                   "hostgroups%s" % (objcls, in_file)
            item.configuration_errors.append(mesg)
        if index is True:
            if hname and sdesc:
                item = self.index_item(item)
            else:
                # Not indexable: record the error and drop the item entirely
                mesg = "a %s has been defined without host_name nor " \
                       "service_description%s" % (objcls, in_file)
                item.configuration_errors.append(mesg)
                return
        self.items[item.id] = item
# Inheritance for just a property
def apply_partial_inheritance(self, prop):
for i in itertools.chain(self.items.itervalues(),
self.templates.itervalues()):
i.get_property_by_inheritance(prop, 0)
# If a "null" attribute was inherited, delete it
try:
if getattr(i, prop) == 'null':
delattr(i, prop)
except AttributeError:
pass
def apply_inheritance(self):
""" For all items and templates inherite properties and custom
variables.
"""
# We check for all Class properties if the host has it
# if not, it check all host templates for a value
cls = self.inner_class
for prop in cls.properties:
self.apply_partial_inheritance(prop)
for i in itertools.chain(self.items.itervalues(),
self.templates.itervalues()):
i.get_customs_properties_by_inheritance(0)
def linkify_templates(self):
# First we create a list of all templates
for i in itertools.chain(self.items.itervalues(),
self.templates.itervalues()):
self.linkify_item_templates(i)
for i in self:
i.tags = self.get_all_tags(i)
# Search for all of the services in a host
def find_srvs_by_hostname(self, host_name):
if hasattr(self, 'hosts'):
h = self.hosts.find_by_name(host_name)
if h is None:
return None
return h.get_services()
return None
    # Search a service by its service_description and host_name
def find_srv_by_name_and_hostname(self, host_name, sdescr):
key = (host_name, sdescr)
return self.name_to_item.get(key, None)
    # Make link between elements:
    # service -> host
    # service -> command
    # service -> timeperiods
    # service -> contacts
    def linkify(self, hosts, commands, timeperiods, contacts,
                resultmodulations, businessimpactmodulations, escalations,
                servicegroups, triggers, checkmodulations, macromodulations):
        """Replace every by-name reference on the services with real objects."""
        self.linkify_with_timeperiods(timeperiods, 'notification_period')
        self.linkify_with_timeperiods(timeperiods, 'check_period')
        self.linkify_with_timeperiods(timeperiods, 'maintenance_period')
        self.linkify_with_timeperiods(timeperiods, 'snapshot_period')
        self.linkify_s_by_hst(hosts)
        self.linkify_s_by_sg(servicegroups)
        self.linkify_one_command_with_commands(commands, 'check_command')
        self.linkify_one_command_with_commands(commands, 'event_handler')
        self.linkify_one_command_with_commands(commands, 'snapshot_command')
        self.linkify_with_contacts(contacts)
        self.linkify_with_resultmodulations(resultmodulations)
        self.linkify_with_business_impact_modulations(businessimpactmodulations)
        # WARNING: all escalations will not be link here
        # (just the escalation here, not serviceesca or hostesca).
        # This last one will be link in escalations linkify.
        self.linkify_with_escalations(escalations)
        self.linkify_with_triggers(triggers)
        self.linkify_with_checkmodulations(checkmodulations)
        self.linkify_with_macromodulations(macromodulations)
def override_properties(self, hosts):
    """Apply per-host `service_overrides` entries, rewriting single
    properties of already-defined services.

    Each entry has the form ``<service_selector>,<property> <value>``.
    Invalid syntax, forbidden properties and unknown services are
    reported as configuration errors on the host.
    """
    # "<anything-but-comma>,<non-space>  <rest>" -> (service, prop, value)
    ovr_re = re.compile(r'^([^,]+),([^\s]+)\s+(.*)$')
    ovr_hosts = [h for h in hosts if getattr(h, 'service_overrides', None)]
    for host in ovr_hosts:
        # We're only looking for hosts having service overrides defined
        if isinstance(host.service_overrides, list):
            service_overrides = host.service_overrides
        else:
            service_overrides = [host.service_overrides]
        for ovr in service_overrides:
            # Checks service override syntax
            match = ovr_re.search(ovr)
            if match is None:
                err = "Error: invalid service override syntax: %s" % ovr
                host.configuration_errors.append(err)
                continue
            sdescr, prop, value = match.groups()
            # Checks if override is allowed: identity/inheritance
            # properties must never be overridden this way.
            excludes = ['host_name', 'service_description', 'use',
                        'servicegroups', 'trigger', 'trigger_name']
            if prop in excludes:
                err = "Error: trying to override '%s', a forbidden property for service '%s'" % \
                      (prop, sdescr)
                host.configuration_errors.append(err)
                continue
            # Looks for corresponding services ('*', 'r:<regex>' or a name)
            services = self.get_ovr_services_from_expression(host, sdescr)
            if not services:
                err = "Error: trying to override property '%s' on " \
                      "service identified by '%s' " \
                      "but it's unknown for this host" % (prop, sdescr)
                host.configuration_errors.append(err)
                continue
            value = Service.properties[prop].pythonize(value)
            for service in services:
                # Pythonize the value because here value is str.
                setattr(service, prop, value)
def get_ovr_services_from_expression(self, host, sdesc):
    """Resolve a service_overrides selector to a list of services.

    '*' matches all services of the host, 'r:<pattern>' matches by regex
    on the description, anything else is an exact description lookup.
    """
    hostname = getattr(host, "host_name", "")
    if sdesc == "*":
        return self.find_by_filter([filter_service_by_host_name(hostname)])
    if sdesc.startswith("r:"):
        return self.find_by_filter([
            filter_service_by_host_name(hostname),
            filter_service_by_regex_name(sdesc[2:]),
        ])
    svc = self.find_srv_by_name_and_hostname(hostname, sdesc)
    return [] if svc is None else [svc]
# We can link services with hosts so
# We can search in O(hosts) instead
# of O(services) for common cases
def optimize_service_search(self, hosts):
    # Keep a reference to the hosts container so lookups like
    # find_srvs_by_hostname() can go through the host index.
    self.hosts = hosts
# We just search for each host the id of the host
# and replace the name by the id
# + inform the host we are a service of him
def linkify_s_by_hst(self, hosts):
    """Attach every service to its Host object (service.host) and
    register the service on that host.  Services without a host_name
    get host=None (template-like, to be cleaned later); services with
    an unknown host_name get a configuration warning."""
    for s in self:
        # If we do not have a host_name, we set it as
        # a template element to delete. (like Nagios)
        if not hasattr(s, 'host_name'):
            s.host = None
            continue
        try:
            hst_name = s.host_name
            hst = hosts.find_by_name(hst_name)
            s.host = hst
            # Let the host know we are his service
            if s.host is not None:
                hst.add_service_link(s)
            else:  # Ok, the host does not exist!
                # BUG FIX: the warning must name the offending service,
                # not the whole Services container (was self.get_name()).
                err = "Warning: the service '%s' got an invalid host_name '%s'" % \
                      (s.get_name(), hst_name)
                s.configuration_warnings.append(err)
        except AttributeError:
            pass  # Will be caught at the is_correct() step
# We look for servicegroups property in services and
# link them
def linkify_s_by_sg(self, servicegroups):
    """Replace each service's servicegroup names with the real
    Servicegroup objects; unknown names become configuration errors."""
    for s in self:
        new_servicegroups = []
        if hasattr(s, 'servicegroups') and s.servicegroups != '':
            for sg_name in s.servicegroups:
                sg_name = sg_name.strip()
                sg = servicegroups.find_by_name(sg_name)
                if sg is not None:
                    new_servicegroups.append(sg)
                else:
                    err = "Error: the servicegroup '%s' of the service '%s' is unknown" %\
                          (sg_name, s.get_dbg_name())
                    s.configuration_errors.append(err)
            # NOTE(review): indentation reconstructed — assumed the rebind
            # only happens for services that declared servicegroups; confirm
            # against upstream source.
            s.servicegroups = new_servicegroups
# In the scheduler we need to relink the commandCall with
# the real commands
def late_linkify_s_by_commands(self, commands):
    """Re-link each service's CommandCall objects with real commands."""
    for svc in self:
        for attr in ('check_command', 'event_handler', 'snapshot_command'):
            call = getattr(svc, attr, None)
            if call:
                call.late_linkify_with_command(commands)
# Delete services by ids
def delete_services_by_id(self, ids):
    """Remove from this container every service whose id is in *ids*.

    :param ids: iterable of service ids to delete
    """
    # 'sid' instead of 'id' so we don't shadow the builtin id().
    for sid in ids:
        del self[sid]
# Apply implicit inheritance for special properties:
# contact_groups, notification_interval , notification_period
# So service will take info from host if necessary
def apply_implicit_inheritance(self, hosts):
    """For a fixed list of properties, copy the host's value onto the
    service when the service did not define the property itself."""
    for prop in ('contacts', 'contact_groups', 'notification_interval',
                 'notification_period', 'resultmodulations', 'business_impact_modulations',
                 'escalations', 'poller_tag', 'reactionner_tag', 'check_period',
                 'business_impact', 'maintenance_period'):
        for s in self:
            # Only inherit when the service lacks the property but knows
            # its host; a host missing the property is simply skipped.
            if not hasattr(s, prop) and hasattr(s, 'host_name'):
                h = hosts.find_by_name(s.host_name)
                if h is not None and hasattr(h, prop):
                    setattr(s, prop, getattr(h, prop))
# Create dependencies for services (daddy ones)
def apply_dependencies(self):
    """Ask every service to register itself on its parent dependencies."""
    for service in self:
        service.fill_daddy_dependency()
def set_initial_state(self):
    """Propagate configured initial states by delegating to each service."""
    for service in self:
        service.set_initial_state()
# For services the main clean is about service with bad hosts
def clean(self):
    """Drop every service that ended up without a valid host."""
    orphan_ids = [srv.id for srv in self if not srv.host]
    for sid in orphan_ids:
        del self.items[sid]
def explode_services_from_hosts(self, hosts, s, hnames):
    """
    Explodes a service based on a list of hosts.

    :param hosts: The hosts container
    :param s: The base service to explode
    :param hnames: The host_name list to explode the service on
    """
    duplicate_for_hosts = []  # get the list of our host_names if more than 1
    not_hosts = []  # the list of !host_name so we remove them after
    for hname in hnames:
        hname = hname.strip()
        # If the name begins with a !, we put it in the not list
        if hname.startswith('!'):
            not_hosts.append(hname[1:])
        else:  # the standard list
            duplicate_for_hosts.append(hname)
    # remove duplicate items from duplicate_for_hosts:
    duplicate_for_hosts = list(set(duplicate_for_hosts))
    # Ok now we clean the duplicate_for_hosts with all hosts of the not
    for hname in not_hosts:
        try:
            duplicate_for_hosts.remove(hname)
        except ValueError:
            # BUG FIX: list.remove raises ValueError, not IndexError; the
            # original 'except IndexError' let a '!host' entry that was
            # not in the list crash the configuration expansion.
            pass
    # Now we duplicate the service for all host_names
    for hname in duplicate_for_hosts:
        h = hosts.find_by_name(hname)
        if h is None:
            err = 'Error: The hostname %s is unknown for the ' \
                  'service %s!' % (hname, s.get_name())
            s.configuration_errors.append(err)
            continue
        if h.is_excluded_for(s):
            continue
        new_s = s.copy()
        new_s.host_name = hname
        self.add_item(new_s)
def _local_create_service(self, hosts, host_name, service):
    '''Create a new service based on a host_name and service instance.

    :param hosts: The hosts items instance.
    :type hosts: shinken.objects.host.Hosts
    :param host_name: The host_name to create a new service.
    :param service: The service to be used as template.
    :type service: Service
    :return: The new service created (or None when the host excludes it).
    :rtype: Service
    '''
    h = hosts.find_by_name(host_name.strip())
    # NOTE(review): assumes host_name always resolves — if find_by_name
    # returns None this raises AttributeError; confirm callers guarantee it.
    if h.is_excluded_for(service):
        return
    # Creates concrete instance
    new_s = service.copy()
    new_s.host_name = host_name
    # Force registration even if the source template had register 0.
    new_s.register = 1
    if new_s.is_duplicate():
        # duplicate_foreach services are indexed later, after expansion.
        self.add_item(new_s, index=False)
    else:
        self.add_item(new_s)
    return new_s
def explode_services_from_templates(self, hosts, service):
    """
    Explodes services from templates. All hosts holding the specified
    templates are bound the service.

    :param hosts: The hosts container.
    :type hosts: shinken.objects.host.Hosts
    :param service: The service to explode.
    :type service: Service
    """
    hname = getattr(service, "host_name", None)
    if not hname:
        return
    # Now really create the services
    if is_complex_expr(hname):
        # host_name is a hostgroup/template expression like "(g1|g2)&!g3"
        hnames = self.evaluate_hostgroup_expression(
            hname.strip(), hosts, hosts.templates, look_in='templates')
        for name in hnames:
            self._local_create_service(hosts, name, service)
    else:
        # Plain comma-separated template names; bind the service to every
        # host that uses one of them.
        hnames = [n.strip() for n in hname.split(',') if n.strip()]
        for hname in hnames:
            for name in hosts.find_hosts_that_use_template(hname):
                self._local_create_service(hosts, name, service)
def explode_services_duplicates(self, hosts, s):
    """
    Explodes services holding a `duplicate_foreach` clause.

    :param hosts: The hosts container
    :param s: The service to explode
    :type s: Service
    """
    hname = getattr(s, "host_name", None)
    if hname is None:
        return
    # the generator case, we must create several new services
    # we must find our host, and get all key:value we need
    h = hosts.find_by_name(hname.strip())
    if h is None:
        err = 'Error: The hostname %s is unknown for the ' \
              'service %s!' % (hname, s.get_name())
        s.configuration_errors.append(err)
        return
    # Duplicate services: one concrete service per key/value pair of
    # the host's duplicate_foreach data.
    for new_s in s.duplicate(h):
        if h.is_excluded_for(new_s):
            continue
        # Adds concrete instance
        self.add_item(new_s)
def register_service_into_servicegroups(self, s, servicegroups):
    """
    Registers a service into the service groups declared in its
    `servicegroups` attribute.

    :param s: The service to register
    :param servicegroups: The servicegroups container
    """
    if not hasattr(s, 'service_description'):
        return
    sname = s.service_description
    shname = getattr(s, 'host_name', '')
    if not hasattr(s, 'servicegroups'):
        return
    # servicegroups may already be a list, or still a raw CSV string.
    raw = s.servicegroups
    sg_names = raw if isinstance(raw, list) else raw.split(',')
    for sg_name in sg_names:
        servicegroups.add_member([shname, sname], sg_name.strip())
def register_service_dependencies(self, s, servicedependencies):
    """
    Registers a service dependencies.

    :param s: The service to register
    :param servicedependencies: The servicedependencies container
    """
    # We explode service_dependencies into Servicedependency
    # We just create serviceDep with goods values (as STRING!),
    # the link pass will be done after
    sdeps = [d.strip() for d in
             getattr(s, "service_dependencies", [])]
    # The flat list alternates host name / service description pairs:
    # %2=0 are for hosts, !=0 are for service_description
    i = 0
    hname = ''
    for elt in sdeps:
        if i % 2 == 0:  # host
            hname = elt
        else:  # description
            desc = elt
            # we can register it (s) (depend on) -> (hname, desc)
            # If we do not have enough data for s, it's no use
            if hasattr(s, 'service_description') and hasattr(s, 'host_name'):
                if hname == '':
                    # An empty host part means "same host as the service".
                    hname = s.host_name
                servicedependencies.add_service_dependency(
                    s.host_name, s.service_description, hname, desc)
        i += 1
# We create new service if necessary (host groups and co)
def explode(self, hosts, hostgroups, contactgroups,
            servicegroups, servicedependencies, triggers):
    """
    Explodes services, from host_name, hostgroup_name, and from templates.

    :param hosts: The hosts container
    :param hostgroups: The hostgroups container
    :param contactgroups: The contactgroups container
    :param servicegroups: The servicegroups container
    :param servicedependencies: The servicedependencies container
    :param triggers: The triggers container
    """
    # items::explode_trigger_string_into_triggers
    self.explode_trigger_string_into_triggers(triggers)
    # Bind template-attached services to every host using the template.
    for t in self.templates.values():
        self.explode_contact_groups_into_contacts(t, contactgroups)
        self.explode_services_from_templates(hosts, t)
    # Explode services that have a duplicate_foreach clause
    duplicates = [s.id for s in self if s.is_duplicate()]
    for id in duplicates:
        s = self.items[id]
        self.explode_services_duplicates(hosts, s)
        # Keep the generator around only if its expansion failed, so the
        # errors are reported.
        if not s.configuration_errors:
            self.remove_item(s)
    # Then for every host create a copy of the service with just the host
    # because we are adding services, we can't just loop in it
    for s in self.items.values():
        # items::explode_host_groups_into_hosts
        # take all hosts from our hostgroup_name into our host_name property
        self.explode_host_groups_into_hosts(s, hosts, hostgroups)
        # items::explode_contact_groups_into_contacts
        # take all contacts from our contact_groups into our contact property
        self.explode_contact_groups_into_contacts(s, contactgroups)
        hnames = getattr(s, "host_name", '')
        hnames = list(set([n.strip() for n in hnames.split(',') if n.strip()]))
        # hnames = strip_and_uniq(hnames)
        # We will duplicate if we have multiple host_name
        # or if we are a template (so a clean service)
        if len(hnames) == 1:
            self.index_item(s)
        else:
            if len(hnames) >= 2:
                self.explode_services_from_hosts(hosts, s, hnames)
            # Delete expanded source service
            if not s.configuration_errors:
                self.remove_item(s)
    # Drop services whose host explicitly excludes them.
    to_remove = []
    for service in self:
        host = hosts.find_by_name(service.host_name)
        if host and host.is_excluded_for(service):
            to_remove.append(service)
    for service in to_remove:
        self.remove_item(service)
    # Servicegroups property need to be fullfill for got the informations
    # And then just register to this service_group
    for s in self:
        self.register_service_into_servicegroups(s, servicegroups)
        self.register_service_dependencies(s, servicedependencies)
# Will create all business tree for the
# services
def create_business_rules(self, hosts, services):
    """Build the business-rule tree of every service in the container."""
    for service in self:
        service.create_business_rules(hosts, services)
# Will link all business service/host with theirs
# dep for problem/impact link
def create_business_rules_dependencies(self):
    """Wire problem/impact dependency links for business-rule services."""
    for service in self:
        service.create_business_rules_dependencies()
|
40123151ChengYu/2015cd_midterm | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/unittest/test/_test_warnings.py | 858 | # helper module for test_runner.Test_TextTestRunner.test_warnings
"""
This module has a number of tests that raise different kinds of warnings.
When the tests are run, the warnings are caught and their messages are printed
to stdout. This module also accepts an arg that is then passed to
unittest.main to affect the behavior of warnings.
Test_TextTestRunner.test_warnings executes this script with different
combinations of warnings args and -W flags and check that the output is correct.
See #10535.
"""
import sys
import unittest
import warnings
def warnfun():
    # Helper whose RuntimeWarning always originates from this same line,
    # so the 'default' filter prints it once while 'always' prints it on
    # every call (exercised by test_function below).
    warnings.warn('rw', RuntimeWarning)
class TestWarnings(unittest.TestCase):
    """Each test deliberately triggers a different warning category so the
    runner's warning handling can be checked (see module docstring).

    NOTE: the deprecated aliases (assertEquals, failUnless) are used on
    purpose to provoke DeprecationWarnings — do not "modernize" them.
    """
    # unittest warnings will be printed at most once per type (max one message
    # for the fail* methods, and one for the assert* methods)
    def test_assert(self):
        self.assertEquals(2+2, 4)
        self.assertEquals(2*2, 4)
        self.assertEquals(2**2, 4)

    def test_fail(self):
        self.failUnless(1)
        self.failUnless(True)

    def test_other_unittest(self):
        self.assertAlmostEqual(2+2, 4)
        self.assertNotAlmostEqual(4+4, 2)

    # these warnings are normally silenced, but they are printed in unittest
    def test_deprecation(self):
        warnings.warn('dw', DeprecationWarning)
        warnings.warn('dw', DeprecationWarning)
        warnings.warn('dw', DeprecationWarning)

    def test_import(self):
        warnings.warn('iw', ImportWarning)
        warnings.warn('iw', ImportWarning)
        warnings.warn('iw', ImportWarning)

    # user warnings should always be printed
    def test_warning(self):
        warnings.warn('uw')
        warnings.warn('uw')
        warnings.warn('uw')

    # these warnings come from the same place; they will be printed
    # only once by default or three times if the 'always' filter is used
    def test_function(self):
        warnfun()
        warnfun()
        warnfun()
if __name__ == '__main__':
    # Collect warnings instead of letting them print as they occur, so
    # the output is deterministic: test results first, then messages.
    with warnings.catch_warnings(record=True) as ws:
        # if an arg is provided pass it to unittest.main as 'warnings'
        if len(sys.argv) == 2:
            unittest.main(exit=False, warnings=sys.argv.pop())
        else:
            unittest.main(exit=False)

    # print all the warning messages collected
    for w in ws:
        print(w.message)
|
sgerhart/ansible | refs/heads/maintenance_policy_module | test/units/plugins/filter/test_network.py | 41 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import pytest
from units.compat import unittest
from ansible.plugins.filter.network import parse_xml, type5_pw, hash_salt, comp_type5, vlan_parser
from ansible.errors import AnsibleFilterError
# Directory holding the canned fixture files used by every test below.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures', 'network')

# Module-level fixture: raw `show vlans` XML output shared by the tests.
with open(os.path.join(fixture_path, 'show_vlans_xml_output.txt')) as f:
    output_xml = f.read()
class TestNetworkParseFilter(unittest.TestCase):
    """Tests for the `parse_xml` filter against the canned `show vlans`
    XML fixture, using the various spec-file flavours."""

    @unittest.skipIf(sys.version_info[:2] == (2, 6), 'XPath expression not supported in this version')
    def test_parse_xml_to_list_of_dict(self):
        spec_file_path = os.path.join(fixture_path, 'show_vlans_xml_spec.yml')
        parsed = parse_xml(output_xml, spec_file_path)
        expected = {'vlans': [{'name': 'test-1', 'enabled': True, 'state': 'active', 'interface': None, 'vlan_id': 100, 'desc': None},
                              {'name': 'test-2', 'enabled': True, 'state': 'active', 'interface': None, 'vlan_id': None, 'desc': None},
                              {'name': 'test-3', 'enabled': True, 'state': 'active', 'interface': 'em3.0', 'vlan_id': 300, 'desc': 'test vlan-3'},
                              {'name': 'test-4', 'enabled': False, 'state': 'inactive', 'interface': None, 'vlan_id': 400, 'desc': 'test vlan-4'},
                              {'name': 'test-5', 'enabled': False, 'state': 'inactive', 'interface': 'em5.0', 'vlan_id': 500, 'desc': 'test vlan-5'}]}
        self.assertEqual(parsed, expected)

    @unittest.skipIf(sys.version_info[:2] == (2, 6), 'XPath expression not supported in this version')
    def test_parse_xml_to_dict(self):
        # Spec with a key: results keyed by vlan name instead of a list.
        spec_file_path = os.path.join(fixture_path, 'show_vlans_xml_with_key_spec.yml')
        parsed = parse_xml(output_xml, spec_file_path)
        expected = {'vlans': {'test-4': {'name': 'test-4', 'enabled': False, 'state': 'inactive', 'interface': None, 'vlan_id': 400, 'desc': 'test vlan-4'},
                              'test-3': {'name': 'test-3', 'enabled': True, 'state': 'active', 'interface': 'em3.0', 'vlan_id': 300, 'desc': 'test vlan-3'},
                              'test-1': {'name': 'test-1', 'enabled': True, 'state': 'active', 'interface': None, 'vlan_id': 100, 'desc': None},
                              'test-5': {'name': 'test-5', 'enabled': False, 'state': 'inactive', 'interface': 'em5.0', 'vlan_id': 500, 'desc': 'test vlan-5'},
                              'test-2': {'name': 'test-2', 'enabled': True, 'state': 'active', 'interface': None, 'vlan_id': None, 'desc': None}}
                    }
        self.assertEqual(parsed, expected)

    @unittest.skipIf(sys.version_info[:2] == (2, 6), 'XPath expression not supported in this version')
    def test_parse_xml_with_condition_spec(self):
        # Spec with a condition: only matching vlans are returned.
        spec_file_path = os.path.join(fixture_path, 'show_vlans_xml_with_condition_spec.yml')
        parsed = parse_xml(output_xml, spec_file_path)
        expected = {'vlans': [{'name': 'test-5', 'enabled': False, 'state': 'inactive', 'interface': 'em5.0', 'vlan_id': 500, 'desc': 'test vlan-5'}]}
        self.assertEqual(parsed, expected)

    def test_parse_xml_with_single_value_spec(self):
        spec_file_path = os.path.join(fixture_path, 'show_vlans_xml_single_value_spec.yml')
        parsed = parse_xml(output_xml, spec_file_path)
        expected = {'vlans': ['test-1', 'test-2', 'test-3', 'test-4', 'test-5']}
        self.assertEqual(parsed, expected)

    def test_parse_xml_validate_input(self):
        # Bad template path and non-string output must raise with the
        # exact error messages produced by parse_xml.
        spec_file_path = os.path.join(fixture_path, 'show_vlans_xml_spec.yml')
        output = 10
        with self.assertRaises(Exception) as e:
            parse_xml(output_xml, 'junk_path')
        self.assertEqual("unable to locate parse_xml template: junk_path", str(e.exception))

        with self.assertRaises(Exception) as e:
            parse_xml(output, spec_file_path)
        self.assertEqual("parse_xml works on string input, but given input of : %s" % type(output), str(e.exception))
class TestNetworkType5(unittest.TestCase):
    """Tests for type5_pw(), the Cisco type-5 (MD5-crypt) password filter.

    NOTE: the 'inproper' spelling in the expected messages mirrors the
    actual error strings raised by the filter — do not "fix" them here.
    """

    def test_defined_salt_success(self):
        password = 'cisco'
        salt = 'nTc1'
        expected = '$1$nTc1$Z28sUTcWfXlvVe2x.3XAa.'
        parsed = type5_pw(password, salt)
        self.assertEqual(parsed, expected)

    def test_undefined_salt_success(self):
        # With a random salt only the hash length is predictable.
        password = 'cisco'
        parsed = type5_pw(password)
        self.assertEqual(len(parsed), 30)

    def test_wrong_data_type(self):
        with self.assertRaises(Exception) as e:
            type5_pw([])
        self.assertEqual("type5_pw password input should be a string, but was given a input of list", str(e.exception))

        with self.assertRaises(Exception) as e:
            type5_pw({})
        self.assertEqual("type5_pw password input should be a string, but was given a input of dict", str(e.exception))

        with self.assertRaises(Exception) as e:
            type5_pw('pass', [])
        self.assertEqual("type5_pw salt input should be a string, but was given a input of list", str(e.exception))

        with self.assertRaises(Exception) as e:
            type5_pw('pass', {})
        self.assertEqual("type5_pw salt input should be a string, but was given a input of dict", str(e.exception))

    def test_bad_salt_char(self):
        with self.assertRaises(Exception) as e:
            type5_pw('password', '*()')
        self.assertEqual("type5_pw salt used inproper characters, must be one of "
                         "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./", str(e.exception))

        with self.assertRaises(Exception) as e:
            type5_pw('password', 'asd$')
        self.assertEqual("type5_pw salt used inproper characters, must be one of "
                         "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./", str(e.exception))
class TestHashSalt(unittest.TestCase):
    """Tests for hash_salt(), which extracts the salt field from a
    modular-crypt formatted hash string."""

    def test_retrieve_salt(self):
        # MD5-crypt: salt is the third '$'-separated field.
        password = '$1$nTc1$Z28sUTcWfXlvVe2x.3XAa.'
        parsed = hash_salt(password)
        self.assertEqual(parsed, 'nTc1')
        # bcrypt-style: cost factor occupies the salt position.
        password = '$2y$14$wHhBmAgOMZEld9iJtV.'
        parsed = hash_salt(password)
        self.assertEqual(parsed, '14')

    def test_unparseable_salt(self):
        # Missing algorithm id makes the hash unparseable.
        password = '$nTc1$Z28sUTcWfXlvVe2x.3XAa.'
        with self.assertRaises(Exception) as e:
            parsed = hash_salt(password)
        self.assertEqual("Could not parse salt out password correctly from $nTc1$Z28sUTcWfXlvVe2x.3XAa.", str(e.exception))
class TestCompareType5(unittest.TestCase):
    """Tests for comp_type5(), which checks a clear-text password against
    an existing type-5 (MD5-crypt) hash."""

    def test_compare_type5_boolean(self):
        unencrypted_password = 'cisco'
        encrypted_password = '$1$nTc1$Z28sUTcWfXlvVe2x.3XAa.'
        parsed = comp_type5(unencrypted_password, encrypted_password)
        self.assertEqual(parsed, True)

    def test_compare_type5_string(self):
        # With return_original=True the matching hash itself is returned.
        unencrypted_password = 'cisco'
        encrypted_password = '$1$nTc1$Z28sUTcWfXlvVe2x.3XAa.'
        parsed = comp_type5(unencrypted_password, encrypted_password, True)
        self.assertEqual(parsed, '$1$nTc1$Z28sUTcWfXlvVe2x.3XAa.')

    def test_compare_type5_fail(self):
        # Fixed method-name typo (was 'test_compate_type5_fail').
        unencrypted_password = 'invalid_password'
        encrypted_password = '$1$nTc1$Z28sUTcWfXlvVe2x.3XAa.'
        parsed = comp_type5(unencrypted_password, encrypted_password)
        self.assertEqual(parsed, False)
class TestVlanParser(unittest.TestCase):
    """Tests for vlan_parser(), which compresses a VLAN id list into
    Cisco-style range strings, wrapping onto extra lines when needed."""

    def test_compression(self):
        # Consecutive ids collapse to a range.
        raw_list = [1, 2, 3]
        parsed_list = ['1-3']
        self.assertEqual(vlan_parser(raw_list), parsed_list)

    def test_single_line(self):
        raw_list = [100, 1688, 3002, 3003, 3004, 3005, 3102, 3103, 3104, 3105, 3802, 3900, 3998, 3999]
        parsed_list = ['100,1688,3002-3005,3102-3105,3802,3900,3998,3999']
        self.assertEqual(vlan_parser(raw_list), parsed_list)

    def test_multi_line(self):
        # Output exceeding the line width is split across list entries.
        raw_list = [100, 1688, 3002, 3004, 3005, 3050, 3102, 3104, 3105, 3151, 3802, 3900, 3998, 3999]
        parsed_list = ['100,1688,3002,3004,3005,3050,3102,3104,3105,3151', '3802,3900,3998,3999']
        self.assertEqual(vlan_parser(raw_list), parsed_list)
|
bravominski/PennApps2015-HeartMates | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/requests/api.py | 361 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.
    Returns :class:`Response <Response>` object.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) Float describing the timeout of the request in seconds.
    :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'http://httpbin.org/get')
      >>> req
      <Response [200]>
    """
    # A throwaway Session per call: no cookie/connection reuse between calls.
    session = sessions.Session()
    return session.request(method=method, url=url, **kwargs)
def get(url, **kwargs):
    """Sends a GET request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # GET follows redirects unless the caller explicitly disabled it.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('get', url, **kwargs)
def options(url, **kwargs):
    """Sends a OPTIONS request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # OPTIONS follows redirects unless the caller explicitly disabled it.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('options', url, **kwargs)
def head(url, **kwargs):
    """Sends a HEAD request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # Unlike GET, HEAD does not follow redirects by default.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = False
    return request('head', url, **kwargs)
def post(url, data=None, **kwargs):
    """Sends a POST request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    kwargs['data'] = data
    return request('post', url, **kwargs)
def put(url, data=None, **kwargs):
    """Sends a PUT request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    kwargs['data'] = data
    return request('put', url, **kwargs)
def patch(url, data=None, **kwargs):
    """Sends a PATCH request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    kwargs['data'] = data
    return request('patch', url, **kwargs)
def delete(url, **kwargs):
    """Sends a DELETE request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # No special defaults for DELETE; hand everything to request().
    response = request('delete', url, **kwargs)
    return response
|
greg-hellings/FrameworkBenchmarks | refs/heads/master | toolset/setup/linux/setup_util.py | 40 | import re
import os
import sys
import subprocess
import platform
from threading import Thread
from Queue import Queue, Empty
class NonBlockingStreamReader:
    '''
    Enables calling readline in a non-blocking manner with a blocking stream,
    such as the ones returned from subprocess.Popen

    Originally written by Eyal Arubas, who granted permission to use this inside TFB
    See http://eyalarubas.com/python-subproc-nonblock.html
    '''
    def __init__(self, stream, eof_message = None):
        '''
        stream: the stream to read from.
                Usually a process' stdout or stderr.
        eof_message: A message to print to stdout as soon
          as the stream's end is reached. Useful if you
          want to track the exact moment a stream terminates
        '''
        self._s = stream                    # wrapped blocking stream
        self._q = Queue()                   # lines read by the background thread
        self._eof_message = eof_message
        # Sentinel pushed on the queue so readline can tell EOF apart
        # from "nothing available yet".
        self._poisonpill = 'MAGIC_POISONPILL_STRING'

        def _populateQueue(stream, queue):
            # Runs in a daemon thread: blocks on readline and feeds the queue.
            while True:
                line = stream.readline()
                if line: # 'data\n' or '\n'
                    queue.put(line)
                else: # '' e.g. EOF
                    if self._eof_message:
                        sys.stdout.write(self._eof_message + '\n')
                    queue.put(self._poisonpill)
                    return

        self._t = Thread(target = _populateQueue,
            args = (self._s, self._q))
        self._t.daemon = True
        self._t.start()

    def readline(self, timeout = None):
        '''Return the next line, None if nothing is available within
        *timeout* (or immediately when timeout is None), or raise
        EndOfStream once the stream has ended.'''
        try:
            line = self._q.get(block = timeout is not None,
                timeout = timeout)
            if line == self._poisonpill:
                raise EndOfStream
            return line
        except Empty:
            return None
class EndOfStream(Exception):
    """Raised by NonBlockingStreamReader.readline() once the wrapped
    stream has reached EOF and its queue is drained."""
    pass
# Replaces all text found using the regular expression to_replace with the supplied replacement.
def replace_text(file, to_replace, replacement):
    """Rewrite *file* in place, substituting every regex match of
    *to_replace* with *replacement*."""
    with open(file, "r") as src:
        original = src.read()
    updated = re.sub(to_replace, replacement, original)
    with open(file, "w") as dst:
        dst.write(updated)
# Replaces the current process environment with the one found in
# config file. Retains a few original vars (HOME,PATH, etc) by default.
# Optionally allows specification of a command to be run before loading
# the environment, to allow the framework to set environment variables
# Note: This command *cannot* print to stdout!
#
# Note: This will not replace the sudo environment (e.g. subprocess.check_call("sudo <command>")).
# If you must use sudo, consider sudo sh -c ". <config> && your_command"
def replace_environ(config=None, root=None, print_result=False, command='true'):
    """Replace os.environ with the environment produced by sourcing
    *config* in bash, preserving a whitelist of variables.  No-op on
    Windows."""
    if platform.system().lower() == 'windows':
        pass
    else:
        # Clean up our current environment, preserving some important items
        mini_environ = {}
        for envname in ['HOME', 'PATH', 'LANG', 'USER', 'LD_LIBRARY_PATH', 'PYTHONPATH', 'FWROOT', 'TRAVIS']:
            if envname in os.environ:
                mini_environ[envname] = os.environ[envname]
        for key in os.environ:
            if key.startswith(('TFB_', 'TRAVIS_')): # Any TFB_* and TRAVIS_* variables are preserved
                mini_environ[key] = os.environ[key]
        os.environ.clear()

        # Use FWROOT if explicitely provided
        if root is not None:
            mini_environ['FWROOT']=root

        # Run command, source config file, and store resulting environment
        setup_env = "%s && . %s && env" % (command, config)
        env = ""
        try:
            env = subprocess.check_output(setup_env, shell=True, env=mini_environ,
                executable='/bin/bash')
        except subprocess.CalledProcessError:
            # Ensure that an error here does not crash the toolset:
            # restore the preserved variables and bail out.
            print "CRITICAL: Loading %s returned non-zero exit" % config
            for key,value in mini_environ.iteritems():
                os.environ[key]=value
            return

        # Parse the `env` dump line by line back into os.environ.
        for line in env.split('\n'):
            try:
                key, value = line.split('=', 1)
                # If we already have this TFB_ variable, do not overwrite
                if key.startswith('TFB_') and key in mini_environ:
                    os.environ[key]=mini_environ[key]
                else:
                    os.environ[key]=value
            except Exception:
                if not line: # Don't warn for empty line
                    continue
                # Multi-line values (e.g. bash functions) hit this path too.
                print "WARN: Line '%s' from '%s' is not an environment variable" % (line, config)
                continue
        if print_result:
            out = subprocess.check_output('env', shell=True, executable='/bin/bash')
            print "Environment after loading %s" %config
            print out
# Queries the shell for the value of FWROOT
def get_fwroot():
    """Return the framework root directory: a hard-coded path on Windows,
    the FWROOT shell variable elsewhere, falling back to the cwd."""
    if platform.system().lower() == 'windows':
        fwroot = "C:\FrameworkBenchmarks"
        return fwroot
    else:
        try:
            # Use printf to avoid getting a newline
            # Redirect to avoid stderr printing
            fwroot = subprocess.check_output('printf $FWROOT 2> /dev/null', shell=True, executable='/bin/bash')
            return fwroot
        except subprocess.CalledProcessError:
            # Make a last-guess effort ;-)
            return os.getcwd();
# Turns absolute path into path relative to FWROOT
# Assumes path is underneath FWROOT, not above
#
# Useful for clean presentation of paths
# e.g.  /foo/bar/benchmarks/go/install.sh
# v.s.  FWROOT/go/install.sh
def path_relative_to_root(path):
    """Strip the FWROOT prefix off *path* via bash parameter expansion."""
    # Requires bash shell parameter expansion
    return subprocess.check_output("D=%s && printf \"${D#%s}\""%(path, get_fwroot()), shell=True, executable='/bin/bash')
|
alisaifee/AutobahnPython | refs/heads/master | examples/twisted/wamp1/rpc/symmetric/client.py | 17 | ###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys, time
from twisted.python import log
from twisted.internet import reactor
from autobahn.twisted.websocket import connectWS
from autobahn.wamp1.protocol import exportRpc, \
WampClientFactory, \
WampClientProtocol
class MyClientProtocol(WampClientProtocol):
    """WAMPv1 client that also exposes an RPC endpoint of its own.

    Demonstrates symmetric RPC: after connecting, the client registers
    ``getTime`` so the server (or other peers) can call back into it.
    """

    @exportRpc("getTime")
    def getTime(self):
        # Current wall-clock time formatted HH:MM:SS in the local timezone.
        return time.strftime("%H:%M:%S", time.localtime())

    def onSessionOpen(self):
        # Once the WAMP session is up, expose all @exportRpc methods of
        # this instance under the given URI prefix.
        self.registerForRpc(self, "http://example.com/client#")
if __name__ == '__main__':
    # Optional 'debug' CLI argument enables WAMP-level logging to stdout.
    if len(sys.argv) > 1 and sys.argv[1] == 'debug':
        log.startLogging(sys.stdout)
        debug = True
    else:
        debug = False
    # Connect to the local WAMP server and run the Twisted reactor loop.
    factory = WampClientFactory("ws://localhost:9000", debugWamp = debug)
    factory.protocol = MyClientProtocol
    connectWS(factory)
    reactor.run()
|
js0701/chromium-crosswalk | refs/heads/master | tools/telemetry/third_party/altgraph/altgraph_tests/test_graphstat.py | 25 | import unittest
from altgraph import GraphStat
from altgraph import Graph
import sys
class TestDegreesDist (unittest.TestCase):
    """Tests for GraphStat.degree_dist on altgraph Graph objects."""

    def test_simple(self):
        a = Graph.Graph()
        # Empty graph -> empty distribution.
        self.assertEqual(GraphStat.degree_dist(a), [])
        a.add_node(1)
        a.add_node(2)
        a.add_node(3)
        # Three isolated nodes: every degree is 0.
        self.assertEqual(GraphStat.degree_dist(a), GraphStat._binning([0, 0, 0]))
        for x in range(100):
            a.add_node(x)
        # Connect x -> y whenever y divides x, producing a non-trivial
        # degree distribution to exercise the binning code.
        for x in range(1, 100):
            for y in range(1, 50):
                if x % y == 0:
                    a.add_edge(x, y)
        counts_inc = []
        counts_out = []
        for n in a:
            counts_inc.append(a.inc_degree(n))
            counts_out.append(a.out_degree(n))
        # degree_dist must agree with binning the raw degree counts, for
        # both out-degrees (the default) and in-degrees (mode='inc').
        self.assertEqual(GraphStat.degree_dist(a), GraphStat._binning(counts_out))
        self.assertEqual(GraphStat.degree_dist(a, mode='inc'), GraphStat._binning(counts_inc))
class TestBinning (unittest.TestCase):
    """Tests for the private GraphStat._binning histogram helper."""

    def test_simple(self):
        # Binning [0, 100) into 10 bins
        a = list(range(100))
        out = GraphStat._binning(a, limits=(0, 100), bin_num=10)
        # Each entry is (bin midpoint, count); 100 uniform values over
        # 10 bins gives 10 per bin.
        self.assertEqual(out,
                [ (x*1.0, 10) for x in range(5, 100, 10) ])
        # Check that outliers are ignored.
        a = list(range(100))
        out = GraphStat._binning(a, limits=(0, 90), bin_num=9)
        self.assertEqual(out,
                [ (x*1.0, 10) for x in range(5, 90, 10) ])
        # Non-integral bin width: recompute the expected histogram by hand
        # and compare against the helper's output.
        out = GraphStat._binning(a, limits=(0, 100), bin_num=15)
        binSize = 100 / 15.0
        result = [0]*15
        for i in range(100):
            bin = int(i/binSize)
            try:
                result[bin] += 1
            except IndexError:
                # values landing past the last bin are dropped
                pass
        result = [ (i * binSize + binSize/2, result[i]) for i in range(len(result))]
        self.assertEqual(result, out)
if __name__ == "__main__": # pragma: no cover
unittest.main()
|
palerdot/calibre | refs/heads/master | src/calibre/gui2/actions/delete.py | 4 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import errno
from functools import partial
from collections import Counter
from PyQt4.Qt import QObject, QTimer, QModelIndex
from calibre.gui2 import error_dialog, question_dialog
from calibre.gui2.dialogs.delete_matching_from_device import DeleteMatchingFromDeviceDialog
from calibre.gui2.dialogs.confirm_delete import confirm
from calibre.gui2.dialogs.confirm_delete_location import confirm_location
from calibre.gui2.actions import InterfaceAction
from calibre.utils.recycle_bin import can_recycle
single_shot = partial(QTimer.singleShot, 10)
class MultiDeleter(QObject): # {{{
    """Deletes many books from the calibre library incrementally.

    Books are removed one at a time via a chained QTimer single-shot so
    the GUI stays responsive, with a modal progress dialog showing
    status. Failures are collected and reported at the end.
    """

    def __init__(self, gui, ids, callback):
        # gui: the main calibre window; ids: book ids to delete;
        # callback: invoked with the list of successfully deleted ids.
        from calibre.gui2.dialogs.progress import ProgressDialog
        QObject.__init__(self, gui)
        self.model = gui.library_view.model()
        self.ids = ids
        self.permanent = False
        # Sending thousands of files to the Recycle Bin is slow, so for
        # large deletions offer to delete permanently instead.
        if can_recycle and len(ids) > 100:
            if question_dialog(gui, _('Are you sure?'), '<p>'+
                    _('You are trying to delete %d books. '
                        'Sending so many files to the Recycle'
                        ' Bin <b>can be slow</b>. Should calibre skip the'
                        ' Recycle Bin? If you click Yes the files'
                        ' will be <b>permanently deleted</b>.')%len(ids)):
                self.permanent = True
        self.gui = gui
        self.failures = []
        self.deleted_ids = []
        self.callback = callback
        # Kick off the one-at-a-time deletion chain.
        single_shot(self.delete_one)
        self.pd = ProgressDialog(_('Deleting...'), parent=gui,
                cancelable=False, min=0, max=len(self.ids))
        self.pd.setModal(True)
        self.pd.show()

    def delete_one(self):
        # Pop one id, delete it, then schedule the next deletion.
        if not self.ids:
            self.cleanup()
            return
        id_ = self.ids.pop()
        title = 'id:%d'%id_
        try:
            title_ = self.model.db.title(id_, index_is_id=True)
            if title_:
                title = title_
            # commit=False: one commit is done at the end in cleanup().
            self.model.db.delete_book(id_, notify=False, commit=False,
                    permanent=self.permanent)
            self.deleted_ids.append(id_)
        except:
            import traceback
            # Record the failure and continue with the remaining books.
            self.failures.append((id_, title, traceback.format_exc()))
        single_shot(self.delete_one)
        self.pd.value += 1
        self.pd.set_msg(_('Deleted') + ' ' + title)

    def cleanup(self):
        # Commit the batched deletes, refresh views, notify the caller,
        # and report any failures in a single dialog.
        self.pd.hide()
        self.pd = None
        self.model.db.commit()
        self.model.db.clean()
        self.model.books_deleted()
        self.gui.tags_view.recount()
        self.callback(self.deleted_ids)
        if self.failures:
            msg = ['==> '+x[1]+'\n'+x[2] for x in self.failures]
            error_dialog(self.gui, _('Failed to delete'),
                    _('Failed to delete some books, click the Show Details button'
                    ' for details.'), det_msg='\n\n'.join(msg), show=True)
# }}}
class DeleteAction(InterfaceAction):
    """Interface action implementing calibre's 'Remove books' operations.

    Handles removing whole books, individual formats, covers, and
    matching books on a connected device, from both the library view and
    the device views.
    """

    name = 'Remove Books'
    # (button text, icon, tooltip, keyboard shortcut)
    action_spec = (_('Remove books'), 'trash.png', _('Delete books'), 'Del')
    action_type = 'current'
    action_add_menu = True
    action_menu_clone_qaction = _('Remove selected books')
    accepts_drops = True

    def accept_enter_event(self, event, mime_data):
        # Only accept drags that originate from the calibre library view.
        if mime_data.hasFormat("application/calibre+from_library"):
            return True
        return False

    def accept_drag_move_event(self, event, mime_data):
        if mime_data.hasFormat("application/calibre+from_library"):
            return True
        return False

    def drop_event(self, event, mime_data):
        mime = 'application/calibre+from_library'
        if mime_data.hasFormat(mime):
            # Payload is a whitespace-separated list of book ids.
            self.dropped_ids = tuple(map(int, str(mime_data.data(mime)).split()))
            # Defer the deletion so the drop event can complete first.
            QTimer.singleShot(1, self.do_drop)
            return True
        return False

    def do_drop(self):
        # Deferred handler for drop_event: delete the dropped book ids.
        book_ids = self.dropped_ids
        del self.dropped_ids
        if book_ids:
            self.do_library_delete(book_ids)

    def genesis(self):
        # Build the Remove Books menu and wire up all sub-actions.
        self.qaction.triggered.connect(self.delete_books)
        self.delete_menu = self.qaction.menu()
        m = partial(self.create_menu_action, self.delete_menu)
        m('delete-specific',
                _('Remove files of a specific format from selected books..'),
                triggered=self.delete_selected_formats)
        m('delete-except',
                _('Remove all formats from selected books, except...'),
                triggered=self.delete_all_but_selected_formats)
        m('delete-all',
                _('Remove all formats from selected books'),
                triggered=self.delete_all_formats)
        m('delete-covers',
                _('Remove covers from selected books'),
                triggered=self.delete_covers)
        self.delete_menu.addSeparator()
        m('delete-matching',
                _('Remove matching books from device'),
                triggered=self.remove_matching_books_from_device)
        self.qaction.setMenu(self.delete_menu)
        # Maps a device removal job to (paths, view model) so the entries
        # can be cleaned up when the job completes.
        self.delete_memory = {}

    def location_selected(self, loc):
        # Library-only menu entries are disabled while a device view is
        # active (only the first action stays usable).
        enabled = loc == 'library'
        for action in list(self.delete_menu.actions())[1:]:
            action.setEnabled(enabled)

    def _get_selected_formats(self, msg, ids, exclude=False, single=False):
        # Show a format-selection dialog listing the formats present in
        # the given books. Returns the chosen formats, or None on cancel.
        from calibre.gui2.dialogs.select_formats import SelectFormats
        c = Counter()
        db = self.gui.library_view.model().db
        for x in ids:
            fmts_ = db.formats(x, index_is_id=True, verify_formats=False)
            if fmts_:
                for x in frozenset([x.lower() for x in fmts_.split(',')]):
                    c[x] += 1
        d = SelectFormats(c, msg, parent=self.gui, exclude=exclude,
                single=single)
        if d.exec_() != d.Accepted:
            return None
        return d.selected_formats

    def _get_selected_ids(self, err_title=_('Cannot delete')):
        # Return the set of book ids selected in the library view; shows
        # an error dialog and returns an empty set when nothing is selected.
        rows = self.gui.library_view.selectionModel().selectedRows()
        if not rows or len(rows) == 0:
            d = error_dialog(self.gui, err_title, _('No book selected'))
            d.exec_()
            return set([])
        return set(map(self.gui.library_view.model().id, rows))

    def remove_format_by_id(self, book_id, fmt):
        # Delete a single format from a single book, after confirmation.
        title = self.gui.current_db.title(book_id, index_is_id=True)
        if not confirm('<p>'+(_(
            'The %(fmt)s format will be <b>permanently deleted</b> from '
            '%(title)s. Are you sure?')%dict(fmt=fmt, title=title))
                +'</p>', 'library_delete_specific_format', self.gui):
            return
        self.gui.library_view.model().db.remove_format(book_id, fmt,
                index_is_id=True, notify=False)
        self.gui.library_view.model().refresh_ids([book_id])
        self.gui.library_view.model().current_changed(self.gui.library_view.currentIndex(),
                self.gui.library_view.currentIndex())
        self.gui.tags_view.recount()

    def restore_format(self, book_id, original_fmt):
        # Restore a previously saved original format and refresh the views.
        self.gui.current_db.restore_original_format(book_id, original_fmt)
        self.gui.library_view.model().refresh_ids([book_id])
        self.gui.library_view.model().current_changed(self.gui.library_view.currentIndex(),
                self.gui.library_view.currentIndex())
        self.gui.tags_view.recount()

    def delete_selected_formats(self, *args):
        # Remove user-chosen formats from every selected book.
        ids = self._get_selected_ids()
        if not ids:
            return
        fmts = self._get_selected_formats(
            _('Choose formats to be deleted'), ids)
        if not fmts:
            return
        m = self.gui.library_view.model()
        m.db.new_api.remove_formats({book_id:fmts for book_id in ids})
        m.refresh_ids(ids)
        m.current_changed(self.gui.library_view.currentIndex(),
                self.gui.library_view.currentIndex())
        if ids:
            self.gui.tags_view.recount()

    def delete_all_but_selected_formats(self, *args):
        # Keep only the user-chosen formats; never strips a book of all
        # of its formats.
        ids = self._get_selected_ids()
        if not ids:
            return
        fmts = self._get_selected_formats(
            '<p>'+_('Choose formats <b>not</b> to be deleted.<p>Note that '
                'this will never remove all formats from a book.'), ids,
            exclude=True)
        if fmts is None:
            return
        m = self.gui.library_view.model()
        removals = {}
        for id in ids:
            bfmts = m.db.formats(id, index_is_id=True)
            if bfmts is None:
                continue
            bfmts = set([x.lower() for x in bfmts.split(',')])
            rfmts = bfmts - set(fmts)
            if bfmts - rfmts:
                # Do not delete if it will leave the book with no
                # formats
                removals[id] = rfmts
        if removals:
            m.db.new_api.remove_formats(removals)
            m.refresh_ids(ids)
            m.current_changed(self.gui.library_view.currentIndex(),
                    self.gui.library_view.currentIndex())
            if ids:
                self.gui.tags_view.recount()

    def delete_all_formats(self, *args):
        # Remove every format from the selected books; metadata is kept.
        ids = self._get_selected_ids()
        if not ids:
            return
        if not confirm('<p>'+_('<b>All formats</b> for the selected books will '
                               'be <b>deleted</b> from your library.<br>'
                               'The book metadata will be kept. Are you sure?')
                +'</p>', 'delete_all_formats', self.gui):
            return
        db = self.gui.library_view.model().db
        removals = {}
        for id in ids:
            fmts = db.formats(id, index_is_id=True, verify_formats=False)
            if fmts:
                removals[id] = fmts.split(',')
        if removals:
            db.new_api.remove_formats(removals)
            self.gui.library_view.model().refresh_ids(ids)
            self.gui.library_view.model().current_changed(self.gui.library_view.currentIndex(),
                    self.gui.library_view.currentIndex())
            if ids:
                self.gui.tags_view.recount()

    def remove_matching_books_from_device(self, *args):
        # Delete from the device those books matching the library selection.
        if not self.gui.device_manager.is_device_present:
            d = error_dialog(self.gui, _('Cannot delete books'),
                    _('No device is connected'))
            d.exec_()
            return
        ids = self._get_selected_ids()
        if not ids:
            #_get_selected_ids shows a dialog box if nothing is selected, so we
            # do not need to show one here
            return
        # Collect matching device paths per storage location.
        to_delete = {}
        some_to_delete = False
        for model,name in ((self.gui.memory_view.model(), _('Main memory')),
                (self.gui.card_a_view.model(), _('Storage Card A')),
                (self.gui.card_b_view.model(), _('Storage Card B'))):
            to_delete[name] = (model, model.paths_for_db_ids(ids))
            if len(to_delete[name][1]) > 0:
                some_to_delete = True
        if not some_to_delete:
            d = error_dialog(self.gui, _('No books to delete'),
                    _('None of the selected books are on the device'))
            d.exec_()
            return
        d = DeleteMatchingFromDeviceDialog(self.gui, to_delete)
        if d.exec_():
            # Group the confirmed deletions per view model and launch one
            # removal job per storage location.
            paths = {}
            ids = {}
            for (model, id, path) in d.result:
                if model not in paths:
                    paths[model] = []
                    ids[model] = []
                paths[model].append(path)
                ids[model].append(id)
            for model in paths:
                job = self.gui.remove_paths(paths[model])
                self.delete_memory[job] = (paths[model], model)
                model.mark_for_deletion(job, ids[model], rows_are_ids=True)
            self.gui.status_bar.show_message(_('Deleting books from device.'), 1000)

    def delete_covers(self, *args):
        # Remove the cover image of every selected book.
        ids = self._get_selected_ids()
        if not ids:
            return
        for id in ids:
            self.gui.library_view.model().db.remove_cover(id)
        self.gui.library_view.model().refresh_ids(ids)
        self.gui.library_view.model().current_changed(self.gui.library_view.currentIndex(),
                self.gui.library_view.currentIndex())

    def library_ids_deleted(self, ids_deleted, current_row=None):
        # Update device on-device markers and the current row after a
        # library deletion.
        view = self.gui.library_view
        for v in (self.gui.memory_view, self.gui.card_a_view, self.gui.card_b_view):
            if v is None:
                continue
            v.model().clear_ondevice(ids_deleted)
        if current_row is not None:
            ci = view.model().index(current_row, 0)
            if not ci.isValid():
                # Current row is after the last row, set it to the last row
                current_row = view.row_count() - 1
            view.set_current_row(current_row)
        if view.model().rowCount(QModelIndex()) < 1:
            self.gui.book_details.reset_info()

    def library_ids_deleted2(self, ids_deleted, next_id=None):
        # Convenience wrapper: translate a book id into a row number and
        # delegate to library_ids_deleted.
        view = self.gui.library_view
        current_row = None
        if next_id is not None:
            rmap = view.ids_to_rows([next_id])
            current_row = rmap.get(next_id, None)
        self.library_ids_deleted(ids_deleted, current_row=current_row)

    def do_library_delete(self, to_delete_ids):
        # Delete the given ids from the library (and optionally from a
        # connected device that also holds them).
        view = self.gui.current_view()
        # Ask the user if they want to delete the book from the library or device if it is in both.
        if self.gui.device_manager.is_device_present:
            on_device = False
            on_device_ids = self._get_selected_ids()
            for id in on_device_ids:
                res = self.gui.book_on_device(id)
                if res[0] or res[1] or res[2]:
                    on_device = True
                if on_device:
                    break
            if on_device:
                loc = confirm_location('<p>' + _('Some of the selected books are on the attached device. '
                                            '<b>Where</b> do you want the selected files deleted from?'),
                        self.gui)
                if not loc:
                    return
                elif loc == 'dev':
                    self.remove_matching_books_from_device()
                    return
                elif loc == 'both':
                    self.remove_matching_books_from_device()
        # The following will run if the selected books are not on a connected device.
        # The user has selected to delete from the library or the device and library.
        if not confirm('<p>'+_('The %d selected book(s) will be '
                               '<b>permanently deleted</b> and the files '
                               'removed from your calibre library. Are you sure?')%len(to_delete_ids)
                +'</p>', 'library_delete_books', self.gui):
            return
        next_id = view.next_id
        if len(to_delete_ids) < 5:
            # Small deletions happen synchronously.
            try:
                view.model().delete_books_by_id(to_delete_ids)
            except IOError as err:
                if err.errno == errno.EACCES:
                    import traceback
                    fname = getattr(err, 'filename', 'file') or 'file'
                    return error_dialog(self.gui, _('Permission denied'),
                            _('Could not access %s. Is it being used by another'
                            ' program? Click "Show details" for more information.')%fname, det_msg=traceback.format_exc(),
                            show=True)
            self.library_ids_deleted2(to_delete_ids, next_id=next_id)
        else:
            # Large deletions run incrementally via MultiDeleter so the
            # GUI stays responsive.
            self.__md = MultiDeleter(self.gui, to_delete_ids,
                    partial(self.library_ids_deleted2, next_id=next_id))

    def delete_books(self, *args):
        '''
        Delete selected books from device or library.
        '''
        view = self.gui.current_view()
        rows = view.selectionModel().selectedRows()
        if not rows or len(rows) == 0:
            return
        # Library view is visible.
        if self.gui.stack.currentIndex() == 0:
            to_delete_ids = [view.model().id(r) for r in rows]
            self.do_library_delete(to_delete_ids)
        # Device view is visible.
        else:
            if self.gui.stack.currentIndex() == 1:
                view = self.gui.memory_view
            elif self.gui.stack.currentIndex() == 2:
                view = self.gui.card_a_view
            else:
                view = self.gui.card_b_view
            paths = view.model().paths(rows)
            ids = view.model().indices(rows)
            if not confirm('<p>'+_('The %d selected book(s) will be '
                                   '<b>permanently deleted</b> '
                                   'from your device. Are you sure?')%len(paths)
                    +'</p>', 'device_delete_books', self.gui):
                return
            job = self.gui.remove_paths(paths)
            self.delete_memory[job] = (paths, view.model())
            view.model().mark_for_deletion(job, ids, rows_are_ids=True)
            self.gui.status_bar.show_message(_('Deleting books from device.'), 1000)
|
dennybaa/st2 | refs/heads/master | st2common/tests/unit/test_datastore.py | 1 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest2
import mock
from st2common.services.datastore import DatastoreService
from st2client.models.keyvalue import KeyValuePair
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
RESOURCES_DIR = os.path.abspath(os.path.join(CURRENT_DIR, '../resources'))
class DatastoreServiceTestCase(unittest2.TestCase):
    """Unit tests for DatastoreService key-value operations.

    All API access is mocked out via _set_mock_api_client, so the tests
    only verify how the service calls the client and handles its results.
    """

    def setUp(self):
        super(DatastoreServiceTestCase, self).setUp()
        self._datastore_service = DatastoreService(logger=mock.Mock(),
                                                   pack_name='core',
                                                   class_name='TestSensor',
                                                   api_username='sensor_service')
        self._datastore_service._get_api_client = mock.Mock()

    def test_datastore_operations_list_values(self):
        # Verify prefix filtering
        # local=True keys are namespaced with '<pack>.<class>:'.
        mock_api_client = mock.Mock()
        mock_api_client.keys.get_all.return_value = []
        self._set_mock_api_client(mock_api_client)
        self._datastore_service.list_values(local=True, prefix=None)
        mock_api_client.keys.get_all.assert_called_with(prefix='core.TestSensor:')
        self._datastore_service.list_values(local=True, prefix='ponies')
        mock_api_client.keys.get_all.assert_called_with(prefix='core.TestSensor:ponies')
        self._datastore_service.list_values(local=False, prefix=None)
        mock_api_client.keys.get_all.assert_called_with(prefix=None)
        self._datastore_service.list_values(local=False, prefix='ponies')
        mock_api_client.keys.get_all.assert_called_with(prefix='ponies')
        # No values in the datastore
        mock_api_client = mock.Mock()
        mock_api_client.keys.get_all.return_value = []
        self._set_mock_api_client(mock_api_client)
        values = self._datastore_service.list_values(local=True)
        self.assertEqual(values, [])
        values = self._datastore_service.list_values(local=False)
        self.assertEqual(values, [])
        # Values in the datastore
        kvp1 = KeyValuePair()
        kvp1.name = 'test1'
        kvp1.value = 'bar'
        kvp2 = KeyValuePair()
        kvp2.name = 'test2'
        kvp2.value = 'bar'
        mock_return_value = [kvp1, kvp2]
        mock_api_client.keys.get_all.return_value = mock_return_value
        self._set_mock_api_client(mock_api_client)
        values = self._datastore_service.list_values(local=True)
        self.assertEqual(len(values), 2)
        self.assertEqual(values, mock_return_value)

    def test_datastore_operations_get_value(self):
        # get_value should return the .value of the KeyValuePair.
        mock_api_client = mock.Mock()
        kvp1 = KeyValuePair()
        kvp1.name = 'test1'
        kvp1.value = 'bar'
        mock_api_client.keys.get_by_id.return_value = kvp1
        self._set_mock_api_client(mock_api_client)
        value = self._datastore_service.get_value(name='test1', local=False)
        self.assertEqual(value, kvp1.value)

    def test_datastore_operations_set_value(self):
        # set_value should propagate the client's success result.
        mock_api_client = mock.Mock()
        mock_api_client.keys.update.return_value = True
        self._set_mock_api_client(mock_api_client)
        value = self._datastore_service.set_value(name='test1', value='foo', local=False)
        self.assertTrue(value)

    def test_datastore_operations_delete_value(self):
        # delete_value should propagate the client's success result.
        mock_api_client = mock.Mock()
        mock_api_client.keys.delete.return_value = True
        self._set_mock_api_client(mock_api_client)
        value = self._datastore_service.delete_value(name='test', local=False)
        self.assertTrue(value)

    def _set_mock_api_client(self, mock_api_client):
        # Replace the service's API-client factory with one returning the mock.
        mock_method = mock.Mock()
        mock_method.return_value = mock_api_client
        self._datastore_service._get_api_client = mock_method
|
spektom/incubator-airflow | refs/heads/master | airflow/sensors/__init__.py | 15 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Sensors."""
|
luceatnobis/youtube-dl | refs/heads/master | youtube_dl/extractor/la7.py | 53 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
js_to_json,
smuggle_url,
)
class LA7IE(InfoExtractor):
    """Extractor for la7.it and tg.la7.it video pages.

    Parses the embedded ``videoLa7({...})`` JS player config and delegates
    the actual media extraction to the Kaltura extractor.
    """
    IE_NAME = 'la7.it'
    _VALID_URL = r'''(?x)(https?://)?(?:
        (?:www\.)?la7\.it/([^/]+)/(?:rivedila7|video)/|
        tg\.la7\.it/repliche-tgla7\?id=
    )(?P<id>.+)'''

    _TESTS = [{
        # 'src' is a plain URL
        'url': 'http://www.la7.it/crozza/video/inccool8-02-10-2015-163722',
        'md5': '8b613ffc0c4bf9b9e377169fc19c214c',
        'info_dict': {
            'id': 'inccool8-02-10-2015-163722',
            'ext': 'mp4',
            'title': 'Inc.Cool8',
            'description': 'Benvenuti nell\'incredibile mondo della INC. COOL. 8. dove “INC.” sta per “Incorporated” “COOL” sta per “fashion” ed Eight sta per il gesto atletico',
            'thumbnail': 're:^https?://.*',
            'uploader_id': 'kdla7pillole@iltrovatore.it',
            'timestamp': 1443814869,
            'upload_date': '20151002',
        },
    }, {
        # 'src' is a dictionary
        'url': 'http://tg.la7.it/repliche-tgla7?id=189080',
        'md5': '6b0d8888d286e39870208dfeceaf456b',
        'info_dict': {
            'id': '189080',
            'ext': 'mp4',
            'title': 'TG LA7',
        },
    }, {
        'url': 'http://www.la7.it/omnibus/rivedila7/omnibus-news-02-07-2016-189077',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The page embeds the player configuration as a JS object literal:
        # videoLa7({...}); convert it to JSON before parsing.
        player_data = self._parse_json(
            self._search_regex(r'videoLa7\(({[^;]+})\);', webpage, 'player data'),
            video_id, transform_source=js_to_json)
        # Hand off to the Kaltura extractor via a smuggled service URL.
        return {
            '_type': 'url_transparent',
            'url': smuggle_url('kaltura:103:%s' % player_data['vid'], {
                'service_url': 'http://kdam.iltrovatore.it',
            }),
            'id': video_id,
            'title': player_data['title'],
            'description': self._og_search_description(webpage, default=None),
            'thumbnail': player_data.get('poster'),
            'ie_key': 'Kaltura',
        }
|
YihaoLu/statsmodels | refs/heads/master | statsmodels/sandbox/regression/tools.py | 33 | '''gradient/Jacobian of normal and t loglikelihood
use chain rule
normal derivative wrt mu, sigma and beta
new version: loc-scale distributions, derivative wrt loc, scale
also includes "standardized" t distribution (for use in GARCH)
TODO:
* use sympy for derivative of loglike wrt shape parameters
it works for df of t distribution dlog(gamma(a))da = polygamma(0,a) check
polygamma is available in scipy.special
* get loc-scale example to work with mean = X*b
* write some full unit test examples
A: josef-pktd
'''
from __future__ import print_function
import numpy as np
from scipy import special
from scipy.special import gammaln
def norm_lls(y, params):
    """Per-observation log-likelihood of a normal distribution.

    Parameters
    ----------
    y : array, 1d
        observed values
    params : array, (nobs, 2)
        per-observation mean and variance, columns (mu, sigma2)

    Returns
    -------
    lls : array
        log-likelihood contribution of each observation
    """
    mean, var = params.T
    resid = y - mean
    return -0.5 * (np.log(2 * np.pi) + np.log(var) + resid ** 2 / var)
def norm_lls_grad(y, params):
    """Score (gradient) of the normal log-likelihood per observation.

    Parameters
    ----------
    y : array, 1d
        observed values
    params : array, (nobs, 2)
        per-observation mean and variance, columns (mu, sigma2)

    Returns
    -------
    grad : array (nobs, 2)
        column 0: derivative w.r.t. the mean; column 1: derivative
        w.r.t. sigma, evaluated at sigma2 = sigma**2 (i.e. the sigma
        derivative expressed through the variance parameter).
    """
    mean, var = params.T
    resid = y - mean
    d_mean = resid / var
    d_sigma = (resid ** 2 / var - 1.0) / np.sqrt(var)
    return np.column_stack((d_mean, d_sigma))
def mean_grad(x, beta):
    """Jacobian of the linear mean x*beta with respect to beta.

    For a linear mean function the Jacobian is just the design matrix
    ``x``; ``beta`` is accepted only to keep a uniform call signature.
    """
    return x
def normgrad(y, x, params):
    """Score of the normal log-likelihood w.r.t. regression coefficients.

    Parameters
    ----------
    y : array, 1d
        observations with mean x*beta and variance sigma2
    x : array, 2d
        design matrix, observations in rows, variables in columns
    params : array_like, (nvars + 1)
        regression coefficients followed by the variance, (beta, sigma2)

    Returns
    -------
    grad : array (nobs, nvars + 1)
        per-observation derivatives; the leading columns correspond to
        the elements of beta.

    Notes
    -----
    NOTE(review): the last column reuses the mean-derivative column
    (``[:, :1]``) of the inner gradient rather than the variance
    derivative (``[:, 1:]``); preserved exactly as in the original --
    verify intent before relying on the scale column.
    TODO: heteroscedasticity would require sigma as a 1d array.
    """
    coeffs = params[:-1]
    sigma2 = params[-1] * np.ones((len(y), 1))
    jac_mean = mean_grad(x, coeffs)
    loc = np.dot(x, coeffs)
    inner = norm_lls_grad(y, np.column_stack((loc, sigma2)))
    return np.column_stack((inner[:, :1] * jac_mean, inner[:, :1]))
def tstd_lls(y, params, df):
    """Log-likelihood of the standardized t distribution (unit variance).

    Parameters
    ----------
    y : array, 1d
        observed values
    params : array, (nobs, 2)
        per-observation mean and variance, columns (mu, sigma2)
    df : integer
        degrees of freedom of the t distribution

    Returns
    -------
    lls : array
        per-observation log-likelihood contribution (GARCH-style
        parameterization: sigma2 is the variance of y)
    """
    loc, var = params.T
    nu = df * 1.0
    const = gammaln((nu + 1) / 2.) - gammaln(nu / 2.) - 0.5 * np.log((nu - 2) * np.pi)
    quad = (nu + 1) / 2. * np.log(1. + (y - loc) ** 2 / (nu - 2) / var)
    return const - quad - 0.5 * np.log(var)
def norm_dlldy(y):
    """Derivative of the standard normal log-pdf with respect to y."""
    # d/dy [-0.5 * y**2 - const] = -y
    return np.negative(y)
def ts_dlldy(y, df):
    '''derivative of log pdf of standardized (?) t with respect to y

    NOTE(review): this definition is shadowed by a second ``ts_dlldy``
    defined later in this module, so this version is dead code at import
    time; kept for reference.

    Notes
    -----
    parameterized for garch, with mean 0 and variance 1
    '''
    #(df+1)/2. / (1 + y**2/(df-2.)) * 2.*y/(df-2.)
    #return -(df+1)/(df-2.) / (1 + y**2/(df-2.)) * y
    return -(df+1)/(df) / (1 + y**2/(df)) * y
def tstd_pdf(x, df):
    '''pdf for standardized (not standard) t distribution, variance is one

    Parameters
    ----------
    x : array_like
        points at which the density is evaluated
    df : array_like
        degrees of freedom of the t distribution

    Returns
    -------
    Px : array
        density of the unit-variance t distribution at x
    '''
    r = np.array(df*1.0)
    # BUG FIX: the original used a bare `pi`, which is not defined in this
    # module and raised NameError on every call; use np.pi instead.
    Px = np.exp(special.gammaln((r+1)/2.)-special.gammaln(r/2.))/np.sqrt((r-2)*np.pi)
    Px /= (1+(x**2)/(r-2))**((r+1)/2.)
    return Px
def ts_lls(y, params, df):
    '''t loglikelihood given observations and mean mu and variance sigma2 = 1

    Parameters
    ----------
    y : array, 1d
        normally distributed random variable
    params: array, (nobs, 2)
        array of mean, variance (mu, sigma2) with observations in rows
    df : integer
        degrees of freedom of the t distribution

    Returns
    -------
    lls : array
        contribution to loglikelihood for each observation

    Notes
    -----
    parameterized for garch
    normalized/rescaled so that sigma2 is the variance

    >>> df = 10; sigma = 1.
    >>> stats.t.stats(df, loc=0., scale=sigma*np.sqrt((df-2.)/df))
    (array(0.0), array(1.0))
    >>> sigma = np.sqrt(2.)
    >>> stats.t.stats(df, loc=0., scale=sigma*np.sqrt((df-2.)/df))
    (array(0.0), array(2.0))
    '''
    # BUG FIX: removed leftover debug statement `print(y, params, df)` that
    # wrote to stdout on every call. Also fixed the doctest typo
    # `sigma.*np.sqrt` -> `sigma*np.sqrt`.
    mu, sigma2 = params.T
    df = df*1.0
    lls = gammaln((df+1)/2.) - gammaln(df/2.) - 0.5*np.log((df)*np.pi)
    lls -= (df+1.)/2. * np.log(1. + (y-mu)**2/(df)/sigma2) + 0.5 * np.log(sigma2)
    return lls
def ts_dlldy(y, df):
    """Derivative of the (location-0, scale-1) t log-pdf with respect to y.

    Parameters
    ----------
    y : array_like
        points at which the derivative is evaluated
    df : array_like
        degrees of freedom, shape parameter of the t log-likelihood

    Returns
    -------
    dlldy : array
        d log f(y) / dy evaluated at each point in y

    Notes
    -----
    location 0 and scale 1; the variance of this parameterization is
    df/(df-2).
    """
    nu = df * 1.
    return -(nu + 1) / nu * y / (1 + y ** 2 / nu)
def tstd_dlldy(y, df):
    """Derivative of the standardized (unit-variance) t log-pdf w.r.t. y.

    Parameters
    ----------
    y : array_like
        points at which the derivative is evaluated
    df : array_like
        degrees of freedom, shape parameter of the t log-likelihood

    Returns
    -------
    dlldy : array
        d log f(y) / dy evaluated at each point in y

    Notes
    -----
    GARCH parameterization, standardized so the variance equals 1.
    """
    shifted = df - 2.
    return -(df + 1) / shifted * y / (1 + y ** 2 / shifted)
def locscale_grad(y, loc, scale, dlldy, *args):
    """Derivatives of a loc-scale log-likelihood w.r.t. loc and scale.

    Parameters
    ----------
    y : array_like
        points at which the log-likelihood derivatives are evaluated
    loc : float
        location parameter of the distribution
    scale : float
        scale parameter of the distribution
    dlldy : function
        derivative of the standardized log-pdf w.r.t. its argument
    args : array_like
        extra shape parameters forwarded to dlldy

    Returns
    -------
    dlldloc : array
        derivative of the log-likelihood w.r.t. loc at each y
    dlldscale : array
        derivative of the log-likelihood w.r.t. scale at each y
    """
    z = (y - loc) / scale  # standardized residual
    score = dlldy(z, *args)
    d_loc = -score / scale
    d_scale = -1. / scale - score * (y - loc) / scale ** 2
    return d_loc, d_scale
if __name__ == '__main__':
    # Ad-hoc check script: compares the analytic gradients defined above
    # against numerical derivatives computed with scipy.misc.derivative,
    # using scipy.stats densities as ground truth.
    verbose = 0
    if verbose:
        sig = 0.1
        beta = np.ones(2)
        rvs = np.random.randn(10,3)
        x = rvs[:,1:]
        y = np.dot(x,beta) + sig*rvs[:,0]
        params = [1,1,1]
        print(normgrad(y, x, params))
        dllfdbeta = (y-np.dot(x, beta))[:,None]*x #for sigma = 1
        print(dllfdbeta)
        print(locscale_grad(y, np.dot(x, beta), 1, norm_dlldy))
        print(y-np.dot(x, beta))

    from scipy import stats, misc

    # Reference log-likelihoods built directly from scipy.stats. The
    # *loc/*scale variants reorder the arguments so misc.derivative can
    # differentiate with respect to loc or scale respectively.
    def llt(y,loc,scale,df):
        return np.log(stats.t.pdf(y, df, loc=loc, scale=scale))
    def lltloc(loc,y,scale,df):
        return np.log(stats.t.pdf(y, df, loc=loc, scale=scale))
    def lltscale(scale,y,loc,df):
        return np.log(stats.t.pdf(y, df, loc=loc, scale=scale))
    def llnorm(y,loc,scale):
        return np.log(stats.norm.pdf(y, loc=loc, scale=scale))
    def llnormloc(loc,y,scale):
        return np.log(stats.norm.pdf(y, loc=loc, scale=scale))
    def llnormscale(scale,y,loc):
        return np.log(stats.norm.pdf(y, loc=loc, scale=scale))

    if verbose:
        # Side-by-side printouts of numerical vs analytic gradients.
        print('\ngradient of t')
        print(misc.derivative(llt, 1, dx=1e-6, n=1, args=(0,1,10), order=3))
        print('t ', locscale_grad(1, 0, 1, tstd_dlldy, 10))
        print('ts', locscale_grad(1, 0, 1, ts_dlldy, 10))
        print(misc.derivative(llt, 1.5, dx=1e-10, n=1, args=(0,1,20), order=3),)
        print('ts', locscale_grad(1.5, 0, 1, ts_dlldy, 20))
        print(misc.derivative(llt, 1.5, dx=1e-10, n=1, args=(0,2,20), order=3),)
        print('ts', locscale_grad(1.5, 0, 2, ts_dlldy, 20))
        print(misc.derivative(llt, 1.5, dx=1e-10, n=1, args=(1,2,20), order=3),)
        print('ts', locscale_grad(1.5, 1, 2, ts_dlldy, 20))
        print(misc.derivative(lltloc, 1, dx=1e-10, n=1, args=(1.5,2,20), order=3),)
        print(misc.derivative(lltscale, 2, dx=1e-10, n=1, args=(1.5,1,20), order=3))
        y,loc,scale,df = 1.5, 1, 2, 20
        print('ts', locscale_grad(y,loc,scale, ts_dlldy, 20))
        print(misc.derivative(lltloc, loc, dx=1e-10, n=1, args=(y,scale,df), order=3),)
        print(misc.derivative(lltscale, scale, dx=1e-10, n=1, args=(y,loc,df), order=3))
        print('\ngradient of norm')
        print(misc.derivative(llnorm, 1, dx=1e-6, n=1, args=(0,1), order=3))
        print(locscale_grad(1, 0, 1, norm_dlldy))
        y,loc,scale = 1.5, 1, 2
        print('ts', locscale_grad(y,loc,scale, norm_dlldy))
        print(misc.derivative(llnormloc, loc, dx=1e-10, n=1, args=(y,scale), order=3),)
        print(misc.derivative(llnormscale, scale, dx=1e-10, n=1, args=(y,loc), order=3))
        y,loc,scale = 1.5, 0, 1
        print('ts', locscale_grad(y,loc,scale, norm_dlldy))
        print(misc.derivative(llnormloc, loc, dx=1e-10, n=1, args=(y,scale), order=3),)
        print(misc.derivative(llnormscale, scale, dx=1e-10, n=1, args=(y,loc), order=3))
        #print('still something wrong with handling of scale and variance'
        #looks ok now
        print('\nloglike of t')
        print(tstd_lls(1, np.array([0,1]), 100), llt(1,0,1,100), 'differently standardized')
        print(tstd_lls(1, np.array([0,1]), 10), llt(1,0,1,10), 'differently standardized')
        print(ts_lls(1, np.array([0,1]), 10), llt(1,0,1,10))
        print(tstd_lls(1, np.array([0,1.*10./8.]), 10), llt(1.,0,1.,10))
        print(ts_lls(1, np.array([0,1]), 100), llt(1,0,1,100))
        print(tstd_lls(1, np.array([0,1]), 10), llt(1,0,1.*np.sqrt(8/10.),10))

    # Unit-test style checks: analytic normal gradients must match the
    # numerical derivatives over a grid of (loc, scale) pairs.
    from numpy.testing import assert_almost_equal
    params =[(0, 1), (1.,1.), (0.,2.), ( 1., 2.)]
    yt = np.linspace(-2.,2.,11)
    for loc,scale in params:
        dlldlo = misc.derivative(llnormloc, loc, dx=1e-10, n=1, args=(yt,scale), order=3)
        dlldsc = misc.derivative(llnormscale, scale, dx=1e-10, n=1, args=(yt,loc), order=3)
        gr = locscale_grad(yt, loc, scale, norm_dlldy)
        assert_almost_equal(dlldlo, gr[0], 5, err_msg='deriv loc')
        assert_almost_equal(dlldsc, gr[1], 5, err_msg='deriv scale')
    # Same checks for the t distribution over several df values, plus a
    # comparison of the log-likelihood functions against scipy.stats.t.
    for df in [3, 10, 100]:
        for loc,scale in params:
            dlldlo = misc.derivative(lltloc, loc, dx=1e-10, n=1, args=(yt,scale,df), order=3)
            dlldsc = misc.derivative(lltscale, scale, dx=1e-10, n=1, args=(yt,loc,df), order=3)
            gr = locscale_grad(yt, loc, scale, ts_dlldy, df)
            assert_almost_equal(dlldlo, gr[0], 4, err_msg='deriv loc')
            assert_almost_equal(dlldsc, gr[1], 4, err_msg='deriv scale')
            assert_almost_equal(ts_lls(yt, np.array([loc, scale**2]), df),
                                llt(yt,loc,scale,df), 5,
                                err_msg='loglike')
            assert_almost_equal(tstd_lls(yt, np.array([loc, scale**2]), df),
                                llt(yt,loc,scale*np.sqrt((df-2.)/df),df), 5,
                                err_msg='loglike')
|
chris-chris/tensorflow | refs/heads/master | tensorflow/contrib/tensor_forest/python/constants.py | 81 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants used by tensorforest. Some of these map to values in C++ ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# If tree[i][0] equals this value, then i is a leaf node.
LEAF_NODE = -1
|
sestrella/ansible | refs/heads/devel | test/units/modules/storage/netapp/test_netapp_e_volume.py | 21 | # coding=utf-8
# (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
try:
from unittest import mock
except ImportError:
import mock
from ansible.module_utils.netapp import NetAppESeriesModule
from ansible.modules.storage.netapp.netapp_e_volume import NetAppESeriesVolume
from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
class NetAppESeriesVolumeTest(ModuleTestCase):
REQUIRED_PARAMS = {"api_username": "username",
"api_password": "password",
"api_url": "http://localhost/devmgr/v2",
"ssid": "1",
"validate_certs": "no"}
THIN_VOLUME_RESPONSE = [{"capacity": "1288490188800",
"volumeRef": "3A000000600A098000A4B28D000010475C405428",
"status": "optimal",
"protectionType": "type1Protection",
"maxVirtualCapacity": "281474976710656",
"initialProvisionedCapacity": "4294967296",
"currentProvisionedCapacity": "4294967296",
"provisionedCapacityQuota": "1305670057984",
"growthAlertThreshold": 85,
"expansionPolicy": "automatic",
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000001000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "volume"}],
"dataAssurance": True,
"segmentSize": 131072,
"diskPool": True,
"listOfMappings": [],
"mapped": False,
"currentControllerId": "070000000000000000000001",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 0},
"name": "thin_volume",
"id": "3A000000600A098000A4B28D000010475C405428"}]
VOLUME_GET_RESPONSE = [{"offline": False,
"raidLevel": "raid6",
"capacity": "214748364800",
"reconPriority": 1,
"segmentSize": 131072,
"volumeRef": "02000000600A098000A4B9D100000F095C2F7F31",
"status": "optimal",
"protectionInformationCapable": False,
"protectionType": "type0Protection",
"diskPool": True,
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "Clare"}],
"dataAssurance": False,
"currentControllerId": "070000000000000000000002",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 0},
"thinProvisioned": False,
"totalSizeInBytes": "214748364800",
"name": "Matthew",
"id": "02000000600A098000A4B9D100000F095C2F7F31"},
{"offline": False,
"raidLevel": "raid6",
"capacity": "107374182400",
"reconPriority": 1,
"segmentSize": 131072,
"volumeRef": "02000000600A098000A4B28D00000FBE5C2F7F26",
"status": "optimal",
"protectionInformationCapable": False,
"protectionType": "type0Protection",
"diskPool": True,
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "Samantha"}],
"dataAssurance": False,
"currentControllerId": "070000000000000000000001",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 0},
"thinProvisioned": False,
"totalSizeInBytes": "107374182400",
"name": "Samantha",
"id": "02000000600A098000A4B28D00000FBE5C2F7F26"},
{"offline": False,
"raidLevel": "raid6",
"capacity": "107374182400",
"segmentSize": 131072,
"volumeRef": "02000000600A098000A4B9D100000F0B5C2F7F40",
"status": "optimal",
"protectionInformationCapable": False,
"protectionType": "type0Protection",
"volumeGroupRef": "04000000600A098000A4B9D100000F085C2F7F26",
"diskPool": True,
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "Micah"}],
"dataAssurance": False,
"currentControllerId": "070000000000000000000002",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 0},
"thinProvisioned": False,
"totalSizeInBytes": "107374182400",
"name": "Micah",
"id": "02000000600A098000A4B9D100000F0B5C2F7F40"}]
STORAGE_POOL_GET_RESPONSE = [{"offline": False,
"raidLevel": "raidDiskPool",
"volumeGroupRef": "04000000600A",
"securityType": "capable",
"protectionInformationCapable": False,
"protectionInformationCapabilities": {"protectionInformationCapable": True,
"protectionType": "type2Protection"},
"volumeGroupData": {"type": "diskPool",
"diskPoolData": {"reconstructionReservedDriveCount": 1,
"reconstructionReservedAmt": "296889614336",
"reconstructionReservedDriveCountCurrent": 1,
"poolUtilizationWarningThreshold": 0,
"poolUtilizationCriticalThreshold": 85,
"poolUtilizationState": "utilizationOptimal",
"unusableCapacity": "0",
"degradedReconstructPriority": "high",
"criticalReconstructPriority": "highest",
"backgroundOperationPriority": "low",
"allocGranularity": "4294967296"}},
"reservedSpaceAllocated": False,
"securityLevel": "fde",
"usedSpace": "863288426496",
"totalRaidedSpace": "2276332666880",
"raidStatus": "optimal",
"freeSpace": "1413044240384",
"drivePhysicalType": "sas",
"driveMediaType": "hdd",
"diskPool": True,
"id": "04000000600A098000A4B9D100000F085C2F7F26",
"name": "employee_data_storage_pool"},
{"offline": False,
"raidLevel": "raid1",
"volumeGroupRef": "04000000600A098000A4B28D00000FBD5C2F7F19",
"state": "complete",
"securityType": "capable",
"drawerLossProtection": False,
"protectionInformationCapable": False,
"protectionInformationCapabilities": {"protectionInformationCapable": True,
"protectionType": "type2Protection"},
"volumeGroupData": {"type": "unknown", "diskPoolData": None},
"reservedSpaceAllocated": False,
"securityLevel": "fde",
"usedSpace": "322122547200",
"totalRaidedSpace": "598926258176",
"raidStatus": "optimal",
"freeSpace": "276803710976",
"drivePhysicalType": "sas",
"driveMediaType": "hdd",
"diskPool": False,
"id": "04000000600A098000A4B28D00000FBD5C2F7F19",
"name": "database_storage_pool"}]
GET_LONG_LIVED_OPERATION_RESPONSE = [
{"returnCode": "ok",
"longLivedOpsProgress": [
{"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
"format": None, "volCreation": None, "volDeletion": None},
{"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
"volCreation": None, "volDeletion": None}]},
{"returnCode": "ok",
"longLivedOpsProgress": [
{"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
"format": None, "volCreation": None, "volDeletion": None},
{"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
"volCreation": None, "volDeletion": None}]},
{"returnCode": "ok",
"longLivedOpsProgress": [
{"volAction": "initializing", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
"format": None, "volCreation": None, "volDeletion": None},
{"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
"volCreation": None, "volDeletion": None}]},
{"returnCode": "ok",
"longLivedOpsProgress": [
{"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B9D1000037315D494C6F", "pending": False, "percentComplete": 1, "timeToCompletion": 20},
"format": None, "volCreation": None, "volDeletion": None},
{"volAction": "complete", "reconstruct": None, "volExpansion": None, "volAndCapExpansion": None,
"init": {"volumeRef": "02000000600A098000A4B28D00003D2C5D494C87", "pending": False, "percentComplete": 0, "timeToCompletion": 18},
"volCreation": None, "volDeletion": None}]}]
WORKLOAD_GET_RESPONSE = [{"id": "4200000001000000000000000000000000000000", "name": "general_workload_1",
"workloadAttributes": [{"key": "profileId", "value": "Other_1"}]},
{"id": "4200000002000000000000000000000000000000", "name": "employee_data",
"workloadAttributes": [{"key": "use", "value": "EmployeeData"},
{"key": "location", "value": "ICT"},
{"key": "private", "value": "public"},
{"key": "profileId", "value": "ansible_workload_1"}]},
{"id": "4200000003000000000000000000000000000000", "name": "customer_database",
"workloadAttributes": [{"key": "use", "value": "customer_information"},
{"key": "location", "value": "global"},
{"key": "profileId", "value": "ansible_workload_2"}]},
{"id": "4200000004000000000000000000000000000000", "name": "product_database",
"workloadAttributes": [{"key": "use", "value": "production_information"},
{"key": "security", "value": "private"},
{"key": "location", "value": "global"},
{"key": "profileId", "value": "ansible_workload_4"}]}]
REQUEST_FUNC = "ansible.modules.storage.netapp.netapp_e_volume.NetAppESeriesVolume.request"
GET_VOLUME_FUNC = "ansible.modules.storage.netapp.netapp_e_volume.NetAppESeriesVolume.get_volume"
SLEEP_FUNC = "ansible.modules.storage.netapp.netapp_e_volume.sleep"
def _set_args(self, args=None):
module_args = self.REQUIRED_PARAMS.copy()
if args is not None:
module_args.update(args)
set_module_args(module_args)
def test_module_arguments_pass(self):
"""Ensure valid arguments successful create a class instance."""
arg_sets = [{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 10},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "gb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1024,
"thin_volume_growth_alert_threshold": 99},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "gb",
"thin_provision": True, "thin_volume_repo_size": 64},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "kb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 67108864}]
# validate size normalization
for arg_set in arg_sets:
self._set_args(arg_set)
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.size_b, volume_object.convert_to_aligned_bytes(arg_set["size"]))
self.assertEqual(volume_object.thin_volume_repo_size_b, volume_object.convert_to_aligned_bytes(arg_set["thin_volume_repo_size"]))
self.assertEqual(volume_object.thin_volume_expansion_policy, "automatic")
if "thin_volume_max_repo_size" not in arg_set.keys():
self.assertEqual(volume_object.thin_volume_max_repo_size_b, volume_object.convert_to_aligned_bytes(arg_set["size"]))
else:
self.assertEqual(volume_object.thin_volume_max_repo_size_b,
volume_object.convert_to_aligned_bytes(arg_set["thin_volume_max_repo_size"]))
# validate metadata form
self._set_args(
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10, "workload_name": "workload1",
"metadata": {"availability": "public", "security": "low"}})
volume_object = NetAppESeriesVolume()
for entry in volume_object.metadata:
self.assertTrue(entry in [{'value': 'low', 'key': 'security'}, {'value': 'public', 'key': 'availability'}])
def test_module_arguments_fail(self):
"""Ensure invalid arguments values do not create a class instance."""
arg_sets = [{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
"thin_provision": True, "thin_volume_repo_size": 260},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10000, "size_unit": "tb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 10},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10000, "size_unit": "gb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 9},
{"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 10000, "size_unit": "gb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 100}]
for arg_set in arg_sets:
with self.assertRaises(AnsibleFailJson):
self._set_args(arg_set)
print(arg_set)
volume_object = NetAppESeriesVolume()
def test_get_volume_pass(self):
"""Evaluate the get_volume method."""
with mock.patch(self.REQUEST_FUNC,
side_effect=[(200, self.VOLUME_GET_RESPONSE), (200, self.THIN_VOLUME_RESPONSE)]):
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.get_volume(),
[entry for entry in self.VOLUME_GET_RESPONSE if entry["name"] == "Matthew"][0])
with mock.patch(self.REQUEST_FUNC,
side_effect=[(200, self.VOLUME_GET_RESPONSE), (200, self.THIN_VOLUME_RESPONSE)]):
self._set_args({"state": "present", "name": "NotAVolume", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.get_volume(), {})
def test_get_volume_fail(self):
"""Evaluate the get_volume exception paths."""
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to obtain list of thick volumes."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
volume_object.get_volume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to obtain list of thin volumes."):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.VOLUME_GET_RESPONSE), Exception()]):
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
volume_object.get_volume()
def tests_wait_for_volume_availability_pass(self):
"""Ensure wait_for_volume_availability completes as expected."""
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
with mock.patch(self.SLEEP_FUNC, return_value=None):
with mock.patch(self.GET_VOLUME_FUNC, side_effect=[False, False, True]):
volume_object.wait_for_volume_availability()
def tests_wait_for_volume_availability_fail(self):
"""Ensure wait_for_volume_availability throws the expected exceptions."""
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
volume_object.get_volume = lambda: False
with self.assertRaisesRegexp(AnsibleFailJson, "Timed out waiting for the volume"):
with mock.patch(self.SLEEP_FUNC, return_value=None):
volume_object.wait_for_volume_availability()
def tests_wait_for_volume_action_pass(self):
"""Ensure wait_for_volume_action completes as expected."""
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "02000000600A098000A4B9D1000037315D494C6F",
"storageVolumeRef": "02000000600A098000A4B9D1000037315DXXXXXX"}
with mock.patch(self.SLEEP_FUNC, return_value=None):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[0]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[1]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[2]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[3])]):
volume_object.wait_for_volume_action()
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "02000000600A098000A4B9D1000037315DXXXXXX",
"storageVolumeRef": "02000000600A098000A4B9D1000037315D494C6F"}
with mock.patch(self.SLEEP_FUNC, return_value=None):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[0]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[1]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[2]),
(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[3])]):
volume_object.wait_for_volume_action()
def tests_wait_for_volume_action_fail(self):
"""Ensure wait_for_volume_action throws the expected exceptions."""
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool", "size": 100,
"wait_for_initialization": True})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"id": "02000000600A098000A4B9D1000037315DXXXXXX",
"storageVolumeRef": "02000000600A098000A4B9D1000037315D494C6F"}
with mock.patch(self.SLEEP_FUNC, return_value=None):
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to get volume expansion progress."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.wait_for_volume_action()
with self.assertRaisesRegexp(AnsibleFailJson, "Expansion action failed to complete."):
with mock.patch(self.REQUEST_FUNC, return_value=(200, self.GET_LONG_LIVED_OPERATION_RESPONSE[0])):
volume_object.wait_for_volume_action(timeout=300)
def test_get_storage_pool_pass(self):
"""Evaluate the get_storage_pool method."""
with mock.patch(self.REQUEST_FUNC, return_value=(200, self.STORAGE_POOL_GET_RESPONSE)):
self._set_args({"state": "present", "name": "NewVolume", "storage_pool_name": "employee_data_storage_pool",
"size": 100})
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.get_storage_pool(), [entry for entry in self.STORAGE_POOL_GET_RESPONSE if
entry["name"] == "employee_data_storage_pool"][0])
self._set_args(
{"state": "present", "name": "NewVolume", "storage_pool_name": "NotAStoragePool", "size": 100})
volume_object = NetAppESeriesVolume()
self.assertEqual(volume_object.get_storage_pool(), {})
def test_get_storage_pool_fail(self):
"""Evaluate the get_storage_pool exception paths."""
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to obtain list of storage pools."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
volume_object.get_storage_pool()
def test_check_storage_pool_sufficiency_pass(self):
"""Ensure passing logic."""
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100})
volume_object = NetAppESeriesVolume()
volume_object.pool_detail = [entry for entry in self.STORAGE_POOL_GET_RESPONSE
if entry["name"] == "employee_data_storage_pool"][0]
volume_object.check_storage_pool_sufficiency()
def test_check_storage_pool_sufficiency_fail(self):
"""Validate exceptions are thrown for insufficient storage pool resources."""
self._set_args({"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
"thin_provision": True, "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
"thin_volume_growth_alert_threshold": 10})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Requested storage pool"):
volume_object.check_storage_pool_sufficiency()
with self.assertRaisesRegexp(AnsibleFailJson,
"Thin provisioned volumes can only be created on raid disk pools."):
volume_object.pool_detail = [entry for entry in self.STORAGE_POOL_GET_RESPONSE
if entry["name"] == "database_storage_pool"][0]
volume_object.volume_detail = {}
volume_object.check_storage_pool_sufficiency()
with self.assertRaisesRegexp(AnsibleFailJson, "requires the storage pool to be DA-compatible."):
volume_object.pool_detail = {"diskPool": True,
"protectionInformationCapabilities": {"protectionType": "type0Protection",
"protectionInformationCapable": False}}
volume_object.volume_detail = {}
volume_object.data_assurance_enabled = True
volume_object.check_storage_pool_sufficiency()
volume_object.pool_detail = {"diskPool": True,
"protectionInformationCapabilities": {"protectionType": "type2Protection",
"protectionInformationCapable": True}}
volume_object.check_storage_pool_sufficiency()
self._set_args({"state": "present", "name": "vol", "storage_pool_name": "pool", "size": 100, "size_unit": "tb",
"thin_provision": False})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson,
"Not enough storage pool free space available for the volume's needs."):
volume_object.pool_detail = {"freeSpace": 10, "diskPool": True,
"protectionInformationCapabilities": {"protectionType": "type2Protection",
"protectionInformationCapable": True}}
volume_object.volume_detail = {"totalSizeInBytes": 100}
volume_object.data_assurance_enabled = True
volume_object.size_b = 1
volume_object.check_storage_pool_sufficiency()
def test_update_workload_tags_pass(self):
"""Validate updating workload tags."""
test_sets = [[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100}, False],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "employee_data"}, False],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "customer_database",
"metadata": {"use": "customer_information", "location": "global"}}, False],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "customer_database",
"metadata": {"use": "customer_information"}}, True],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "customer_database",
"metadata": {"use": "customer_information", "location": "local"}}, True],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "customer_database",
"metadata": {"use": "customer_information", "location": "global", "importance": "no"}}, True],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "newWorkload",
"metadata": {"for_testing": "yes"}}, True],
[{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "newWorkload"}, True]]
for test in test_sets:
self._set_args(test[0])
volume_object = NetAppESeriesVolume()
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.WORKLOAD_GET_RESPONSE), (200, {"id": 1})]):
self.assertEqual(volume_object.update_workload_tags(), test[1])
def test_update_workload_tags_fail(self):
"""Validate updating workload tags fails appropriately."""
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "employee_data"})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to retrieve storage array workload tags."):
with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
volume_object.update_workload_tags()
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "employee_data", "metadata": {"key": "not-use", "value": "EmployeeData"}})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create new workload tag."):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.WORKLOAD_GET_RESPONSE), Exception()]):
volume_object.update_workload_tags()
self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
"workload_name": "employee_data2", "metadata": {"key": "use", "value": "EmployeeData"}})
volume_object = NetAppESeriesVolume()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create new workload tag."):
with mock.patch(self.REQUEST_FUNC, side_effect=[(200, self.WORKLOAD_GET_RESPONSE), Exception()]):
volume_object.update_workload_tags()
def test_get_volume_property_changes_pass(self):
"""Verify correct dictionary is returned"""
# no property changes
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True,
"read_ahead_enable": True, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 1}, "flashCached": True,
"segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(), dict())
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True,
"read_ahead_enable": True, "thin_provision": True, "thin_volume_repo_size": 64,
"thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 1},
"flashCached": True, "growthAlertThreshold": "90",
"expansionPolicy": "automatic", "segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(), dict())
# property changes
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True,
"read_ahead_enable": True, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"cwob": False, "readCacheEnable": False, "writeCacheEnable": True,
"readAheadMultiplier": 1}, "flashCached": True,
"segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(),
{"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
'flashCache': True})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True, "cache_without_batteries": False,
"read_ahead_enable": True, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 1}, "flashCached": True,
"segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(),
{"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
'flashCache': True})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True, "cache_without_batteries": True,
"read_ahead_enable": True, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 1}, "flashCached": False,
"segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(),
{"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True, "cacheWithoutBatteries": True},
'flashCache': True})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True, "cache_without_batteries": True,
"read_ahead_enable": False, "thin_provision": False})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 1}, "flashCached": False,
"segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(), {"metaTags": [],
'cacheSettings': {'readCacheEnable': True,
'writeCacheEnable': True,
'readAheadEnable': False,
"cacheWithoutBatteries": True},
'flashCache': True})
self._set_args(
{"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100, "ssd_cache_enabled": True,
"read_cache_enable": True, "write_cache_enable": True,
"read_ahead_enable": True, "thin_provision": True, "thin_volume_repo_size": 64,
"thin_volume_max_repo_size": 1000, "thin_volume_growth_alert_threshold": 90})
volume_object = NetAppESeriesVolume()
volume_object.volume_detail = {"metadata": [],
"cacheSettings": {"cwob": True, "readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 1},
"flashCached": True, "growthAlertThreshold": "95",
"expansionPolicy": "automatic", "segmentSize": str(128 * 1024)}
self.assertEqual(volume_object.get_volume_property_changes(),
{"metaTags": [], 'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
'growthAlertThreshold': 90, 'flashCache': True})
def test_get_volume_property_changes_fail(self):
    """Verify a segment-size mismatch raises the expected failure."""
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool",
                    "size": 100, "ssd_cache_enabled": True, "read_cache_enable": True,
                    "write_cache_enable": True, "read_ahead_enable": True,
                    "thin_provision": False})
    volume = NetAppESeriesVolume()
    # Existing volume reports a 512 KiB segment size, which cannot be changed in place.
    volume.volume_detail = {
        "cacheSettings": {"cwob": False, "readCacheEnable": True, "writeCacheEnable": True, "readAheadMultiplier": 1},
        "flashCached": True, "segmentSize": str(512 * 1024)}
    with self.assertRaisesRegexp(AnsibleFailJson, "Existing volume segment size is"):
        volume.get_volume_property_changes()
def test_get_expand_volume_changes_pass(self):
    """Verify expansion change calculations for thick and thin volumes."""
    gib = 1024 * 1024 * 1024

    # Thick volume: current capacity 50 GiB, requested 100 GiB.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool",
                    "size": 100, "thin_provision": False})
    volume = NetAppESeriesVolume()
    volume.volume_detail = {"capacity": str(50 * gib), "thinProvisioned": False}
    self.assertEqual(volume.get_expand_volume_changes(),
                     {"sizeUnit": "bytes", "expansionSize": 100 * gib})

    # Thin volume, automatic policy: only the virtual size grows.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                    "thin_provision": True, "thin_volume_expansion_policy": "automatic",
                    "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
                    "thin_volume_growth_alert_threshold": 90})
    volume = NetAppESeriesVolume()
    volume.volume_detail = {"capacity": str(50 * gib), "thinProvisioned": True,
                            "expansionPolicy": "automatic",
                            "provisionedCapacityQuota": str(1000 * gib)}
    self.assertEqual(volume.get_expand_volume_changes(),
                     {"sizeUnit": "bytes", "newVirtualSize": 100 * gib})

    # Thin volume, automatic policy: the repository quota must grow.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                    "thin_provision": True, "thin_volume_expansion_policy": "automatic",
                    "thin_volume_repo_size": 64, "thin_volume_max_repo_size": 1000,
                    "thin_volume_growth_alert_threshold": 90})
    volume = NetAppESeriesVolume()
    volume.volume_detail = {"capacity": str(100 * gib), "thinProvisioned": True,
                            "expansionPolicy": "automatic",
                            "provisionedCapacityQuota": str(500 * gib)}
    self.assertEqual(volume.get_expand_volume_changes(),
                     {"sizeUnit": "bytes", "newRepositorySize": 1000 * gib})

    # Thin volume, manual policy: repository grows to the requested repo size.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                    "thin_provision": True, "thin_volume_expansion_policy": "manual",
                    "thin_volume_repo_size": 504, "thin_volume_max_repo_size": 1000,
                    "thin_volume_growth_alert_threshold": 90})
    volume = NetAppESeriesVolume()
    volume.volume_detail = {"capacity": str(100 * gib), "thinProvisioned": True,
                            "expansionPolicy": "manual",
                            "currentProvisionedCapacity": str(500 * gib)}
    self.assertEqual(volume.get_expand_volume_changes(),
                     {"sizeUnit": "bytes", "newRepositorySize": 504 * gib})

    # Same as above with a larger manual repository size.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                    "thin_provision": True, "thin_volume_expansion_policy": "manual",
                    "thin_volume_repo_size": 756, "thin_volume_max_repo_size": 1000,
                    "thin_volume_growth_alert_threshold": 90})
    volume = NetAppESeriesVolume()
    volume.volume_detail = {"capacity": str(100 * gib), "thinProvisioned": True,
                            "expansionPolicy": "manual",
                            "currentProvisionedCapacity": str(500 * gib)}
    self.assertEqual(volume.get_expand_volume_changes(),
                     {"sizeUnit": "bytes", "newRepositorySize": 756 * gib})
def test_get_expand_volume_changes_fail(self):
    """Verify invalid expansion requests raise the expected failures."""
    gib = 1024 * 1024 * 1024

    # Requested size (100 GiB) is smaller than the existing 1000 GiB volume.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool",
                    "size": 100, "thin_provision": False})
    volume = NetAppESeriesVolume()
    volume.volume_detail = {"capacity": str(1000 * gib)}
    with self.assertRaisesRegexp(AnsibleFailJson, "Reducing the size of volumes is not permitted."):
        volume.get_expand_volume_changes()

    # Manual repository increase below the allowed window.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                    "thin_provision": True, "thin_volume_expansion_policy": "manual",
                    "thin_volume_repo_size": 502, "thin_volume_max_repo_size": 1000,
                    "thin_volume_growth_alert_threshold": 90})
    volume = NetAppESeriesVolume()
    volume.volume_detail = {"capacity": str(100 * gib), "thinProvisioned": True,
                            "expansionPolicy": "manual",
                            "currentProvisionedCapacity": str(500 * gib)}
    with self.assertRaisesRegexp(AnsibleFailJson, "The thin volume repository increase must be between or equal"):
        volume.get_expand_volume_changes()

    # Manual repository increase above the allowed window.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                    "thin_provision": True, "thin_volume_expansion_policy": "manual",
                    "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
                    "thin_volume_growth_alert_threshold": 90})
    volume = NetAppESeriesVolume()
    volume.volume_detail = {"capacity": str(100 * gib), "thinProvisioned": True,
                            "expansionPolicy": "manual",
                            "currentProvisionedCapacity": str(500 * gib)}
    with self.assertRaisesRegexp(AnsibleFailJson, "The thin volume repository increase must be between or equal"):
        volume.get_expand_volume_changes()
def test_create_volume_pass(self):
    """Verify create_volume succeeds for thick and thin volumes."""
    # Thick-provisioned volume.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool",
                    "size": 100, "thin_provision": False})
    thick_volume = NetAppESeriesVolume()
    thick_volume.pool_detail = {"id": "12345"}
    with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
        thick_volume.create_volume()

    # Thin-provisioned volume with a manual repository expansion policy.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                    "thin_provision": True, "thin_volume_expansion_policy": "manual",
                    "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
                    "thin_volume_growth_alert_threshold": 90})
    thin_volume = NetAppESeriesVolume()
    thin_volume.pool_detail = {"id": "12345"}
    with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
        thin_volume.create_volume()
def test_create_volume_fail(self):
    """Verify create_volume failures raise the expected messages."""
    # Thick volume: a failed request maps to "Failed to create volume."
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool",
                    "size": 100, "thin_provision": False})
    thick_volume = NetAppESeriesVolume()
    thick_volume.pool_detail = {"id": "12345"}
    with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create volume."):
        with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
            thick_volume.create_volume()

    # Thin volume: a failed request maps to "Failed to create thin volume."
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                    "thin_provision": True, "thin_volume_expansion_policy": "manual",
                    "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
                    "thin_volume_growth_alert_threshold": 90})
    thin_volume = NetAppESeriesVolume()
    thin_volume.pool_detail = {"id": "12345"}
    with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create thin volume."):
        with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
            thin_volume.create_volume()
def test_update_volume_properties_pass(self):
    """Verify update_volume_properties for changed and unchanged volumes."""
    workload = "4200000001000000000000000000000000000000"

    def property_changes():
        # A fresh dict on every call, like the real change-report method.
        return {'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
                'growthAlertThreshold': 90, 'flashCached': True}

    # Thick volume with pending changes -> update issued, True returned.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool",
                    "size": 100, "thin_provision": False})
    volume = NetAppESeriesVolume()
    volume.pool_detail = {"id": "12345"}
    volume.wait_for_volume_availability = lambda: None
    volume.get_volume = lambda: {"id": "12345'"}
    volume.get_volume_property_changes = property_changes
    volume.workload_id = workload
    with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
        self.assertTrue(volume.update_volume_properties())

    # Thin volume with pending changes -> update issued, True returned.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                    "thin_provision": True, "thin_volume_expansion_policy": "manual",
                    "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
                    "thin_volume_growth_alert_threshold": 90})
    volume = NetAppESeriesVolume()
    volume.pool_detail = {"id": "12345"}
    volume.wait_for_volume_availability = lambda: None
    volume.get_volume = lambda: {"id": "12345'"}
    volume.get_volume_property_changes = property_changes
    volume.workload_id = workload
    with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
        self.assertTrue(volume.update_volume_properties())

    # No pending changes -> no request issued, False returned.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool",
                    "size": 100, "thin_provision": False})
    volume = NetAppESeriesVolume()
    volume.pool_detail = {"metadata": [{"key": "workloadId", "value": "12345"}]}
    volume.wait_for_volume_availability = lambda: None
    volume.get_volume = lambda: {"id": "12345'"}
    volume.get_volume_property_changes = lambda: {}
    volume.workload_id = workload
    self.assertFalse(volume.update_volume_properties())
def test_update_volume_properties_fail(self):
    """Verify update_volume_properties failures raise the expected messages."""
    workload = "4200000001000000000000000000000000000000"

    def property_changes():
        # A fresh change report on every call.
        return {'cacheSettings': {'readCacheEnable': True, 'writeCacheEnable': True},
                'growthAlertThreshold': 90, 'flashCached': True}

    # Thick volume: request failure maps to "Failed to update volume properties."
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool",
                    "size": 100, "thin_provision": False})
    volume = NetAppESeriesVolume()
    volume.pool_detail = {"id": "12345"}
    volume.wait_for_volume_availability = lambda: None
    volume.get_volume = lambda: {"id": "12345'"}
    volume.get_volume_property_changes = property_changes
    volume.workload_id = workload
    with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update volume properties."):
        with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
            self.assertTrue(volume.update_volume_properties())

    # Thin volume: request failure maps to "Failed to update thin volume properties."
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                    "thin_provision": True, "thin_volume_expansion_policy": "manual",
                    "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
                    "thin_volume_growth_alert_threshold": 90})
    volume = NetAppESeriesVolume()
    volume.pool_detail = {"id": "12345"}
    volume.wait_for_volume_availability = lambda: None
    volume.get_volume = lambda: {"id": "12345'"}
    volume.get_volume_property_changes = property_changes
    volume.workload_id = workload
    with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update thin volume properties."):
        with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
            self.assertTrue(volume.update_volume_properties())
def test_expand_volume_pass(self):
    """Verify expand_volume succeeds for thick and thin module arguments."""
    expansion = {"sizeUnit": "bytes", "expansionSize": 100 * 1024 * 1024 * 1024}

    # Thick-provisioned module arguments.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool",
                    "size": 100, "thin_provision": False})
    volume = NetAppESeriesVolume()
    volume.get_expand_volume_changes = lambda: dict(expansion)
    volume.volume_detail = {"id": "12345", "thinProvisioned": True}
    with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
        volume.expand_volume()

    # Thin-provisioned module arguments.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100,
                    "thin_provision": True, "thin_volume_expansion_policy": "manual",
                    "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
                    "thin_volume_growth_alert_threshold": 90})
    volume = NetAppESeriesVolume()
    volume.get_expand_volume_changes = lambda: dict(expansion)
    volume.volume_detail = {"id": "12345", "thinProvisioned": True}
    with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
        volume.expand_volume()
def test_expand_volume_fail(self):
    """Verify expand_volume failures raise the expected messages."""
    # Thick volume: request failure maps to "Failed to expand volume."
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool",
                    "size": 100, "thin_provision": False})
    volume = NetAppESeriesVolume()
    volume.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
                                                "expansionSize": 100 * 1024 * 1024 * 1024}
    volume.volume_detail = {"id": "12345", "thinProvisioned": False}
    with self.assertRaisesRegexp(AnsibleFailJson, "Failed to expand volume."):
        with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
            volume.expand_volume()

    # Thin volume: request failure maps to "Failed to expand thin volume."
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool",
                    "size": 100, "thin_provision": True})
    volume = NetAppESeriesVolume()
    volume.get_expand_volume_changes = lambda: {"sizeUnit": "bytes",
                                                "expansionSize": 100 * 1024 * 1024 * 1024}
    volume.volume_detail = {"id": "12345", "thinProvisioned": True}
    with self.assertRaisesRegexp(AnsibleFailJson, "Failed to expand thin volume."):
        with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
            volume.expand_volume()
def test_delete_volume_pass(self):
    """Verify delete_volume succeeds for thick and thin module arguments."""
    thick_extra = {"thin_provision": False}
    thin_extra = {"thin_provision": True, "thin_volume_expansion_policy": "manual",
                  "thin_volume_repo_size": 760, "thin_volume_max_repo_size": 1000,
                  "thin_volume_growth_alert_threshold": 90}

    for extra in (thick_extra, thin_extra):
        args = {"state": "present", "name": "Matthew", "storage_pool_name": "pool", "size": 100}
        args.update(extra)
        self._set_args(args)
        volume = NetAppESeriesVolume()
        volume.volume_detail = {"id": "12345"}
        with mock.patch(self.REQUEST_FUNC, return_value=(200, {})):
            volume.delete_volume()
def test_delete_volume_fail(self):
    """Verify delete_volume failures raise the expected messages."""
    # Thick volume deletion failure.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool",
                    "size": 100, "thin_provision": False})
    volume = NetAppESeriesVolume()
    with self.assertRaisesRegexp(AnsibleFailJson, "Failed to delete volume."):
        with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
            volume.delete_volume()

    # Thin volume deletion failure.
    self._set_args({"state": "present", "name": "Matthew", "storage_pool_name": "pool",
                    "size": 100, "thin_provision": True})
    volume = NetAppESeriesVolume()
    with self.assertRaisesRegexp(AnsibleFailJson, "Failed to delete thin volume."):
        with mock.patch(self.REQUEST_FUNC, return_value=Exception()):
            volume.delete_volume()
|
civicresourcegroup/django-filer | refs/heads/develop | filer/migrations/0009_auto__add_field_folderpermission_can_edit_new__add_field_folderpermiss.py | 49 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add nullable ``*_new`` SmallIntegerField columns to FolderPermission.

    The new columns sit alongside the existing boolean can_edit/can_read/
    can_add_children fields; they default to None and allow NULL/blank.
    """

    def forwards(self, orm):
        # Adding field 'FolderPermission.can_edit_new'
        db.add_column('filer_folderpermission', 'can_edit_new',
                      self.gf('django.db.models.fields.SmallIntegerField')(default=None, null=True, blank=True),
                      keep_default=False)

        # Adding field 'FolderPermission.can_read_new'
        db.add_column('filer_folderpermission', 'can_read_new',
                      self.gf('django.db.models.fields.SmallIntegerField')(default=None, null=True, blank=True),
                      keep_default=False)

        # Adding field 'FolderPermission.can_add_children_new'
        db.add_column('filer_folderpermission', 'can_add_children_new',
                      self.gf('django.db.models.fields.SmallIntegerField')(default=None, null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'FolderPermission.can_edit_new'
        db.delete_column('filer_folderpermission', 'can_edit_new')

        # Deleting field 'FolderPermission.can_read_new'
        db.delete_column('filer_folderpermission', 'can_read_new')

        # Deleting field 'FolderPermission.can_add_children_new'
        db.delete_column('filer_folderpermission', 'can_add_children_new')

    # Frozen ORM snapshot generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'filer.clipboard': {
            'Meta': {'object_name': 'Clipboard'},
            'files': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'in_clipboards'", 'symmetrical': 'False', 'through': "orm['filer.ClipboardItem']", 'to': "orm['filer.File']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'filer_clipboards'", 'to': "orm['auth.User']"})
        },
        'filer.clipboarditem': {
            'Meta': {'object_name': 'ClipboardItem'},
            'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Clipboard']"}),
            'file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'filer.file': {
            'Meta': {'object_name': 'File'},
            '_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
            'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        'filer.folder': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        'filer.folderpermission': {
            'Meta': {'object_name': 'FolderPermission'},
            'can_add_children': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_add_children_new': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'can_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_edit_new': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'can_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_read_new': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'everybody': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Folder']", 'null': 'True', 'blank': 'True'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_folder_permissions'", 'null': 'True', 'to': "orm['auth.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'type': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_folder_permissions'", 'null': 'True', 'to': "orm['auth.User']"})
        },
        'filer.image': {
            'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
            '_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            '_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
            'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['filer']
openstack/nova | refs/heads/master | nova/tests/unit/virt/zvm/test_driver.py | 3 | # Copyright 2017,2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import os
from oslo_utils.fixture import uuidsentinel
from nova.compute import provider_tree
from nova import conf
from nova import context
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova.virt import fake
from nova.virt.zvm import driver as zvmdriver
CONF = conf.CONF
class TestZVMDriver(test.NoDBTestCase):
def setUp(self):
    """Build a ZVMDriver against a mocked cloud connector plus test fixtures."""
    super(TestZVMDriver, self).setUp()
    self.flags(my_ip='192.168.1.1',
               instance_name_template='abc%05d')
    self.flags(cloud_connector_url='https://1.1.1.1:1111', group='zvm')

    # Driver construction talks to the connector and looks up the local
    # user, so both are stubbed out for the duration of __init__.
    with mock.patch('nova.virt.zvm.utils.ConnectorClient.call') as mcall, \
        mock.patch('pwd.getpwuid', return_value=mock.Mock(pw_name='test')):
        mcall.return_value = {'hypervisor_hostname': 'TESTHOST',
                              'ipl_time': 'IPL at 11/14/17 10:47:44 EST'}
        self._driver = zvmdriver.ZVMDriver(fake.FakeVirtAPI())
        self._hypervisor = self._driver._hypervisor

    self._context = context.RequestContext('fake_user', 'fake_project')
    self._image_id = uuidsentinel.imag_id

    # A minimal instance plus flavor used by most tests.
    self._instance_values = {
        'display_name': 'test',
        'uuid': uuidsentinel.inst_id,
        'vcpus': 1,
        'memory_mb': 1024,
        'image_ref': self._image_id,
        'root_gb': 0,
    }
    self._instance = fake_instance.fake_instance_obj(
        self._context, **self._instance_values)
    self._instance.flavor = objects.Flavor(name='testflavor',
                                           vcpus=1, root_gb=3, ephemeral_gb=10,
                                           swap=0, memory_mb=512, extra_specs={})

    # Two ephemeral disks and the matching block-device info structure.
    self._eph_disks = [{'guest_format': u'ext3',
                        'device_name': u'/dev/sdb',
                        'disk_bus': None,
                        'device_type': None,
                        'size': 1},
                       {'guest_format': u'ext4',
                        'device_name': u'/dev/sdc',
                        'disk_bus': None,
                        'device_type': None,
                        'size': 2}]
    self._block_device_info = {'swap': None,
                               'root_device_name': u'/dev/sda',
                               'ephemerals': self._eph_disks,
                               'block_device_mapping': []}

    fake_image_meta = {'status': 'active',
                       'properties': {'os_distro': 'rhel7.2'},
                       'name': 'rhel72eckdimage',
                       'deleted': False,
                       'container_format': 'bare',
                       'disk_format': 'raw',
                       'id': self._image_id,
                       'owner': 'cfc26f9d6af948018621ab00a1675310',
                       'checksum': 'b026cd083ef8e9610a29eaf71459cc',
                       'min_disk': 0,
                       'is_public': False,
                       'deleted_at': None,
                       'min_ram': 0,
                       'size': 465448142}
    self._image_meta = objects.ImageMeta.from_dict(fake_image_meta)

    # Network fixture: one subnet, one network, one OVS VIF.
    subnet_4 = network_model.Subnet(cidr='192.168.0.1/24',
                                    dns=[network_model.IP('192.168.0.1')],
                                    gateway=network_model.IP('192.168.0.1'),
                                    ips=[network_model.IP('192.168.0.100')],
                                    routes=None)
    network = network_model.Network(id=0,
                                    bridge='fa0',
                                    label='fake',
                                    subnets=[subnet_4],
                                    vlan=None,
                                    bridge_interface=None,
                                    injected=True)
    self._network_values = {
        'id': None,
        'address': 'DE:AD:BE:EF:00:00',
        'network': network,
        'type': network_model.VIF_TYPE_OVS,
        'devname': None,
        'ovs_interfaceid': None,
        'rxtx_cap': 3
    }
    self._network_info = network_model.NetworkInfo([
        network_model.VIF(**self._network_values)
    ])

    self.mock_update_task_state = mock.Mock()
def test_driver_init_no_url(self):
    """Driver construction must fail when cloud_connector_url is unset."""
    self.flags(cloud_connector_url=None, group='zvm')
    with self.assertRaises(exception.ZVMDriverException):
        zvmdriver.ZVMDriver('virtapi')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_get_available_resource_err_case(self, call):
    """Connector errors yield zeroed resource counters."""
    err = {'overallRC': 1, 'errmsg': 'err', 'rc': 0, 'rs': 0}
    call.side_effect = exception.ZVMConnectorError(err)
    resources = self._driver.get_available_resource()
    # All numeric fields fall back to zero; the cached hostname survives.
    for key in ('vcpus', 'memory_mb_used', 'disk_available_least',
                'hypervisor_version'):
        self.assertEqual(0, resources[key])
    self.assertEqual('TESTHOST', resources['hypervisor_hostname'])
def test_driver_template_validation(self):
    """An invalid instance_name_template must be rejected."""
    self.flags(instance_name_template='abc%6d')
    with self.assertRaises(exception.ZVMDriverException):
        self._driver._validate_options()
@mock.patch('nova.virt.zvm.guest.Guest.get_info')
def test_get_info(self, get_info_mock):
    """get_info delegates to the Guest object's get_info."""
    self._driver.get_info(self._instance)
    get_info_mock.assert_called_once_with()
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_private_get_image_info_err(self, call):
    """Non-404 connector errors propagate out of _get_image_info."""
    err = {'overallRC': 500, 'errmsg': 'err', 'rc': 0, 'rs': 0}
    call.side_effect = exception.ZVMConnectorError(err)
    with self.assertRaises(exception.ZVMConnectorError):
        self._driver._get_image_info('context', 'image_meta_id', 'os_distro')
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
@mock.patch('nova.virt.zvm.driver.ZVMDriver._import_spawn_image')
def test_private_get_image_info(self, image_import, call):
    """A 404 from image_query triggers an import followed by a re-query."""
    not_found = {'overallRC': 404, 'errmsg': 'err', 'rc': 0, 'rs': 0}
    # First query misses (404); the retry after the import succeeds.
    call.side_effect = [exception.ZVMConnectorError(results=not_found),
                        [{'imagename': 'image-info'}]]
    self._driver._get_image_info('context', 'image_meta_id', 'os_distro')
    image_import.assert_called_once_with('context', 'image_meta_id',
                                         'os_distro')
    call.assert_has_calls(
        [mock.call('image_query', imagename='image_meta_id')] * 2
    )
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def test_private_get_image_info_exist(self, call):
    """An already-imported image is returned straight from the query."""
    call.return_value = [{'imagename': 'image-info'}]
    image_info = self._driver._get_image_info('context', 'image_meta_id',
                                              'os_distro')
    call.assert_called_once_with('image_query', imagename='image_meta_id')
    self.assertEqual('image-info', image_info)
@mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
def _test_set_disk_list(self, call, has_get_root_units=False,
                        has_eph_disks=False):
    """Exercise _set_disk_list under the requested option combination."""
    expected_disks = [{'is_boot_disk': True, 'size': '3g'}]
    expected_ephs = [{'format': u'ext3', 'size': '1g'},
                     {'format': u'ext3', 'size': '2g'}]
    inst = copy.deepcopy(self._instance)
    bdi = copy.deepcopy(self._block_device_info)

    if has_get_root_units:
        # Root size is taken from the image itself (root_gb == 0).
        expected_disks = [{'is_boot_disk': True, 'size': '3338'}]
        call.return_value = '3338'
        inst['root_gb'] = 0
    else:
        inst['root_gb'] = 3

    if has_eph_disks:
        expected_disks += expected_ephs
    else:
        bdi['ephemerals'] = []
        expected_ephs = []

    disks, ephs = self._driver._set_disk_list(inst, self._image_meta.id,
                                              bdi)

    if has_get_root_units:
        call.assert_called_once_with('image_get_root_disk_size',
                                     self._image_meta.id)
    self.assertEqual(expected_disks, disks)
    self.assertEqual(expected_ephs, ephs)
def test_private_set_disk_list_simple(self):
self._test_set_disk_list()
def test_private_set_disk_list_with_eph_disks(self):
self._test_set_disk_list(has_eph_disks=True)
def test_private_set_disk_list_with_get_root_units(self):
self._test_set_disk_list(has_get_root_units=True)
    @mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
    def test_private_setup_network(self, call):
        # _setup_network should translate self._network_info into the flat
        # dict format below and pass it to guest_create_network_interface.
        inst_nets = []
        _net = {'ip_addr': '192.168.0.100',
                'gateway_addr': '192.168.0.1',
                'cidr': '192.168.0.1/24',
                'mac_addr': 'DE:AD:BE:EF:00:00',
                'nic_id': None}
        inst_nets.append(_net)
        self._driver._setup_network('vm_name', 'os_distro',
                                    self._network_info,
                                    self._instance)
        call.assert_called_once_with('guest_create_network_interface',
                                     'vm_name', 'os_distro', inst_nets)
    @mock.patch('nova.virt.images.fetch')
    @mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
    def test_private_import_spawn_image(self, call, fetch):
        # Image not cached locally (os.path.exists -> False): the driver
        # fetches it from glance and imports it via a file:// URL.
        image_name = CONF.zvm.image_tmp_path + '/image_name'
        image_url = "file://" + image_name
        image_meta = {'os_version': 'os_version'}
        with mock.patch('os.path.exists', side_effect=[False]):
            self._driver._import_spawn_image(self._context, 'image_name',
                                             'os_version')
        fetch.assert_called_once_with(self._context, 'image_name',
                                      image_name)
        call.assert_called_once_with('image_import', 'image_name', image_url,
                                     image_meta, remote_host='test@192.168.1.1')
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_exists')
    @mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
    def test_destroy(self, call, guest_exists):
        # destroy() on an existing guest must issue exactly one guest_delete.
        guest_exists.return_value = True
        self._driver.destroy(self._context, self._instance,
                             network_info=self._network_info)
        call.assert_called_once_with('guest_delete', self._instance['name'])
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_exists')
    @mock.patch('nova.compute.manager.ComputeVirtAPI.wait_for_instance_event')
    @mock.patch('nova.virt.zvm.driver.ZVMDriver._setup_network')
    @mock.patch('nova.virt.zvm.driver.ZVMDriver._set_disk_list')
    @mock.patch('nova.virt.zvm.utils.generate_configdrive')
    @mock.patch('nova.virt.zvm.driver.ZVMDriver._get_image_info')
    @mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
    def test_spawn(self, call, get_image_info, gen_conf_file, set_disk_list,
                   setup_network, mock_wait, mock_exists):
        # Happy-path spawn: verifies the helper calls and the full
        # guest_create -> guest_deploy -> guest_config_minidisks ->
        # guest_start sequence, in order, with the stubbed data.
        _bdi = copy.copy(self._block_device_info)
        get_image_info.return_value = 'image_name'
        gen_conf_file.return_value = 'transportfiles'
        set_disk_list.return_value = 'disk_list', 'eph_list'
        mock_exists.return_value = False
        self._driver.spawn(self._context, self._instance, self._image_meta,
                           injected_files=None, admin_password=None,
                           allocations=None, network_info=self._network_info,
                           block_device_info=_bdi)
        gen_conf_file.assert_called_once_with(self._context, self._instance,
                                              None, self._network_info, None)
        get_image_info.assert_called_once_with(self._context,
                                               self._image_meta.id,
                                               self._image_meta.properties.os_distro)
        set_disk_list.assert_called_once_with(self._instance, 'image_name',
                                              _bdi)
        setup_network.assert_called_once_with(self._instance.name,
                                              self._image_meta.properties.os_distro,
                                              self._network_info, self._instance)
        call.assert_has_calls([
            mock.call('guest_create', self._instance.name,
                      1, 1024, disk_list='disk_list'),
            mock.call('guest_deploy', self._instance.name, 'image_name',
                      transportfiles='transportfiles',
                      remotehost='test@192.168.1.1'),
            mock.call('guest_config_minidisks', self._instance.name,
                      'eph_list'),
            mock.call('guest_start', self._instance.name)
        ])
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_exists')
    @mock.patch('nova.virt.zvm.driver.ZVMDriver._get_image_info')
    def test_spawn_image_no_distro_empty(self, get_image_info, mock_exists):
        # spawn() must reject an image whose os_distro property is empty.
        meta = {'status': 'active',
                'deleted': False,
                'properties': {'os_distro': ''},
                'id': self._image_id,
                'size': 465448142}
        self._image_meta = objects.ImageMeta.from_dict(meta)
        mock_exists.return_value = False
        self.assertRaises(exception.InvalidInput, self._driver.spawn,
                          self._context, self._instance, self._image_meta,
                          injected_files=None, admin_password=None,
                          allocations=None, network_info=self._network_info,
                          block_device_info=None)
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_exists')
    @mock.patch('nova.virt.zvm.driver.ZVMDriver._get_image_info')
    def test_spawn_image_no_distro_none(self, get_image_info, mock_exists):
        # Same rejection when the os_distro property is missing entirely.
        meta = {'status': 'active',
                'deleted': False,
                'id': self._image_id,
                'size': 465448142}
        self._image_meta = objects.ImageMeta.from_dict(meta)
        mock_exists.return_value = False
        self.assertRaises(exception.InvalidInput, self._driver.spawn,
                          self._context, self._instance, self._image_meta,
                          injected_files=None, admin_password=None,
                          allocations=None, network_info=self._network_info,
                          block_device_info=None)
    @mock.patch('builtins.open')
    @mock.patch('nova.image.glance.get_remote_image_service')
    @mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
    def test_snapshot(self, call, get_image_service, mock_open):
        # Happy-path snapshot: guest_capture -> image_export -> upload the
        # exported file to glance -> image_delete, with the expected task
        # state transitions along the way.
        image_service = mock.Mock()
        image_id = 'e9ee1562-3ea1-4cb1-9f4c-f2033000eab1'
        get_image_service.return_value = (image_service, image_id)
        # Responses for the three connector calls made during snapshot.
        call_resp = ['', {"os_version": "rhel7.2",
                          "dest_url": "file:///path/to/target"}, '']
        call.side_effect = call_resp
        # Metadata the driver is expected to push back to glance.
        new_image_meta = {
            'status': 'active',
            'properties': {
                 'image_location': 'snapshot',
                 'image_state': 'available',
                 'owner_id': self._instance['project_id'],
                 'os_distro': call_resp[1]['os_version'],
                 'architecture': 's390x',
                 'hypervisor_type': 'zvm'
            },
            'disk_format': 'raw',
            'container_format': 'bare',
        }
        image_path = os.path.join(os.path.normpath(
                            CONF.zvm.image_tmp_path), image_id)
        dest_path = "file://" + image_path
        self._driver.snapshot(self._context, self._instance, image_id,
                              self.mock_update_task_state)
        get_image_service.assert_called_with(self._context, image_id)
        mock_open.assert_called_once_with(image_path, 'r')
        ret_file = mock_open.return_value.__enter__.return_value
        image_service.update.assert_called_once_with(self._context,
                                                     image_id,
                                                     new_image_meta,
                                                     ret_file,
                                                     purge_props=False)
        self.mock_update_task_state.assert_has_calls([
            mock.call(task_state='image_pending_upload'),
            mock.call(expected_state='image_pending_upload',
                      task_state='image_uploading')
        ])
        call.assert_has_calls([
            mock.call('guest_capture', self._instance.name, image_id),
            mock.call('image_export', image_id, dest_path,
                      remote_host=mock.ANY),
            mock.call('image_delete', image_id)
        ])
    @mock.patch('nova.image.glance.get_remote_image_service')
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_capture')
    def test_snapshot_capture_fail(self, mock_capture, get_image_service):
        # guest_capture failure: the exception propagates and the glance
        # placeholder image is cleaned up.
        image_service = mock.Mock()
        image_id = 'e9ee1562-3ea1-4cb1-9f4c-f2033000eab1'
        get_image_service.return_value = (image_service, image_id)
        mock_capture.side_effect = exception.ZVMDriverException(error='error')
        self.assertRaises(exception.ZVMDriverException, self._driver.snapshot,
                          self._context, self._instance, image_id,
                          self.mock_update_task_state)
        self.mock_update_task_state.assert_called_once_with(
            task_state='image_pending_upload')
        image_service.delete.assert_called_once_with(self._context, image_id)
    @mock.patch('nova.image.glance.get_remote_image_service')
    @mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.image_delete')
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.image_export')
    def test_snapshot_import_fail(self, mock_import, mock_delete,
                                  call, get_image_service):
        # image_export failure after a successful capture: both the captured
        # z/VM image and the glance placeholder must be deleted.
        image_service = mock.Mock()
        image_id = 'e9ee1562-3ea1-4cb1-9f4c-f2033000eab1'
        get_image_service.return_value = (image_service, image_id)
        mock_import.side_effect = exception.ZVMDriverException(error='error')
        self.assertRaises(exception.ZVMDriverException, self._driver.snapshot,
                          self._context, self._instance, image_id,
                          self.mock_update_task_state)
        self.mock_update_task_state.assert_called_once_with(
            task_state='image_pending_upload')
        get_image_service.assert_called_with(self._context, image_id)
        call.assert_called_once_with('guest_capture',
                                     self._instance.name, image_id)
        mock_delete.assert_called_once_with(image_id)
        image_service.delete.assert_called_once_with(self._context, image_id)
    @mock.patch('builtins.open')
    @mock.patch('nova.image.glance.get_remote_image_service')
    @mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.image_delete')
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.image_export')
    def test_snapshot_update_fail(self, mock_import, mock_delete, call,
                                  get_image_service, mock_open):
        # Glance update failure at the upload stage: the exported file was
        # opened, both task-state transitions happened, and cleanup (z/VM
        # image + glance placeholder) still runs.
        image_service = mock.Mock()
        image_id = 'e9ee1562-3ea1-4cb1-9f4c-f2033000eab1'
        get_image_service.return_value = (image_service, image_id)
        image_service.update.side_effect = exception.ImageNotAuthorized(
            image_id='dummy')
        image_path = os.path.join(os.path.normpath(
                            CONF.zvm.image_tmp_path), image_id)
        self.assertRaises(exception.ImageNotAuthorized, self._driver.snapshot,
                          self._context, self._instance, image_id,
                          self.mock_update_task_state)
        mock_open.assert_called_once_with(image_path, 'r')
        get_image_service.assert_called_with(self._context, image_id)
        mock_delete.assert_called_once_with(image_id)
        image_service.delete.assert_called_once_with(self._context, image_id)
        self.mock_update_task_state.assert_has_calls([
            mock.call(task_state='image_pending_upload'),
            mock.call(expected_state='image_pending_upload',
                      task_state='image_uploading')
        ])
        call.assert_called_once_with('guest_capture', self._instance.name,
                                     image_id)
    # The following tests each check that a simple power-state driver API
    # delegates to the matching Hypervisor call with the instance name.
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_start')
    def test_guest_start(self, call):
        self._driver.power_on(self._context, self._instance, None)
        call.assert_called_once_with(self._instance.name)
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_softstop')
    def test_power_off(self, ipa):
        self._driver.power_off(self._instance)
        ipa.assert_called_once_with(self._instance.name)
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_softstop')
    def test_power_off_with_timeout_interval(self, ipa):
        # timeout/retry_interval must be forwarded as keyword arguments.
        self._driver.power_off(self._instance, 60, 10)
        ipa.assert_called_once_with(self._instance.name,
                                    timeout=60, retry_interval=10)
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_pause')
    def test_pause(self, ipa):
        self._driver.pause(self._instance)
        ipa.assert_called_once_with(self._instance.name)
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_unpause')
    def test_unpause(self, ipa):
        self._driver.unpause(self._instance)
        ipa.assert_called_once_with(self._instance.name)
    # SOFT reboot maps to guest_reboot, HARD reboot maps to guest_reset.
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_reboot')
    def test_reboot_soft(self, ipa):
        self._driver.reboot(None, self._instance, None, 'SOFT')
        ipa.assert_called_once_with(self._instance.name)
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.guest_reset')
    def test_reboot_hard(self, ipa):
        self._driver.reboot(None, self._instance, None, 'HARD')
        ipa.assert_called_once_with(self._instance.name)
    @mock.patch('nova.virt.zvm.hypervisor.Hypervisor.list_names')
    def test_instance_exists(self, mock_list):
        # list_names returns upper-case names; instance_exists must match
        # case-insensitively and reject names not in the list.
        mock_list.return_value = [self._instance.name.upper()]
        # Create a new server which not in list_instances's output
        another_instance = fake_instance.fake_instance_obj(self._context,
                                                           id=10)
        self.assertTrue(self._driver.instance_exists(self._instance))
        self.assertFalse(self._driver.instance_exists(another_instance))
    @mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
    def test_get_console_output(self, call):
        # Console output is fetched via guest_get_console_output and
        # returned verbatim.
        call.return_value = 'console output'
        outputs = self._driver.get_console_output(None, self._instance)
        call.assert_called_once_with('guest_get_console_output', 'abc00001')
        self.assertEqual('console output', outputs)
    @mock.patch('nova.virt.zvm.utils.ConnectorClient.call')
    def test_update_provider_tree(self, call):
        # Host stats from the connector must be translated into placement
        # inventory records (VCPU / MEMORY_MB / DISK_GB) using the
        # configured allocation ratios and reserved amounts.
        host_info = {'vcpus': 84,
                     'disk_total': 2000,
                     'memory_mb': 78192}
        call.return_value = host_info
        expected_inv = {
            'VCPU': {
                'total': 84,
                'min_unit': 1,
                'max_unit': 84,
                'step_size': 1,
                'allocation_ratio': CONF.initial_cpu_allocation_ratio,
                'reserved': CONF.reserved_host_cpus,
            },
            'MEMORY_MB': {
                'total': 78192,
                'min_unit': 1,
                'max_unit': 78192,
                'step_size': 1,
                'allocation_ratio': CONF.initial_ram_allocation_ratio,
                'reserved': CONF.reserved_host_memory_mb,
            },
            'DISK_GB': {
                'total': 2000,
                'min_unit': 1,
                'max_unit': 2000,
                'step_size': 1,
                'allocation_ratio': CONF.initial_disk_allocation_ratio,
                'reserved': CONF.reserved_host_disk_mb,
            },
        }
        pt = provider_tree.ProviderTree()
        nodename = 'fake-node'
        pt.new_root(nodename, uuidsentinel.rp_uuid)
        self._driver.update_provider_tree(pt, nodename)
        inv = pt.data(nodename).inventory
        self.assertEqual(expected_inv, inv)
|
aeroaks/httpProfiler | refs/heads/master | methods/tinycss2/tokenizer.py | 1 | from __future__ import unicode_literals
import re
import sys
from ..webencodings import ascii_lower
from .ast import (
AtKeywordToken, Comment, CurlyBracketsBlock, DimensionToken, FunctionBlock,
HashToken, IdentToken, LiteralToken, NumberToken, ParenthesesBlock,
ParseError, PercentageToken, SquareBracketsBlock, StringToken, URLToken,
UnicodeRangeToken, WhitespaceToken)
from ._compat import unichr
_NUMBER_RE = re.compile(r'[-+]?([0-9]*\.)?[0-9]+([eE][+-]?[0-9]+)?')
_HEX_ESCAPE_RE = re.compile(r'([0-9A-Fa-f]{1,6})[ \n\t]?')
def parse_component_value_list(css, preserve_comments=False):
    """Parse a list of component values.

    :param css: A :term:`string`.
    :param preserve_comments: When true, ``/* ... */`` comments are kept as
        :class:`Comment` nodes; otherwise they are silently dropped.
    :returns: A list of :term:`component values`.
    """
    # Normalize NUL and all newline flavors per the CSS syntax preprocessing
    # rules, so the rest of the tokenizer only ever sees '\n'.
    css = (css.replace('\0', '\uFFFD')
           # This turns out to be faster than a regexp:
           .replace('\r\n', '\n').replace('\r', '\n').replace('\f', '\n'))
    length = len(css)
    token_start_pos = pos = 0  # Character index in the css source.
    line = 1  # First line is line 1.
    last_newline = -1
    root = tokens = []
    end_char = None  # Pop the stack when encountering this character.
    stack = []  # Stack of nested blocks: (tokens, end_char) tuples.
    # Main scan loop: each iteration consumes exactly one token, or opens
    # or closes one nested block.
    while pos < length:
        # Update the (line, column) bookkeeping for the token we are about
        # to emit, based on newlines seen since the previous token.
        newline = css.rfind('\n', token_start_pos, pos)
        if newline != -1:
            line += 1 + css.count('\n', token_start_pos, newline)
            last_newline = newline
        # First character in a line is in column 1.
        column = pos - last_newline
        token_start_pos = pos
        c = css[pos]
        if c in ' \n\t':
            # Runs of whitespace collapse into a single WhitespaceToken.
            pos += 1
            while css.startswith((' ', '\n', '\t'), pos):
                pos += 1
            tokens.append(WhitespaceToken(line, column))
            continue
        elif (c in 'Uu' and pos + 2 < length and css[pos + 1] == '+'
                and css[pos + 2] in '0123456789abcdefABCDEF?'):
            # <unicode-range>: U+XXXX, U+XX?? or U+XXXX-YYYY.
            start, end, pos = _consume_unicode_range(css, pos + 2)
            tokens.append(UnicodeRangeToken(line, column, start, end))
            continue
        elif _is_ident_start(css, pos):
            value, pos = _consume_ident(css, pos)
            if not css.startswith('(', pos):  # Not a function
                tokens.append(IdentToken(line, column, value))
                continue
            pos += 1  # Skip the '('
            if ascii_lower(value) == 'url':
                # url(...) gets special unquoted-URL handling.
                value, pos = _consume_url(css, pos)
                tokens.append(
                    URLToken(line, column, value) if value is not None
                    else ParseError(line, column, 'bad-url', 'bad URL token'))
                continue
            # Any other ident followed by '(' opens a function block whose
            # arguments are collected until the matching ')'.
            arguments = []
            tokens.append(FunctionBlock(line, column, value, arguments))
            stack.append((tokens, end_char))
            end_char = ')'
            tokens = arguments
            continue
        # <number>, <percentage> or <dimension>.
        match = _NUMBER_RE.match(css, pos)
        if match:
            pos = match.end()
            repr_ = css[token_start_pos:pos]
            value = float(repr_)
            # int_value is set only when there is neither a fractional part
            # nor an exponent (no regex group matched).
            int_value = int(repr_) if not any(match.groups()) else None
            if pos < length and _is_ident_start(css, pos):
                unit, pos = _consume_ident(css, pos)
                tokens.append(DimensionToken(
                    line, column, value, int_value, repr_, unit))
            elif css.startswith('%', pos):
                pos += 1
                tokens.append(PercentageToken(
                    line, column, value, int_value, repr_))
            else:
                tokens.append(NumberToken(
                    line, column, value, int_value, repr_))
        elif c == '@':
            pos += 1
            if pos < length and _is_ident_start(css, pos):
                value, pos = _consume_ident(css, pos)
                tokens.append(AtKeywordToken(line, column, value))
            else:
                # A lone '@' is just a literal.
                tokens.append(LiteralToken(line, column, '@'))
        elif c == '#':
            pos += 1
            if pos < length and (
                    css[pos] in '0123456789abcdefghijklmnopqrstuvwxyz'
                                '-_ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                    or ord(css[pos]) > 0x7F  # Non-ASCII
                    # Valid escape:
                    or (css[pos] == '\\' and not css.startswith('\\\n', pos))):
                # is_identifier distinguishes e.g. #item from #00ff00.
                is_identifier = _is_ident_start(css, pos)
                value, pos = _consume_ident(css, pos)
                tokens.append(HashToken(line, column, value, is_identifier))
            else:
                tokens.append(LiteralToken(line, column, '#'))
        elif c == '{':
            # The three bracket kinds each push the current token list and
            # redirect appends into the new block's content list.
            content = []
            tokens.append(CurlyBracketsBlock(line, column, content))
            stack.append((tokens, end_char))
            end_char = '}'
            tokens = content
            pos += 1
        elif c == '[':
            content = []
            tokens.append(SquareBracketsBlock(line, column, content))
            stack.append((tokens, end_char))
            end_char = ']'
            tokens = content
            pos += 1
        elif c == '(':
            content = []
            tokens.append(ParenthesesBlock(line, column, content))
            stack.append((tokens, end_char))
            end_char = ')'
            tokens = content
            pos += 1
        elif c == end_char:  # Matching }, ] or )
            # The top-level end_char is None (never equal to a character),
            # so we never get here if the stack is empty.
            tokens, end_char = stack.pop()
            pos += 1
        elif c in '}])':
            tokens.append(ParseError(line, column, c, 'Unmatched ' + c))
            pos += 1
        elif c in ('"', "'"):
            value, pos = _consume_quoted_string(css, pos)
            tokens.append(
                StringToken(line, column, value) if value is not None
                else ParseError(line, column, 'bad-string',
                                'bad string token'))
        elif css.startswith('/*', pos):  # Comment
            pos = css.find('*/', pos + 2)
            if pos == -1:
                # Unterminated comment runs to end of input.
                if preserve_comments:
                    tokens.append(
                        Comment(line, column, css[token_start_pos + 2:]))
                break
            if preserve_comments:
                tokens.append(
                    Comment(line, column, css[token_start_pos + 2:pos]))
            pos += 2
        elif css.startswith('<!--', pos):
            # CDO/CDC tokens, kept as literals.
            tokens.append(LiteralToken(line, column, '<!--'))
            pos += 4
        elif css.startswith('-->', pos):
            tokens.append(LiteralToken(line, column, '-->'))
            pos += 3
        elif css.startswith('||', pos):
            tokens.append(LiteralToken(line, column, '||'))
            pos += 2
        elif c in '~|^$*':
            # Attribute-selector match operators: '~=', '|=', '^=', etc.
            pos += 1
            if css.startswith('=', pos):
                pos += 1
                tokens.append(LiteralToken(line, column, c + '='))
            else:
                tokens.append(LiteralToken(line, column, c))
        else:
            # Anything else is a one-character literal (delim) token.
            tokens.append(LiteralToken(line, column, c))
            pos += 1
    return root
def _is_ident_start(css, pos):
"""Return True if the given position is the start of a CSS identifier."""
c = css[pos]
length = len(css)
# if c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_':
# return True # Fast path XXX
if c == '-' and pos + 1 < length:
pos += 1
c = css[pos]
return (
c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_'
or ord(c) > 0x7F # Non-ASCII
# Valid escape:
or (c == '\\' and not css.startswith('\\\n', pos)))
def _consume_ident(css, pos):
    """Return (unescaped_value, new_pos).

    Assumes pos starts at a valid identifier. See :func:`_is_ident_start`.

    ``chunks`` collects runs of literal characters plus decoded escapes so
    that the common no-escape case is a single slice, not per-char appends.
    """
    # http://dev.w3.org/csswg/css-syntax/#consume-a-name
    chunks = []
    length = len(css)
    start_pos = pos  # Start of the current run of literal characters.
    while pos < length:
        c = css[pos]
        if c in ('abcdefghijklmnopqrstuvwxyz-_0123456789'
                 'ABCDEFGHIJKLMNOPQRSTUVWXYZ') or ord(c) > 0x7F:
            # Name code point: extend the current literal run.
            pos += 1
        elif c == '\\' and not css.startswith('\\\n', pos):
            # Valid escape
            # Flush the literal run, decode the escape, start a new run.
            chunks.append(css[start_pos:pos])
            c, pos = _consume_escape(css, pos + 1)
            chunks.append(c)
            start_pos = pos
        else:
            # Any other character ends the identifier.
            break
    chunks.append(css[start_pos:pos])
    return ''.join(chunks), pos
def _consume_quoted_string(css, pos):
    """Return (unescaped_value, new_pos).

    ``pos`` must point at the opening quote. Returns ``(None, pos)`` on an
    unescaped newline inside the string (a *bad-string* per the CSS spec).
    An unterminated string at EOF is returned as-is, not as an error.
    """
    # http://dev.w3.org/csswg/css-syntax/#consume-a-string-token
    quote = css[pos]
    assert quote in ('"', "'")
    pos += 1
    chunks = []  # Literal runs plus decoded escapes, joined at the end.
    length = len(css)
    start_pos = pos
    while pos < length:
        c = css[pos]
        if c == quote:
            # Closing quote: flush the final run and stop.
            chunks.append(css[start_pos:pos])
            pos += 1
            break
        elif c == '\\':
            chunks.append(css[start_pos:pos])
            pos += 1
            if pos < length:
                if css[pos] == '\n':  # Ignore escaped newlines
                    pos += 1
                else:
                    c, pos = _consume_escape(css, pos)
                    chunks.append(c)
            # else: Escaped EOF, do nothing
            start_pos = pos
        elif c == '\n':  # Unescaped newline
            return None, pos  # bad-string
        else:
            pos += 1
    else:
        # EOF before a closing quote: keep what we have (loop 'else').
        chunks.append(css[start_pos:pos])
    return ''.join(chunks), pos
def _consume_escape(css, pos):
    r"""Return (unescaped_char, new_pos).

    Assumes a valid escape: pos is just after '\' and not followed by '\n'.

    Hex escapes of 1-6 digits (plus one optional trailing whitespace
    character, consumed by the regex) decode to the code point; out-of-range
    or zero code points become U+FFFD.  A non-hex character escapes to
    itself; an escape at EOF yields U+FFFD.
    """
    # http://dev.w3.org/csswg/css-syntax/#consume-an-escaped-character
    hex_match = _HEX_ESCAPE_RE.match(css, pos)
    if hex_match:
        codepoint = int(hex_match.group(1), 16)
        return (
            unichr(codepoint) if 0 < codepoint <= sys.maxunicode else '\uFFFD',
            hex_match.end())
    elif pos < len(css):
        return css[pos], pos + 1
    else:
        return '\uFFFD', pos
def _consume_url(css, pos):
    """Return (unescaped_url, new_pos)

    The given pos is assumed to be just after the '(' of 'url('.
    Returns ``(None, new_pos)`` for a *bad-url* token; in that case the
    remnants of the bad URL (up to an unescaped ')') are also consumed.
    """
    length = len(css)
    # http://dev.w3.org/csswg/css-syntax/#consume-a-url-token
    # Skip whitespace
    while css.startswith((' ', '\n', '\t'), pos):
        pos += 1
    if pos >= length:  # EOF
        return '', pos
    c = css[pos]
    if c in ('"', "'"):
        # Quoted URL: delegate to the string consumer, then expect ')'.
        value, pos = _consume_quoted_string(css, pos)
    elif c == ')':
        # Empty unquoted URL: url().
        return '', pos + 1
    else:
        # Unquoted URL: consume until ')', whitespace, or a forbidden
        # character; escapes are decoded along the way.
        chunks = []
        start_pos = pos
        while 1:
            if pos >= length:  # EOF
                chunks.append(css[start_pos:pos])
                return ''.join(chunks), pos
            c = css[pos]
            if c == ')':
                chunks.append(css[start_pos:pos])
                pos += 1
                return ''.join(chunks), pos
            elif c in ' \n\t':
                # Whitespace may only be followed by the closing ')';
                # checked after the loop.
                chunks.append(css[start_pos:pos])
                value = ''.join(chunks)
                pos += 1
                break
            elif c == '\\' and not css.startswith('\\\n', pos):
                # Valid escape
                chunks.append(css[start_pos:pos])
                c, pos = _consume_escape(css, pos + 1)
                chunks.append(c)
                start_pos = pos
            elif (c in
                    '"\'('
                    # http://dev.w3.org/csswg/css-syntax/#non-printable-character
                    '\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0e'
                    '\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19'
                    '\x1a\x1b\x1c\x1d\x1e\x1f\x7f'):
                value = None  # Parse error
                pos += 1
                break
            else:
                pos += 1
    if value is not None:
        # After a quoted URL or trailing whitespace: only whitespace then
        # ')' (or EOF) makes this a valid URL token.
        while css.startswith((' ', '\n', '\t'), pos):
            pos += 1
        if pos < length:
            if css[pos] == ')':
                return value, pos + 1
        else:
            return value, pos
    # http://dev.w3.org/csswg/css-syntax/#consume-the-remnants-of-a-bad-url0
    while pos < length:
        if css.startswith('\\)', pos):
            # An escaped ')' does not terminate the bad URL.
            pos += 2
        elif css[pos] == ')':
            pos += 1
            break
        else:
            pos += 1
    return None, pos  # bad-url
def _consume_unicode_range(css, pos):
"""Return (range, new_pos)
The given pos is assume to be just after the '+' of 'U+' or 'u+'.
"""
# http://dev.w3.org/csswg/css-syntax/#consume-a-unicode-range-token
length = len(css)
start_pos = pos
max_pos = min(pos + 6, length)
while pos < max_pos and css[pos] in '0123456789abcdefABCDEF':
pos += 1
start = css[start_pos:pos]
start_pos = pos
# Same max_pos as before: total of hex digits and question marks <= 6
while pos < max_pos and css[pos] == '?':
pos += 1
question_marks = pos - start_pos
if question_marks:
end = start + 'F' * question_marks
start = start + '0' * question_marks
elif (pos + 1 < length and css[pos] == '-'
and css[pos + 1] in '0123456789abcdefABCDEF'):
pos += 1
start_pos = pos
max_pos = min(pos + 6, length)
while pos < max_pos and css[pos] in '0123456789abcdefABCDEF':
pos += 1
end = css[start_pos:pos]
else:
end = start
return int(start, 16), int(end, 16), pos
|
yangpeiyong/Sigil | refs/heads/master | src/Resource_Files/python_pkg/windows_python_gather.py | 3 | #!/usr/bin/env python3
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
import sys, os, inspect, shutil, platform, textwrap, py_compile, site
from python_paths import py_ver, py_lib, sys_dlls, py_exe, py_inc, py_dest, tmp_prefix, proj_name
# Python standard modules location
srcdir = os.path.dirname(inspect.getfile(os))
pybase = os.path.dirname(srcdir)
# Where we're going to copy stuff
lib_dir = os.path.join(py_dest, 'Lib')
dll_dir = os.path.join(py_dest, 'DLLs')
site_dest = os.path.join(lib_dir, 'site-packages')
# A hack. This must eventually be set by a postinstall script, or
# possibly Sigil itself at runtime. Installing the 32-bit version
# of Sigil on 64-bit Windows will break this hardcoded crap.
pyvenv_home_dir = r'C:\Program Files\%s\Python3'% proj_name
# Cherry-picked additional and/or modified site modules
site_packages = [ ('lxml', 'd'),
('six.py', 'f'),
('bs4', 'd'),
('html5lib','d'),
('PIL', 'd'),
('regex.py','f'),
('_regex.pyd','f'),
('_regex_core.py','f'),
('test_regex.py', 'f')]
def copy_site_packages():
    """Copy the cherry-picked third-party modules listed in ``site_packages``
    into the bundled site-packages directory.

    Every system site-packages location is searched; the first match wins.
    Entries that are found nowhere are skipped silently (matching the
    original best-effort behaviour).

    Bug fixed: the original nested loop contained a dangling ``else: break``
    that aborted the directory scan as soon as a non-matching entry was
    seen, so a package was only found if it happened to come first in
    ``os.listdir`` order.  We now test the candidate path directly.
    """
    for pkg, typ in site_packages:
        for path in site.getsitepackages():
            candidate = os.path.join(path, pkg)
            if typ == 'd' and os.path.isdir(candidate):
                shutil.copytree(candidate, os.path.join(site_dest, pkg),
                                ignore=ignore_in_dirs)
                break
            if typ == 'f' and os.path.isfile(candidate):
                shutil.copy2(candidate, os.path.join(site_dest, pkg))
                break
def ignore_in_dirs(base, items, ignored_dirs=None):
    """``shutil.copytree`` ignore callback: skip VCS, test and cache dirs.

    Only entries of *base* that are directories AND whose name appears in
    *ignored_dirs* are reported; plain files are never ignored.
    """
    if ignored_dirs is None:
        ignored_dirs = {'.svn', '.bzr', '.git', 'test', 'tests',
                        'testing', '__pycache__'}
    return [name for name in items
            if name in ignored_dirs
            and os.path.isdir(os.path.join(base, name))]
def dll_walk():
    """Copy Python's DLLs directory into the bundle.

    The MSVC runtime files are excluded -- the installer ships them
    separately.  (A dead, commented-out directory walk with a hardcoded
    ``C:\\Python`` path was removed from the original.)
    """
    shutil.copytree(os.path.join(pybase, "DLLs"), dll_dir,
                    ignore=shutil.ignore_patterns('msvc*.dll', 'Microsoft.*'))
def copy_tk_tcl():
    """Copy the tk8.6/tcl8.6 runtime trees into the bundled Lib directory,
    leaving out demos, timezone data and documentation files."""
    def _skip(root, names):
        # copytree ignore callback: doc files by extension, plus two
        # extension-less directory names.
        skipped = []
        for name in names:
            ext = os.path.splitext(name)[1]
            if ext in ('.chm', '.htm', '.txt'):
                skipped.append(name)
            elif not ext and name in ('demos', 'tzdata'):
                skipped.append(name)
        return skipped
    tcl_root = os.path.join(pybase, "tcl")
    wanted = ('tk8.6', 'tcl8.6')
    for name in os.listdir(tcl_root):
        if name not in wanted:
            continue
        candidate = os.path.join(tcl_root, name)
        if os.path.isdir(candidate):
            shutil.copytree(candidate, os.path.join(lib_dir, name),
                            ignore=_skip)
def copy_pylib():
    """Copy the Python DLLs and interpreter executable into the bundle.

    The core pythonXY.dll is mandatory: if it cannot be copied the build
    aborts with an explanatory message.  The pywin32 DLLs are optional and
    silently skipped when absent.

    Bugs fixed: the original's bare ``exit`` (no parentheses) was a no-op,
    so a missing core DLL only printed a message and the build limped on;
    the bare ``except:`` clauses also swallowed KeyboardInterrupt, and the
    message contained an invalid ``\\w`` string escape.
    """
    core_dll = 'python%s.dll' % py_ver
    try:
        shutil.copy2(os.path.join(sys_dlls, core_dll), py_dest)
        shutil.copy2(os.path.join(sys_dlls, core_dll), tmp_prefix)
    except OSError:
        # Actually abort -- the bundle is useless without the core DLL.
        raise SystemExit(
            'Couldn\'t find the Python%s.dll file. May need to include '
            '-DSYS_DLL_DIR="c:\\windows\\syswow64" in the cmake command.'
            % py_ver)
    # pywin32 DLLs are only present when pywin32 is installed; best-effort.
    for optional in ('pywintypes%s.dll' % py_ver, 'pythoncom%s.dll' % py_ver):
        try:
            shutil.copy2(os.path.join(sys_dlls, optional), py_dest)
        except OSError:
            pass
    shutil.copy2(py_exe, os.path.join(py_dest, "sigil-python3.exe"))
def copy_python():
    """Copy the standard library tree into the bundle, excluding test
    suites, IDLE, lib2to3, caches, site-packages and documentation files."""
    skip_dirs = frozenset(('demos', 'tests', 'test', 'idlelib',
                           'lib2to3', '__pycache__', 'site-packages'))
    skip_exts = ('.chm', '.htm', '.txt')
    def _skip(root, names):
        # copytree ignore callback mirroring the exclusion sets above.
        skipped = []
        for name in names:
            ext = os.path.splitext(name)[1]
            if ext in skip_exts or (not ext and name in skip_dirs):
                skipped.append(name)
        return skipped
    shutil.copytree(os.path.join(pybase, "Lib"), lib_dir,
                    ignore=_skip)
def compile_libs():
    """Byte-compile every ``.py`` under ``lib_dir`` to an optimized ``.pyo``
    and delete the source (and any stale ``.pyc``) to slim the bundle.

    ``dfile`` is the lib-relative path so tracebacks show short names.
    Bug fixed: the bare ``except:`` swallowed KeyboardInterrupt/SystemExit;
    it is narrowed to the errors compile/remove can actually raise.
    """
    for root, _dirs, files in os.walk(lib_dir):
        for f in files:
            if not f.endswith('.py'):
                continue
            y = os.path.join(root, f)
            rel = os.path.relpath(y, lib_dir)
            try:
                py_compile.compile(y, cfile=y + 'o', dfile=rel,
                                   doraise=True, optimize=2)
                os.remove(y)
                z = y + 'c'
                if os.path.exists(z):
                    os.remove(z)
            except (py_compile.PyCompileError, OSError):
                print('Failed to byte-compile', y)
def create_site_py():
    """Write a minimal replacement ``site.py`` into the bundled Lib dir.

    The generated module trims sys.path for the bundled layout (dropping
    the stdlib zip entry on Windows, appending the bundled site-packages)
    and installs the interactive ``help()`` builtin.
    """
    with open(os.path.join(lib_dir, 'site.py'), 'wb') as f:
        f.write(bytes(textwrap.dedent('''\
            import sys
            import builtins
            import os
            import _sitebuiltins
            def set_helper():
                builtins.help = _sitebuiltins._Helper()
            def fix_sys_path():
                if os.sep == '/':
                    sys.path.append(os.path.join(sys.prefix, "lib",
                                    "python" + sys.version[:3],
                                    "site-packages"))
                else:
                    for path in sys.path:
                        py_ver = "".join(map(str, sys.version_info[:2]))
                        if os.path.basename(path) == "python" + py_ver + ".zip":
                            sys.path.remove(path)
                    sys.path.append(os.path.join(sys.prefix, "lib", "site-packages"))
            def main():
                try:
                    fix_sys_path()
                    set_helper()
                except SystemExit as err:
                    if err.code is None:
                        return 0
                    if isinstance(err.code, int):
                        return err.code
                    print (err.code)
                    return 1
                except:
                    import traceback
                    traceback.print_exc()
                    return 1
            if not sys.flags.no_site:
                main()
            '''), 'UTF-8'))
def create_pyvenv():
    """Write a pyvenv.cfg into py_dest pointing home at pyvenv_home_dir."""
    config_text = textwrap.dedent('''\
        home = %s
        include-system-site-packages = false
        version = 3.4.0
        ''') % pyvenv_home_dir
    cfg_path = os.path.join(py_dest, 'pyvenv.cfg')
    with open(cfg_path, 'wb') as f:
        f.write(config_text.encode('UTF-8'))
if __name__ == '__main__':
    # Assemble the bundled Python runtime: copy DLLs and libraries,
    # generate the custom site.py and pyvenv.cfg, then byte-compile
    # last (compile_libs removes the .py sources under lib_dir as it
    # compiles them, so every earlier copy step must already be done).
    dll_walk()
    copy_pylib()
    copy_python()
    copy_site_packages()
    create_site_py()
    create_pyvenv()
    copy_tk_tcl()
    compile_libs()
|
kikobr/projetoextintor | refs/heads/master | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/_mapping.py | 94 | # -*- coding: utf-8 -*-
"""
pygments.lexers._mapping
~~~~~~~~~~~~~~~~~~~~~~~~
    Lexer mapping definitions. This file is generated by itself. Every time
    you change something on a builtin lexer definition, run this script from
    the lexers folder to update it.
Do not alter the LEXERS dictionary by hand.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
LEXERS = {
'ABAPLexer': ('pygments.lexers.other', 'ABAP', ('abap',), ('*.abap',), ('text/x-abap',)),
'ActionScript3Lexer': ('pygments.lexers.web', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'ActionScriptLexer': ('pygments.lexers.web', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'AdaLexer': ('pygments.lexers.compiled', 'Ada', ('ada', 'ada95ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AgdaLexer': ('pygments.lexers.functional', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pygments.lexers.text', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pygments.lexers.other', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'AspectJLexer': ('pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pygments.lexers.other', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
'AutoItLexer': ('pygments.lexers.other', 'AutoIt', ('autoit', 'Autoit'), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pygments.lexers.other', 'autohotkey', ('ahk', 'autohotkey'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pygments.lexers.other', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCodeLexer': ('pygments.lexers.text', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BaseMakefileLexer': ('pygments.lexers.text', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '.bashrc', 'bashrc', '.bash_*', 'bash_*'), ('application/x-sh', 'application/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.shell', 'Bash Session', ('console',), ('*.sh-session',), ('application/x-shell-session',)),
'BatchLexer': ('pygments.lexers.shell', 'Batchfile', ('bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BefungeLexer': ('pygments.lexers.other', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BlitzBasicLexer': ('pygments.lexers.compiled', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pygments.lexers.compiled', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BooLexer': ('pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BrainfuckLexer': ('pygments.lexers.other', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BroLexer': ('pygments.lexers.other', 'Bro', ('bro',), ('*.bro',), ()),
'BugsLexer': ('pygments.lexers.math', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CLexer': ('pygments.lexers.compiled', 'C', ('c',), ('*.c', '*.h', '*.idc'), ('text/x-chdr', 'text/x-csrc')),
'CMakeLexer': ('pygments.lexers.text', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CSharpAspxLexer': ('pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pygments.lexers.dotnet', 'C#', ('csharp', 'c#'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pygments.lexers.asm', 'ca65', ('ca65',), ('*.s',), ()),
'CbmBasicV2Lexer': ('pygments.lexers.other', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CeylonLexer': ('pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pygments.lexers.other', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'CheetahHtmlLexer': ('pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Cheetah', ('js+cheetah', 'javascript+cheetah', 'js+spitfire', 'javascript+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'ClayLexer': ('pygments.lexers.compiled', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'ClojureLexer': ('pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
'CobolFreeformatLexer': ('pygments.lexers.compiled', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pygments.lexers.compiled', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CoffeeScriptLexer': ('pygments.lexers.web', 'CoffeeScript', ('coffee-script', 'coffeescript', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml', '*.cfc'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'CommonLispLexer': ('pygments.lexers.functional', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp', '*.el'), ('text/x-common-lisp',)),
'CoqLexer': ('pygments.lexers.functional', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CppLexer': ('pygments.lexers.compiled', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrocLexer': ('pygments.lexers.agile', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CssDjangoLexer': ('pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), (), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pygments.lexers.templates', 'CSS+Ruby', ('css+erb', 'css+ruby'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pygments.lexers.web', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pygments.lexers.compiled', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CythonLexer': ('pygments.lexers.compiled', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pygments.lexers.compiled', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pygments.lexers.text', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pygments.lexers.web', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'DebianControlLexer': ('pygments.lexers.text', 'Debian Control file', ('control', 'debcontrol'), ('control',), ()),
'DelphiLexer': ('pygments.lexers.compiled', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas',), ('text/x-pascal',)),
'DgLexer': ('pygments.lexers.agile', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pygments.lexers.text', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DtdLexer': ('pygments.lexers.web', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pygments.lexers.web', 'Duel', ('duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pygments.lexers.compiled', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pygments.lexers.compiled', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pygments.lexers.compiled', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pygments.lexers.other', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pygments.lexers.compiled', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'EbnfLexer': ('pygments.lexers.text', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'ElixirConsoleLexer': ('pygments.lexers.functional', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pygments.lexers.functional', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.exs'), ('text/x-elixir',)),
'ErbLexer': ('pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pygments.lexers.functional', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pygments.lexers.functional', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
'FSharpLexer': ('pygments.lexers.dotnet', 'FSharp', ('fsharp',), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
'FactorLexer': ('pygments.lexers.agile', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pygments.lexers.agile', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pygments.lexers.compiled', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pygments.lexers.compiled', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FortranLexer': ('pygments.lexers.compiled', 'Fortran', ('fortran',), ('*.f', '*.f90', '*.F', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pygments.lexers.foxpro', 'FoxPro', ('Clipper', 'XBase'), ('*.PRG', '*.prg'), ()),
'GLShaderLexer': ('pygments.lexers.compiled', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GenshiLexer': ('pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pygments.lexers.text', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pygments.lexers.other', 'Gherkin', ('Cucumber', 'cucumber', 'Gherkin', 'gherkin'), ('*.feature',), ('text/x-gherkin',)),
'GnuplotLexer': ('pygments.lexers.other', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.compiled', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
'GoodDataCLLexer': ('pygments.lexers.other', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GosuLexer': ('pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GroffLexer': ('pygments.lexers.text', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy',), ('text/x-groovy',)),
'HamlLexer': ('pygments.lexers.web', 'Haml', ('haml', 'HAML'), ('*.haml',), ('text/x-haml',)),
'HaskellLexer': ('pygments.lexers.functional', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pygments.lexers.web', 'Haxe', ('hx', 'Haxe', 'haxe', 'haXe', 'hxsl'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.web', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pygments.lexers.text', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pygments.lexers.text', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HybrisLexer': ('pygments.lexers.other', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pygments.lexers.math', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IgorLexer': ('pygments.lexers.math', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
'IniLexer': ('pygments.lexers.text', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg'), ('text/x-ini',)),
'IoLexer': ('pygments.lexers.agile', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pygments.lexers.text', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'JadeLexer': ('pygments.lexers.web', 'Jade', ('jade', 'JADE'), ('*.jade',), ('text/x-jade',)),
'JagsLexer': ('pygments.lexers.math', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JavaLexer': ('pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pygments.lexers.templates', 'JavaScript+Django/Jinja', ('js+django', 'javascript+django', 'js+jinja', 'javascript+jinja'), (), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pygments.lexers.templates', 'JavaScript+Ruby', ('js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pygments.lexers.web', 'JavaScript', ('js', 'javascript'), ('*.js',), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pygments.lexers.templates', 'JavaScript+PHP', ('js+php', 'javascript+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pygments.lexers.templates', 'JavaScript+Smarty', ('js+smarty', 'javascript+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JsonLexer': ('pygments.lexers.web', 'JSON', ('json',), ('*.json',), ('application/json',)),
'JspLexer': ('pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JuliaConsoleLexer': ('pygments.lexers.math', 'Julia console', ('jlcon',), (), ()),
'JuliaLexer': ('pygments.lexers.math', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'KconfigLexer': ('pygments.lexers.other', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KokaLexer': ('pygments.lexers.functional', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt',), ('text/x-kotlin',)),
'LassoCssLexer': ('pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Lasso', ('js+lasso', 'javascript+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pygments.lexers.web', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LighttpdConfLexer': ('pygments.lexers.text', 'Lighttpd configuration file', ('lighty', 'lighttpd'), (), ('text/x-lighttpd-conf',)),
'LiterateAgdaLexer': ('pygments.lexers.functional', 'Literate Agda', ('lagda', 'literate-agda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateHaskellLexer': ('pygments.lexers.functional', 'Literate Haskell', ('lhs', 'literate-haskell', 'lhaskell'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiveScriptLexer': ('pygments.lexers.web', 'LiveScript', ('live-script', 'livescript'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LogosLexer': ('pygments.lexers.compiled', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pygments.lexers.other', 'Logtalk', ('logtalk',), ('*.lgt',), ('text/x-logtalk',)),
'LuaLexer': ('pygments.lexers.agile', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'MOOCodeLexer': ('pygments.lexers.other', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MakefileLexer': ('pygments.lexers.text', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Mako', ('js+mako', 'javascript+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MaqlLexer': ('pygments.lexers.other', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MasonLexer': ('pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MatlabLexer': ('pygments.lexers.math', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pygments.lexers.math', 'Matlab session', ('matlabsession',), (), ()),
'MiniDLexer': ('pygments.lexers.agile', 'MiniD', ('minid',), ('*.md',), ('text/x-minidsrc',)),
'ModelicaLexer': ('pygments.lexers.other', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pygments.lexers.compiled', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pygments.lexers.text', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MonkeyLexer': ('pygments.lexers.compiled', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MoonScriptLexer': ('pygments.lexers.agile', 'MoonScript', ('moon', 'moonscript'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MscgenLexer': ('pygments.lexers.other', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pygments.lexers.math', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.web', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
'MyghtyJavascriptLexer': ('pygments.lexers.templates', 'JavaScript+Myghty', ('js+myghty', 'javascript+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
'MyghtyLexer': ('pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NSISLexer': ('pygments.lexers.other', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
'NemerleLexer': ('pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pygments.lexers.compiled', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NewLispLexer': ('pygments.lexers.functional', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pygments.lexers.other', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pygments.lexers.text', 'Nginx configuration file', ('nginx',), (), ('text/x-nginx-conf',)),
'NimrodLexer': ('pygments.lexers.compiled', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nimrod',)),
'NumPyLexer': ('pygments.lexers.math', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.compiled', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pygments.lexers.compiled', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pygments.lexers.web', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pygments.lexers.functional', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pygments.lexers.math', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OocLexer': ('pygments.lexers.compiled', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pygments.lexers.functional', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pygments.lexers.other', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'Perl6Lexer': ('pygments.lexers.agile', 'Perl6', ('perl6', 'pl6'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pygments.lexers.agile', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm'), ('text/x-perl', 'application/x-perl')),
'PhpLexer': ('pygments.lexers.web', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PlPgsqlLexer': ('pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PostScriptLexer': ('pygments.lexers.other', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresLexer': ('pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pygments.lexers.other', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pygments.lexers.shell', 'PowerShell', ('powershell', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PrologLexer': ('pygments.lexers.compiled', 'Prolog', ('prolog',), ('*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PropertiesLexer': ('pygments.lexers.text', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pygments.lexers.other', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PuppetLexer': ('pygments.lexers.other', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pygments.lexers.text', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python3Lexer': ('pygments.lexers.agile', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')),
'Python3TracebackLexer': ('pygments.lexers.agile', 'Python 3.0 Traceback', ('py3tb',), ('*.py3tb',), ('text/x-python3-traceback',)),
'PythonConsoleLexer': ('pygments.lexers.agile', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
'PythonLexer': ('pygments.lexers.agile', 'Python', ('python', 'py', 'sage'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac', '*.sage'), ('text/x-python', 'application/x-python')),
'PythonTracebackLexer': ('pygments.lexers.agile', 'Python Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)),
'QmlLexer': ('pygments.lexers.web', 'QML', ('qml', 'Qt Meta Language', 'Qt modeling Language'), ('*.qml',), ('application/x-qml',)),
'RConsoleLexer': ('pygments.lexers.math', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RPMSpecLexer': ('pygments.lexers.other', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pygments.lexers.functional', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pygments.lexers.special', 'Raw token data', ('raw',), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pygments.lexers.math', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'RebolLexer': ('pygments.lexers.other', 'REBOL', ('rebol',), ('*.r', '*.r3'), ('text/x-rebol',)),
'RedcodeLexer': ('pygments.lexers.other', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pygments.lexers.text', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'RexxLexer': ('pygments.lexers.other', 'Rexx', ('rexx', 'ARexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RobotFrameworkLexer': ('pygments.lexers.other', 'RobotFramework', ('RobotFramework', 'robotframework'), ('*.txt', '*.robot'), ('text/x-robotframework',)),
'RstLexer': ('pygments.lexers.text', 'reStructuredText', ('rst', 'rest', 'restructuredtext'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RubyConsoleLexer': ('pygments.lexers.agile', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.agile', 'Ruby', ('rb', 'ruby', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pygments.lexers.compiled', 'Rust', ('rust',), ('*.rs', '*.rc'), ('text/x-rustsrc',)),
'SLexer': ('pygments.lexers.math', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pygments.lexers.functional', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SassLexer': ('pygments.lexers.web', 'Sass', ('sass', 'SASS'), ('*.sass',), ('text/x-sass',)),
'ScalaLexer': ('pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pygments.lexers.web', 'Scaml', ('scaml', 'SCAML'), ('*.scaml',), ('text/x-scaml',)),
'SchemeLexer': ('pygments.lexers.functional', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pygments.lexers.math', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pygments.lexers.web', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'ShellSessionLexer': ('pygments.lexers.shell', 'Shell Session', ('shell-session',), ('*.shell-session',), ('application/x-sh-session',)),
'SmaliLexer': ('pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pygments.lexers.other', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartyLexer': ('pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SnobolLexer': ('pygments.lexers.other', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SourcePawnLexer': ('pygments.lexers.other', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pygments.lexers.text', 'Debian Sourcelist', ('sourceslist', 'sources.list', 'debsources'), ('sources.list',), ()),
'SqlLexer': ('pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pygments.lexers.text', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SspLexer': ('pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pygments.lexers.math', 'Stan', ('stan',), ('*.stan',), ()),
'SwigLexer': ('pygments.lexers.compiled', 'SWIG', ('Swig', 'swig'), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'TclLexer': ('pygments.lexers.agile', 'Tcl', ('tcl',), ('*.tcl',), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TeaTemplateLexer': ('pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TexLexer': ('pygments.lexers.text', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
'TextLexer': ('pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
'TreetopLexer': ('pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
'TypeScriptLexer': ('pygments.lexers.web', 'TypeScript', ('ts',), ('*.ts',), ('text/x-typescript',)),
'UrbiscriptLexer': ('pygments.lexers.other', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
'VGLLexer': ('pygments.lexers.other', 'VGL', ('vgl',), ('*.rpf',), ()),
'ValaLexer': ('pygments.lexers.compiled', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
'VbNetAspxLexer': ('pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'VbNetLexer': ('pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
'VelocityHtmlLexer': ('pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
'VelocityLexer': ('pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
'VelocityXmlLexer': ('pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
'VerilogLexer': ('pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
'VhdlLexer': ('pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
'VimLexer': ('pygments.lexers.text', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
'XQueryLexer': ('pygments.lexers.web', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
'XmlDjangoLexer': ('pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), (), ('application/xml+django', 'application/xml+jinja')),
'XmlErbLexer': ('pygments.lexers.templates', 'XML+Ruby', ('xml+erb', 'xml+ruby'), (), ('application/xml+ruby',)),
'XmlLexer': ('pygments.lexers.web', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
'XmlPhpLexer': ('pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
'XmlSmartyLexer': ('pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
'XsltLexer': ('pygments.lexers.web', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
'XtendLexer': ('pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
'YamlLexer': ('pygments.lexers.text', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
}
# Self-regeneration entry point: when run as a script from the lexers
# directory, rescan every sibling lexer module and rewrite this file's
# own LEXERS mapping in place.
if __name__ == '__main__':
    import sys
    import os
    # lookup lexers
    found_lexers = []
    # Make the package root importable so 'pygments.lexers.*' resolves below.
    sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
    for filename in os.listdir('.'):
        if filename.endswith('.py') and not filename.startswith('_'):
            module_name = 'pygments.lexers.%s' % filename[:-3]
            print(module_name)
            # fromlist=[''] forces import of the submodule itself, not
            # just the top-level package.
            module = __import__(module_name, None, None, [''])
            for lexer_name in module.__all__:
                lexer = getattr(module, lexer_name)
                # Record each lexer as a ready-to-paste "name: tuple" source line.
                found_lexers.append(
                    '%r: %r' % (lexer_name,
                                (module_name,
                                 lexer.name,
                                 tuple(lexer.aliases),
                                 tuple(lexer.filenames),
                                 tuple(lexer.mimetypes))))
    # sort them, that should make the diff files for svn smaller
    found_lexers.sort()
    # extract useful sourcecode from this file
    f = open(__file__)
    try:
        content = f.read()
    finally:
        f.close()
    # Everything before the mapping and everything from the __main__ guard
    # onward is preserved verbatim; only the mapping body is regenerated.
    header = content[:content.find('LEXERS = {')]
    footer = content[content.find("if __name__ == '__main__':"):]
    # write new file
    f = open(__file__, 'wb')
    f.write(header)
    f.write('LEXERS = {\n %s,\n}\n\n' % ',\n '.join(found_lexers))
    f.write(footer)
    f.close()
|
bear/ronkyuu | refs/heads/dependabot/pip/urllib3-1.26.5 | tests/test_tools.py | 1 | # -*- coding: utf-8 -*-
"""
:copyright: (c) 2013-2020 by Mike Taylor and Kartik Prabhu
:license: MIT, see LICENSE for more details.
"""
import unittest
from ronkyuu.tools import parse_link_header
class TestParseLinkRels(unittest.TestCase):
    """Verify that parse_link_header() extracts the ``webmention`` rel
    whether or not the rel value is quoted."""

    def runTest(self):
        # The same endpoint advertised with an unquoted and a quoted rel.
        unquoted = '</test/1/webmention>; rel=webmention'
        quoted = '</test/1/webmention>; rel="webmention"'
        for header_value in (unquoted, quoted):
            rels = parse_link_header(header_value)
            self.assertIn('webmention', rels)
|
elitegreg/mudpy | refs/heads/master | mudpy/room.py | 1 | from .object import Object
from mudpy.database import ObjectCache
from mudpy.gameproperty import add_gameproperty
import yaml
class Room(Object, yaml.YAMLObject):
    """A location in the game world, loadable from YAML via the !Room tag.

    Exits are stored in ``exits`` as a mapping of direction name -> object
    id; resolved exit objects are memoized in the transient ``cached_exits``
    mapping.
    """
    __slots__ = tuple()
    yaml_loader = yaml.SafeLoader
    yaml_tag = '!Room'
    def __setstate__(self, newstate):
        # Restore persisted state, then (re)create the transient exit cache
        # and make sure ``exits`` is always a dict even if absent/empty.
        super().__setstate__(newstate)
        self.cached_exits = dict()
        if not self.exits:
            self.exits = dict()
    def get_exit(self, direction):
        """Return the exit object reachable via *direction* (case-insensitive).

        Raises KeyError if the room has no exit in that direction.
        """
        # TODO we can get rid of lower() here if we enforce
        # the exit to be stored in lowercase elsewhere
        direction = direction.lower()
        try:
            return self.cached_exits[direction]
        except KeyError:
            pass
        exit = ObjectCache().get(self.exits[direction])
        self.cached_exits[direction] = exit.weakref()
        # NOTE(review): a cache miss returns the strong ``exit`` object while
        # a later cache hit returns whatever ``exit.weakref()`` produced above
        # -- confirm callers treat the two interchangeably.
        return exit
# Expose YAML-backed fields as game properties: the descriptions are
# read-only, while the exit table stays writable.
add_gameproperty(Room, 'short_description', readonly=True)
add_gameproperty(Room, 'long_description', readonly=True)
add_gameproperty(Room, 'exits')
# tmp=True: presumably excluded from persistence (cache only) -- TODO confirm.
add_gameproperty(Room, 'cached_exits', tmp=True)
|
StackPointCloud/ansible-modules-extras | refs/heads/devel | windows/win_updates.py | 64 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Matt Davis <mdavis_ansible@rolpdog.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_updates
version_added: "2.0"
short_description: Download and install Windows updates
description:
- Searches, downloads, and installs Windows updates synchronously by automating the Windows Update client
options:
category_names:
description:
- A scalar or list of categories to install updates from
required: false
default: ["CriticalUpdates","SecurityUpdates","UpdateRollups"]
choices:
- Application
- Connectors
- CriticalUpdates
- DefinitionUpdates
- DeveloperKits
- FeaturePacks
- Guidance
- SecurityUpdates
- ServicePacks
- Tools
- UpdateRollups
- Updates
state:
description:
- Controls whether found updates are returned as a list or actually installed.
- This module also supports Ansible check mode, which has the same effect as setting state=searched
required: false
default: installed
choices:
- installed
- searched
log_path:
description:
- If set, win_updates will append update progress to the specified file. The directory must already exist.
required: false
author: "Matt Davis (@mattdavispdx)"
notes:
- win_updates must be run by a user with membership in the local Administrators group
- win_updates will use the default update service configured for the machine (Windows Update, Microsoft Update, WSUS, etc)
- win_updates does not manage reboots, but will signal when a reboot is required with the reboot_required return value.
- win_updates can take a significant amount of time to complete (hours, in some cases). Performance depends on many factors, including OS version, number of updates, system load, and update server load.
'''
EXAMPLES = '''
# Install all security, critical, and rollup updates
win_updates:
category_names: ['SecurityUpdates','CriticalUpdates','UpdateRollups']
# Install only security updates
win_updates: category_names=SecurityUpdates
# Search-only, return list of found updates (if any), log to c:\ansible_wu.txt
win_updates: category_names=SecurityUpdates state=searched log_path=c:/ansible_wu.txt
'''
RETURN = '''
reboot_required:
description: True when the target server requires a reboot to complete updates (no further updates can be installed until after a reboot)
returned: success
type: boolean
sample: True
updates:
description: List of updates that were found/installed
returned: success
type: dictionary
sample:
contains:
title:
description: Display name
returned: always
type: string
sample: "Security Update for Windows Server 2012 R2 (KB3004365)"
kb:
description: A list of KB article IDs that apply to the update
returned: always
type: list of strings
sample: [ '3004365' ]
id:
description: Internal Windows Update GUID
returned: always
type: string (guid)
sample: "fb95c1c8-de23-4089-ae29-fd3351d55421"
installed:
description: Was the update successfully installed
returned: always
type: boolean
sample: True
failure_hresult_code:
description: The HRESULT code from a failed update
returned: on install failure
type: boolean
sample: 2147942402
found_update_count:
description: The number of updates found needing to be applied
returned: success
type: int
sample: 3
installed_update_count:
description: The number of updates successfully installed
returned: success
type: int
sample: 2
failed_update_count:
description: The number of updates that failed to install
returned: always
type: int
sample: 0
'''
|
jsgf/xen | refs/heads/master | tools/python/logging/logging-0.4.9.2/test/log_test3.py | 42 | #!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""
A test harness for the logging module. Tests new fileConfig (not yet a complete test).
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import logging, logging.config
def doLog(logger):
    """Emit one message at each of the five standard severity levels."""
    for log_method, text in (
        (logger.debug, "Debug"),
        (logger.info, "Info"),
        (logger.warning, "Warning"),
        (logger.error, "Error"),
        (logger.critical, "Critical"),
    ):
        log_method(text)
def main():
    """Drive the logger hierarchy configured in log_test3.ini and show how
    messages propagate to console, file and socket handlers at each level.

    NOTE: uses Python 2 ``print`` statements, so this script is Python 2 only.
    """
    # All handlers, levels and formats come from the INI file next to this script.
    logging.config.fileConfig("log_test3.ini")
    logger = logging.getLogger(None)
    print "---------------------------------------------------"
    print "-- Logging to root; messages appear on console only"
    print "---------------------------------------------------"
    doLog(logger)
    print "----------------------------------------------------------------------"
    print "-- Logging to log02; messages appear on console and in file python.log"
    print "----------------------------------------------------------------------"
    logger = logging.getLogger("log02")
    doLog(logger)
    print "--------------------------------------------------------------------------"
    print "-- Logging to log02.log03; messages appear on console, in file python.log,"
    print "-- and at logrecv.py tcp (if running. <= DEBUG messages will not appear)."
    print "--------------------------------------------------------------------------"
    logger = logging.getLogger("log02.log03")
    doLog(logger)
    print "-----------------------------------------------------------------------"
    print "-- Logging to log02.log03.log04; messages appear only at logrecv.py udp"
    print "-- (if running. <= INFO messages will not appear)."
    print "-----------------------------------------------------------------------"
    logger = logging.getLogger("log02.log03.log04")
    doLog(logger)
    print "--------------------------------------------------------------------"
    print "-- Logging to log02.log03.log04.log05.log06; messages appear at"
    print "-- logrecv.py udp (if running. < CRITICAL messages will not appear)."
    print "--------------------------------------------------------------------"
    logger = logging.getLogger("log02.log03.log04.log05.log06")
    doLog(logger)
    print "-- All done."
    # Flush and close all handlers before exit.
    logging.shutdown()
if __name__ == "__main__":
    main()
kapouer/mapnik | refs/heads/master | tests/python_tests/filter_test.py | 3 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from nose.tools import *
from utilities import run_all
import mapnik
if hasattr(mapnik,'Expression'):
mapnik.Filter = mapnik.Expression
map_ = '''<Map>
<Style name="s">
<Rule>
<Filter><![CDATA[(([region]>=0) and ([region]<=50))]]></Filter>
</Rule>
<Rule>
<Filter><![CDATA[([region]>=0) and ([region]<=50)]]></Filter>
</Rule>
<Rule>
<Filter>
<![CDATA[
([region] >= 0)
and
([region] <= 50)
]]>
</Filter>
</Rule>
<Rule>
<Filter>([region]>=0) and ([region]<=50)</Filter>
</Rule>
<Rule>
<Filter>
([region] >= 0)
and
([region] <= 50)
</Filter>
</Rule>
</Style>
<Style name="s2" filter-mode="first">
<Rule>
</Rule>
<Rule>
</Rule>
</Style>
</Map>'''
def test_filter_init():
    """All textually different spellings of the same predicate -- extra
    parentheses, quoting styles, embedded newlines, and the XML-loaded rule
    filters from ``map_`` -- must stringify to the same canonical form."""
    m = mapnik.Map(1,1)
    mapnik.load_map_from_string(m,map_)
    filters = []
    filters.append(mapnik.Filter("([region]>=0) and ([region]<=50)"))
    filters.append(mapnik.Filter("(([region]>=0) and ([region]<=50))"))
    filters.append(mapnik.Filter("((([region]>=0) and ([region]<=50)))"))
    filters.append(mapnik.Filter('((([region]>=0) and ([region]<=50)))'))
    filters.append(mapnik.Filter('''((([region]>=0) and ([region]<=50)))'''))
    filters.append(mapnik.Filter('''
    ((([region]>=0)
    and
    ([region]<=50)))
    '''))
    filters.append(mapnik.Filter('''
    ([region]>=0)
    and
    ([region]<=50)
    '''))
    filters.append(mapnik.Filter('''
    ([region]
    >=
    0)
    and
    ([region]
    <=
    50)
    '''))
    # Filters parsed from the XML style must match the programmatic ones.
    s = m.find_style('s')
    for r in s.rules:
        filters.append(r.filter)
    first = filters[0]
    for f in filters:
        eq_(str(first),str(f))
    # filter-mode="first" set in the XML should round-trip onto the style.
    s = m.find_style('s2')
    eq_(s.filter_mode,mapnik.filter_mode.FIRST)
def test_geometry_type_eval():
    """[mapnik::geometry_type] must reflect the feature's actual geometry
    kind (0 none, 1 point, 2 linestring, 3 polygon, 4 collection) and must
    not be shadowed by a same-named feature attribute."""
    # clashing field called 'mapnik::geometry'
    context2 = mapnik.Context()
    context2.push('mapnik::geometry_type')
    f = mapnik.Feature(context2,0)
    f["mapnik::geometry_type"] = 'sneaky'
    expr = mapnik.Expression("[mapnik::geometry_type]")
    eq_(expr.evaluate(f),0)
    expr = mapnik.Expression("[mapnik::geometry_type]")
    context = mapnik.Context()
    # no geometry
    f = mapnik.Feature(context,0)
    eq_(expr.evaluate(f),0)
    eq_(mapnik.Expression("[mapnik::geometry_type]=0").evaluate(f),True)
    # POINT = 1
    f = mapnik.Feature(context,0)
    f.add_geometries_from_wkt('POINT(10 40)')
    eq_(expr.evaluate(f),1)
    eq_(mapnik.Expression("[mapnik::geometry_type]=point").evaluate(f),True)
    # LINESTRING = 2
    f = mapnik.Feature(context,0)
    f.add_geometries_from_wkt('LINESTRING (30 10, 10 30, 40 40)')
    eq_(expr.evaluate(f),2)
    eq_(mapnik.Expression("[mapnik::geometry_type]=linestring").evaluate(f),True)
    # POLYGON = 3
    f = mapnik.Feature(context,0)
    f.add_geometries_from_wkt('POLYGON ((30 10, 10 20, 20 40, 40 40, 30 10))')
    eq_(expr.evaluate(f),3)
    eq_(mapnik.Expression("[mapnik::geometry_type]=polygon").evaluate(f),True)
    # COLLECTION = 4
    f = mapnik.Feature(context,0)
    f.add_geometries_from_wkt('GEOMETRYCOLLECTION(POLYGON((1 1,2 1,2 2,1 2,1 1)),POINT(2 3),LINESTRING(2 3,3 4))')
    eq_(expr.evaluate(f),4)
    eq_(mapnik.Expression("[mapnik::geometry_type]=collection").evaluate(f),True)
def test_regex_match():
    """A .match() expression against an exactly matching attribute is truthy."""
    ctx = mapnik.Context()
    ctx.push('name')
    feature = mapnik.Feature(ctx,0)
    feature["name"] = 'test'
    matcher = mapnik.Expression("[name].match('test')")
    result = matcher.evaluate(feature)
    eq_(result,True) # 1 == True
def test_unicode_regex_match():
    """.match() must also work on non-ASCII attribute values."""
    context = mapnik.Context()
    context.push('name')
    f = mapnik.Feature(context,0)
    f["name"] = 'Québec'
    expr = mapnik.Expression("[name].match('Québec')")
    eq_(expr.evaluate(f),True) # 1 == True
def test_regex_replace():
    """.replace() should insert a space after every character of the value."""
    ctx = mapnik.Context()
    ctx.push('name')
    feature = mapnik.Feature(ctx,0)
    feature["name"] = 'test'
    spacer = mapnik.Expression("[name].replace('(\B)|( )','$1 ')")
    eq_(spacer.evaluate(feature),'t e s t')
def test_unicode_regex_replace_to_str():
expr = mapnik.Expression("[name].replace('(\B)|( )','$1 ')")
eq_(str(expr),"[name].replace('(\B)|( )','$1 ')")
def test_unicode_regex_replace():
context = mapnik.Context()
context.push('name')
f = mapnik.Feature(context,0)
f["name"] = 'Québec'
expr = mapnik.Expression("[name].replace('(\B)|( )','$1 ')")
# will fail if -DBOOST_REGEX_HAS_ICU is not defined
eq_(expr.evaluate(f), u'Q u é b e c')
def test_float_precision():
    """Float attributes must compare and regex-match without losing or
    inventing precision (1.0000 stays 1, 1.0001 stays 1.0001)."""
    context = mapnik.Context()
    # NOTE(review): 'num' is pushed but the attributes actually set are
    # num1/num2 -- presumably Feature.__setitem__ registers names
    # implicitly; confirm this mismatch is intentional.
    context.push('num')
    f = mapnik.Feature(context,0)
    f["num1"] = 1.0000
    f["num2"] = 1.0001
    eq_(f["num1"],1.0000)
    eq_(f["num2"],1.0001)
    expr = mapnik.Expression("[num1] = 1.0000")
    eq_(expr.evaluate(f),True)
    expr = mapnik.Expression("[num1].match('1')")
    eq_(expr.evaluate(f),True)
    expr = mapnik.Expression("[num2] = 1.0001")
    eq_(expr.evaluate(f),True)
    expr = mapnik.Expression("[num2].match('1.0001')")
    eq_(expr.evaluate(f),True)
def test_string_matching_on_precision():
context = mapnik.Context()
context.push('num')
f = mapnik.Feature(context,0)
f["num"] = "1.0000"
eq_(f["num"],"1.0000")
expr = mapnik.Expression("[num].match('.*(^0|00)$')")
eq_(expr.evaluate(f),True)
def test_creation_of_null_value():
context = mapnik.Context()
context.push('nv')
f = mapnik.Feature(context,0)
f["nv"] = None
eq_(f["nv"],None)
eq_(f["nv"] is None,True)
# test boolean
f["nv"] = 0
eq_(f["nv"],0)
eq_(f["nv"] is not None,True)
def test_creation_of_bool():
context = mapnik.Context()
context.push('bool')
f = mapnik.Feature(context,0)
f["bool"] = True
eq_(f["bool"],True)
# TODO - will become int of 1 do to built in boost python conversion
# https://github.com/mapnik/mapnik/issues/1873
eq_(isinstance(f["bool"],bool) or isinstance(f["bool"],long),True)
f["bool"] = False
eq_(f["bool"],False)
eq_(isinstance(f["bool"],bool) or isinstance(f["bool"],long),True)
# test NoneType
f["bool"] = None
eq_(f["bool"],None)
eq_(isinstance(f["bool"],bool) or isinstance(f["bool"],long),False)
# test integer
f["bool"] = 0
eq_(f["bool"],0)
# https://github.com/mapnik/mapnik/issues/1873
# ugh, boost_python's built into converter does not work right
#eq_(isinstance(f["bool"],bool),False)
null_equality = [
['hello',False,unicode],
[u'',False,unicode],
[0,False,long],
[123,False,long],
[0.0,False,float],
[123.123,False,float],
[.1,False,float],
[False,False,long], # TODO - should become bool: https://github.com/mapnik/mapnik/issues/1873
[True,False,long], # TODO - should become bool: https://github.com/mapnik/mapnik/issues/1873
[None,True,None],
[2147483648,False,long],
[922337203685477580,False,long]
]
def test_expressions_with_null_equality():
    """Only a true None attribute compares equal to null; every other value
    in the null_equality table (including 0, '' and False) must not."""
    for eq in null_equality:
        context = mapnik.Context()
        f = mapnik.Feature(context,0)
        f["prop"] = eq[0]
        eq_(f["prop"],eq[0])
        if eq[0] is None:
            eq_(f["prop"] is None, True)
        else:
            eq_(isinstance(f['prop'],eq[2]),True,'%s is not an instance of %s' % (f['prop'],eq[2]))
        # Both spellings of the null test must agree with the expected flag.
        expr = mapnik.Expression("[prop] = null")
        eq_(expr.evaluate(f),eq[1])
        expr = mapnik.Expression("[prop] is null")
        eq_(expr.evaluate(f),eq[1])
def test_expressions_with_null_equality2():
for eq in null_equality:
context = mapnik.Context()
f = mapnik.Feature(context,0)
f["prop"] = eq[0]
eq_(f["prop"],eq[0])
if eq[0] is None:
eq_(f["prop"] is None, True)
else:
eq_(isinstance(f['prop'],eq[2]),True,'%s is not an instance of %s' % (f['prop'],eq[2]))
# TODO - support `is not` syntax:
# https://github.com/mapnik/mapnik/issues/796
expr = mapnik.Expression("not [prop] is null")
eq_(expr.evaluate(f),not eq[1])
# https://github.com/mapnik/mapnik/issues/1642
expr = mapnik.Expression("[prop] != null")
eq_(expr.evaluate(f),not eq[1])
truthyness = [
[u'hello',True,unicode],
[u'',False,unicode],
[0,False,long],
[123,True,long],
[0.0,False,float],
[123.123,True,float],
[.1,True,float],
[False,False,long], # TODO - should become bool: https://github.com/mapnik/mapnik/issues/1873
[True,True,long], # TODO - should become bool: https://github.com/mapnik/mapnik/issues/1873
[None,False,None],
[2147483648,True,long],
[922337203685477580,True,long]
]
def test_expressions_for_thruthyness():
context = mapnik.Context()
for eq in truthyness:
f = mapnik.Feature(context,0)
f["prop"] = eq[0]
eq_(f["prop"],eq[0])
if eq[0] is None:
eq_(f["prop"] is None, True)
else:
eq_(isinstance(f['prop'],eq[2]),True,'%s is not an instance of %s' % (f['prop'],eq[2]))
expr = mapnik.Expression("[prop]")
eq_(expr.to_bool(f),eq[1])
expr = mapnik.Expression("not [prop]")
eq_(expr.to_bool(f),not eq[1])
expr = mapnik.Expression("! [prop]")
eq_(expr.to_bool(f),not eq[1])
# also test if feature does not have property at all
f2 = mapnik.Feature(context,1)
# no property existing will return value_null since
# https://github.com/mapnik/mapnik/commit/562fada9d0f680f59b2d9f396c95320a0d753479#include/mapnik/feature.hpp
eq_(f2["prop"] is None,True)
expr = mapnik.Expression("[prop]")
eq_(expr.evaluate(f2),None)
eq_(expr.to_bool(f2),False)
# https://github.com/mapnik/mapnik/issues/1859
def test_if_null_and_empty_string_are_equal():
context = mapnik.Context()
f = mapnik.Feature(context,0)
f["empty"] = u""
f["null"] = None
# ensure base assumptions are good
eq_(mapnik.Expression("[empty] = ''").to_bool(f),True)
eq_(mapnik.Expression("[null] = null").to_bool(f),True)
eq_(mapnik.Expression("[empty] != ''").to_bool(f),False)
eq_(mapnik.Expression("[null] != null").to_bool(f),False)
# now test expected behavior
eq_(mapnik.Expression("[null] = ''").to_bool(f),False)
eq_(mapnik.Expression("[empty] = null").to_bool(f),False)
eq_(mapnik.Expression("[empty] != null").to_bool(f),True)
# this one is the back compatibility shim
eq_(mapnik.Expression("[null] != ''").to_bool(f),False)
def test_filtering_nulls_and_empty_strings():
context = mapnik.Context()
f = mapnik.Feature(context,0)
f["prop"] = u"hello"
eq_(f["prop"],u"hello")
eq_(mapnik.Expression("[prop]").to_bool(f),True)
eq_(mapnik.Expression("! [prop]").to_bool(f),False)
eq_(mapnik.Expression("[prop] != null").to_bool(f),True)
eq_(mapnik.Expression("[prop] != ''").to_bool(f),True)
eq_(mapnik.Expression("[prop] != null and [prop] != ''").to_bool(f),True)
eq_(mapnik.Expression("[prop] != null or [prop] != ''").to_bool(f),True)
f["prop2"] = u""
eq_(f["prop2"],u"")
eq_(mapnik.Expression("[prop2]").to_bool(f),False)
eq_(mapnik.Expression("! [prop2]").to_bool(f),True)
eq_(mapnik.Expression("[prop2] != null").to_bool(f),True)
eq_(mapnik.Expression("[prop2] != ''").to_bool(f),False)
eq_(mapnik.Expression("[prop2] = ''").to_bool(f),True)
eq_(mapnik.Expression("[prop2] != null or [prop2] != ''").to_bool(f),True)
eq_(mapnik.Expression("[prop2] != null and [prop2] != ''").to_bool(f),False)
f["prop3"] = None
eq_(f["prop3"],None)
eq_(mapnik.Expression("[prop3]").to_bool(f),False)
eq_(mapnik.Expression("! [prop3]").to_bool(f),True)
eq_(mapnik.Expression("[prop3] != null").to_bool(f),False)
eq_(mapnik.Expression("[prop3] = null").to_bool(f),True)
# https://github.com/mapnik/mapnik/issues/1859
#eq_(mapnik.Expression("[prop3] != ''").to_bool(f),True)
eq_(mapnik.Expression("[prop3] != ''").to_bool(f),False)
eq_(mapnik.Expression("[prop3] = ''").to_bool(f),False)
# https://github.com/mapnik/mapnik/issues/1859
#eq_(mapnik.Expression("[prop3] != null or [prop3] != ''").to_bool(f),True)
eq_(mapnik.Expression("[prop3] != null or [prop3] != ''").to_bool(f),False)
eq_(mapnik.Expression("[prop3] != null and [prop3] != ''").to_bool(f),False)
# attr not existing should behave the same as prop3
eq_(mapnik.Expression("[prop4]").to_bool(f),False)
eq_(mapnik.Expression("! [prop4]").to_bool(f),True)
eq_(mapnik.Expression("[prop4] != null").to_bool(f),False)
eq_(mapnik.Expression("[prop4] = null").to_bool(f),True)
# https://github.com/mapnik/mapnik/issues/1859
##eq_(mapnik.Expression("[prop4] != ''").to_bool(f),True)
eq_(mapnik.Expression("[prop4] != ''").to_bool(f),False)
eq_(mapnik.Expression("[prop4] = ''").to_bool(f),False)
# https://github.com/mapnik/mapnik/issues/1859
##eq_(mapnik.Expression("[prop4] != null or [prop4] != ''").to_bool(f),True)
eq_(mapnik.Expression("[prop4] != null or [prop4] != ''").to_bool(f),False)
eq_(mapnik.Expression("[prop4] != null and [prop4] != ''").to_bool(f),False)
f["prop5"] = False
eq_(f["prop5"],False)
eq_(mapnik.Expression("[prop5]").to_bool(f),False)
eq_(mapnik.Expression("! [prop5]").to_bool(f),True)
eq_(mapnik.Expression("[prop5] != null").to_bool(f),True)
eq_(mapnik.Expression("[prop5] = null").to_bool(f),False)
eq_(mapnik.Expression("[prop5] != ''").to_bool(f),True)
eq_(mapnik.Expression("[prop5] = ''").to_bool(f),False)
eq_(mapnik.Expression("[prop5] != null or [prop5] != ''").to_bool(f),True)
eq_(mapnik.Expression("[prop5] != null and [prop5] != ''").to_bool(f),True)
# note, we need to do [prop5] != 0 here instead of false due to this bug:
# https://github.com/mapnik/mapnik/issues/1873
eq_(mapnik.Expression("[prop5] != null and [prop5] != '' and [prop5] != 0").to_bool(f),False)
# https://github.com/mapnik/mapnik/issues/1872
def test_falseyness_comparision():
context = mapnik.Context()
f = mapnik.Feature(context,0)
f["prop"] = 0
eq_(mapnik.Expression("[prop]").to_bool(f),False)
eq_(mapnik.Expression("[prop] = false").to_bool(f),True)
eq_(mapnik.Expression("not [prop] != false").to_bool(f),True)
eq_(mapnik.Expression("not [prop] = true").to_bool(f),True)
eq_(mapnik.Expression("[prop] = true").to_bool(f),False)
eq_(mapnik.Expression("[prop] != true").to_bool(f),True)
# https://github.com/mapnik/mapnik/issues/1806, fixed by https://github.com/mapnik/mapnik/issues/1872
def test_truthyness_comparision():
context = mapnik.Context()
f = mapnik.Feature(context,0)
f["prop"] = 1
eq_(mapnik.Expression("[prop]").to_bool(f),True)
eq_(mapnik.Expression("[prop] = false").to_bool(f),False)
eq_(mapnik.Expression("not [prop] != false").to_bool(f),False)
eq_(mapnik.Expression("not [prop] = true").to_bool(f),False)
eq_(mapnik.Expression("[prop] = true").to_bool(f),True)
eq_(mapnik.Expression("[prop] != true").to_bool(f),False)
if __name__ == "__main__":
run_all(eval(x) for x in dir() if x.startswith("test_"))
|
timabbott/zulip | refs/heads/master | zerver/migrations/0110_stream_is_in_zephyr_realm.py | 4 | # Generated by Django 1.11.5 on 2017-10-08 18:37
from django.db import migrations, models
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
def populate_is_zephyr(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
    """Backfill Stream.is_in_zephyr_realm=True for every stream belonging
    to a realm whose string_id is 'zephyr'."""
    realm_model = apps.get_model("zerver", "Realm")
    stream_model = apps.get_model("zerver", "Stream")
    zephyr_realms = realm_model.objects.filter(string_id='zephyr')
    for zephyr_realm in zephyr_realms:
        realm_streams = stream_model.objects.filter(realm_id=zephyr_realm.id)
        realm_streams.update(is_in_zephyr_realm=True)
class Migration(migrations.Migration):
    """Add Stream.is_in_zephyr_realm (default False) and backfill it for
    the legacy 'zephyr' mirror realm; the data step is a no-op on reversal."""
    dependencies = [
        ('zerver', '0109_mark_tutorial_status_finished'),
    ]
    operations = [
        migrations.AddField(
            model_name='stream',
            name='is_in_zephyr_realm',
            field=models.BooleanField(default=False),
        ),
        # elidable=True: this data migration may be dropped when squashing.
        migrations.RunPython(populate_is_zephyr,
                             reverse_code=migrations.RunPython.noop,
                             elidable=True),
    ]
|
gtoonstra/airflow | refs/heads/master | tests/dags/test_issue_1225.py | 15 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
DAG designed to test what happens when a DAG with pooled tasks is run
by a BackfillJob.
Addresses issue #1225.
"""
from datetime import datetime
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.utils.trigger_rule import TriggerRule
import time
DEFAULT_DATE = datetime(2016, 1, 1)
default_args = dict(
start_date=DEFAULT_DATE,
owner='airflow')
def fail():
    """Unconditionally raise ValueError; used to force a task failure."""
    message = 'Expected failure.'
    raise ValueError(message)
def delayed_fail():
    """
    Delayed failure to make sure that processes are running before the error
    is raised.

    TODO handle more directly (without sleeping)
    """
    # Give the surrounding scheduler/executor processes time to spin up
    # before the failure is observed.
    time.sleep(5)
    raise ValueError('Expected failure.')
# DAG tests backfill with pooled tasks
# Previously backfill would queue the task but never run it
dag1 = DAG(dag_id='test_backfill_pooled_task_dag', default_args=default_args)
dag1_task1 = DummyOperator(
    task_id='test_backfill_pooled_task',
    dag=dag1,
    pool='test_backfill_pooled_task_pool',)

# DAG tests depends_on_past dependencies
dag2 = DAG(dag_id='test_depends_on_past', default_args=default_args)
dag2_task1 = DummyOperator(
    task_id='test_dop_task',
    dag=dag2,
    depends_on_past=True,)

# DAG tests that a Dag run that doesn't complete is marked failed
dag3 = DAG(dag_id='test_dagrun_states_fail', default_args=default_args)
dag3_task1 = PythonOperator(
    task_id='test_dagrun_fail',
    dag=dag3,
    python_callable=fail)
dag3_task2 = DummyOperator(
    task_id='test_dagrun_succeed',
    dag=dag3,)
dag3_task2.set_upstream(dag3_task1)

# DAG tests that a Dag run that completes but has a failure is marked success
dag4 = DAG(dag_id='test_dagrun_states_success', default_args=default_args)
dag4_task1 = PythonOperator(
    task_id='test_dagrun_fail',
    dag=dag4,
    python_callable=fail,
)
# ALL_FAILED trigger rule: this task runs (and succeeds) only because its
# upstream fails, which makes the run complete overall.
dag4_task2 = DummyOperator(
    task_id='test_dagrun_succeed',
    dag=dag4,
    trigger_rule=TriggerRule.ALL_FAILED
)
dag4_task2.set_upstream(dag4_task1)

# DAG tests that a Dag run that completes but has a root failure is marked fail
dag5 = DAG(dag_id='test_dagrun_states_root_fail', default_args=default_args)
dag5_task1 = DummyOperator(
    task_id='test_dagrun_succeed',
    dag=dag5,
)
dag5_task2 = PythonOperator(
    task_id='test_dagrun_fail',
    dag=dag5,
    python_callable=fail,
)

# DAG tests that a Dag run that is deadlocked with no states is failed
dag6 = DAG(dag_id='test_dagrun_states_deadlock', default_args=default_args)
dag6_task1 = DummyOperator(
    task_id='test_depends_on_past',
    depends_on_past=True,
    dag=dag6,)
dag6_task2 = DummyOperator(
    task_id='test_depends_on_past_2',
    depends_on_past=True,
    dag=dag6,)
dag6_task2.set_upstream(dag6_task1)

# DAG tests that a deadlocked subdag is properly caught
dag7 = DAG(dag_id='test_subdag_deadlock', default_args=default_args)
# Subdag dag_id follows the '<parent dag_id>.<task_id>' naming pattern
# (presumably required by SubDagOperator -- verify against its docs).
subdag7 = DAG(dag_id='test_subdag_deadlock.subdag', default_args=default_args)
subdag7_task1 = PythonOperator(
    task_id='test_subdag_fail',
    dag=subdag7,
    python_callable=fail)
subdag7_task2 = DummyOperator(
    task_id='test_subdag_dummy_1',
    dag=subdag7,)
subdag7_task3 = DummyOperator(
    task_id='test_subdag_dummy_2',
    dag=subdag7)
dag7_subdag1 = SubDagOperator(
    task_id='subdag',
    dag=dag7,
    subdag=subdag7)
subdag7_task1.set_downstream(subdag7_task2)
subdag7_task2.set_downstream(subdag7_task3)

# DAG tests that a Dag run that doesn't complete but has a root failure is marked running
dag8 = DAG(dag_id='test_dagrun_states_root_fail_unfinished', default_args=default_args)
dag8_task1 = DummyOperator(
    task_id='test_dagrun_unfinished',  # The test will unset the task instance state after
                                       # running this test
    dag=dag8,
)
dag8_task2 = PythonOperator(
    task_id='test_dagrun_fail',
    dag=dag8,
    python_callable=fail,
)
|
putinclassic/putic | refs/heads/master | qa/rpc-tests/mempool_coinbase_spends.py | 143 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitcoinTestFramework):
    """Test mempool handling of transactions that spend coinbase outputs
    (directly or indirectly) across a re-org triggered via invalidateblock.

    Scenarios exercised in run_test:
      1. Direct coinbase spend kept in the mempool.
      2. Coinbase spend mined, its child left in the mempool.
      3. Coinbase spend and its child both mined.
    After the maturing blocks are invalidated, every one of those spends
    becomes an immature-coinbase spend and must be evicted from the mempool.
    """

    alert_filename = None  # Set by setup_network

    def setup_network(self):
        # -checkmempool makes bitcoind sanity-check the mempool after every
        # operation; -debug=mempool gives verbose logs if something goes wrong.
        args = ["-checkmempool", "-debug=mempool"]
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, args))
        self.nodes.append(start_node(1, self.options.tmpdir, args))
        connect_nodes(self.nodes[1], 0)
        self.is_network_split = False
        # Bug fix: this previously read `self.sync_all` -- a bare attribute
        # reference that silently did nothing.  Actually call it so both
        # nodes start the test fully synchronized.
        self.sync_all()

    def create_tx(self, from_txid, to_address, amount):
        """Create, sign and return (as hex) a one-input, one-output, no-fee
        transaction spending output 0 of `from_txid` to `to_address`."""
        inputs = [{ "txid" : from_txid, "vout" : 0}]
        outputs = { to_address : amount }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        signresult = self.nodes[0].signrawtransaction(rawtx)
        assert_equal(signresult["complete"], True)
        return signresult["hex"]

    def run_test(self):
        start_count = self.nodes[0].getblockcount()

        # Mine four blocks (the original comment said "three", but the call
        # below generates 4).  The coinbases of blocks 102-104 are fetched
        # and spent further down.
        new_blocks = self.nodes[1].setgenerate(True, 4)
        self.sync_all()

        node0_address = self.nodes[0].getnewaddress()
        node1_address = self.nodes[1].getnewaddress()

        # Three scenarios for re-orging coinbase spends in the memory pool:
        # 1. Direct coinbase spend  :  spend_101
        # 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
        # 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
        # Use invalidateblock to make all of the above coinbase spends invalid
        # (immature coinbase), and make sure the mempool code behaves correctly.
        b = [ self.nodes[0].getblockhash(n) for n in range(102, 105) ]
        coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
        spend_101_raw = self.create_tx(coinbase_txids[0], node1_address, 50)
        spend_102_raw = self.create_tx(coinbase_txids[1], node0_address, 50)
        spend_103_raw = self.create_tx(coinbase_txids[2], node0_address, 50)

        # Broadcast and mine spend_102 and 103:
        spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
        spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
        self.nodes[0].setgenerate(True, 1)

        # Create 102_1 and 103_1:
        spend_102_1_raw = self.create_tx(spend_102_id, node1_address, 50)
        spend_103_1_raw = self.create_tx(spend_103_id, node1_address, 50)

        # Broadcast and mine 103_1:
        spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
        self.nodes[0].setgenerate(True, 1)

        # ... now put spend_101 and spend_102_1 in memory pools:
        spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
        spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)

        self.sync_all()

        assert_equal(set(self.nodes[0].getrawmempool()), set([ spend_101_id, spend_102_1_id ]))

        # Use invalidateblock to re-org back and make all those coinbase spends
        # immature/invalid:
        for node in self.nodes:
            node.invalidateblock(new_blocks[0])

        self.sync_all()

        # mempool should be empty: every pending spend now rests on an
        # immature coinbase.
        assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
    # Allow running this functional test directly from the command line.
    MempoolCoinbaseTest().main()
|
jeremiahmarks/sl4a | refs/heads/master | python/src/Lib/distutils/msvccompiler.py | 53 | """distutils.msvccompiler
Contains MSVCCompiler, an implementation of the abstract CCompiler class
for the Microsoft Visual Studio.
"""
# Written by Perry Stoll
# hacked by Robin Becker and Thomas Heller to do a better job of
# finding DevStudio (through the registry)
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: msvccompiler.py 62197 2008-04-07 01:53:39Z mark.hammond $"
import sys, os, string
from distutils.errors import \
DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils import log
# Registry access is optional: prefer the stdlib _winreg module, fall back
# to the pywin32 modules (win32api/win32con), and record availability in
# _can_read_reg so the rest of the module can degrade gracefully when
# neither is installed (e.g. on non-Windows platforms).
_can_read_reg = 0
try:
    import _winreg

    _can_read_reg = 1
    hkey_mod = _winreg

    RegOpenKeyEx = _winreg.OpenKeyEx
    RegEnumKey = _winreg.EnumKey
    RegEnumValue = _winreg.EnumValue
    RegError = _winreg.error

except ImportError:
    try:
        import win32api
        import win32con
        _can_read_reg = 1
        hkey_mod = win32con

        RegOpenKeyEx = win32api.RegOpenKeyEx
        RegEnumKey = win32api.RegEnumKey
        RegEnumValue = win32api.RegEnumValue
        RegError = win32api.error

    except ImportError:
        log.info("Warning: Can't read registry to find the "
                 "necessary compiler setting\n"
                 "Make sure that Python modules _winreg, "
                 "win32api or win32con are installed.")
        pass

if _can_read_reg:
    # Registry roots searched, in this order, by read_keys()/read_values().
    HKEYS = (hkey_mod.HKEY_USERS,
             hkey_mod.HKEY_CURRENT_USER,
             hkey_mod.HKEY_LOCAL_MACHINE,
             hkey_mod.HKEY_CLASSES_ROOT)
def read_keys(base, key):
    """Enumerate the subkey names of a registry key.

    Returns the list of names, or None when the key cannot be opened.
    """
    try:
        handle = RegOpenKeyEx(base, key)
    except RegError:
        return None
    keys = []
    index = 0
    # RegEnumKey raises RegError once the index runs past the last subkey.
    while True:
        try:
            keys.append(RegEnumKey(handle, index))
        except RegError:
            break
        index = index + 1
    return keys
def read_values(base, key):
    """Read all (name, value) pairs of a registry key into a dict.

    Names are lowercased; both names and values go through convert_mbcs().
    Returns None when the key cannot be opened.
    """
    try:
        handle = RegOpenKeyEx(base, key)
    except RegError:
        return None
    values = {}
    index = 0
    # RegEnumValue raises RegError once the index runs past the last value.
    while True:
        try:
            name, value, type = RegEnumValue(handle, index)
        except RegError:
            break
        values[convert_mbcs(name.lower())] = convert_mbcs(value)
        index = index + 1
    return values
def convert_mbcs(s):
    """Encode *s* with the 'mbcs' codec when it supports .encode();
    otherwise (or on a UnicodeError) return it unchanged."""
    encoder = getattr(s, "encode", None)
    if encoder is None:
        return s
    try:
        return encoder("mbcs")
    except UnicodeError:
        return s
class MacroExpander:
    """Expand "$(Name)" macros (VCInstallDir, FrameworkDir, ...) using
    values read from the Visual Studio / .NET Framework registry keys."""

    def __init__(self, version):
        self.macros = {}  # maps "$(Name)" -> replacement text
        self.load_macros(version)

    def set_macro(self, macro, path, key):
        # Search each registry root in order; the first hit wins.
        for base in HKEYS:
            d = read_values(base, path)
            if d:
                self.macros["$(%s)" % macro] = d[key]
                break

    def load_macros(self, version):
        """Populate self.macros for the given Visual Studio version."""
        vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version
        self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir")
        self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir")
        net = r"Software\Microsoft\.NETFramework"
        self.set_macro("FrameworkDir", net, "installroot")
        try:
            if version > 7.0:
                self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1")
            else:
                self.set_macro("FrameworkSDKDir", net, "sdkinstallroot")
        except KeyError, exc: #
            raise DistutilsPlatformError, \
                  ("""Python was built with Visual Studio 2003;
extensions must be built with a compiler than can generate compatible binaries.
Visual Studio 2003 was not found on this system. If you have Cygwin installed,
you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")

        # Record the installed .NET Framework version as a macro as well.
        p = r"Software\Microsoft\NET Framework Setup\Product"
        for base in HKEYS:
            try:
                h = RegOpenKeyEx(base, p)
            except RegError:
                continue
            key = RegEnumKey(h, 0)
            d = read_values(base, r"%s\%s" % (p, key))
            self.macros["$(FrameworkVersion)"] = d["version"]

    def sub(self, s):
        # Plain textual substitution of every known macro into s.
        for k, v in self.macros.items():
            s = string.replace(s, k, v)
        return s
def get_build_version():
    """Return the version of MSVC that was used to build Python.

    For Python 2.3 and up, the version number is included in
    sys.version. For earlier versions, assume the compiler is MSVC 6.
    """

    prefix = "MSC v."
    i = string.find(sys.version, prefix)
    if i == -1:
        return 6
    i = i + len(prefix)
    s, rest = sys.version[i:].split(" ", 1)
    # e.g. "MSC v.1310": s == "1310" -> major = 13 - 6 = 7,
    # minor = third digit / 10 = 0.1, giving 7.1 (VS .NET 2003).
    majorVersion = int(s[:-2]) - 6
    minorVersion = int(s[2:3]) / 10.0
    # I don't think paths are affected by minor version in version 6
    if majorVersion == 6:
        minorVersion = 0
    if majorVersion >= 6:
        return majorVersion + minorVersion
    # else we don't know what version of the compiler this is
    return None
def get_build_architecture():
    """Return the processor architecture.

    Possible results are "Intel", "Itanium", or "AMD64".
    """

    # sys.version on MSVC builds contains e.g. "... [MSC v.1310 32 bit (Intel)]";
    # extract the token between " bit (" and the closing ")".
    prefix = " bit ("
    i = string.find(sys.version, prefix)
    if i == -1:
        return "Intel"
    j = string.find(sys.version, ")", i)
    return sys.version[i+len(prefix):j]
def normalize_and_reduce_paths(paths):
    """Return a list of normalized paths with duplicates removed.

    The current order of paths is maintained.
    """
    # Normalization collapses variants such as "/a" and "/a/" to one form,
    # so duplicates are detected after normalizing each entry.
    reduced_paths = []
    for path in paths:
        normalized = os.path.normpath(path)
        # XXX(nnorwitz): O(n**2); fine while the path list stays short.
        if normalized not in reduced_paths:
            reduced_paths.append(normalized)
    return reduced_paths
class MSVCCompiler (CCompiler) :
    """Concrete class that implements an interface to Microsoft Visual C++,
       as defined by the CCompiler abstract class."""

    compiler_type = 'msvc'

    # Just set this so CCompiler's constructor doesn't barf.  We currently
    # don't use the 'set_executables()' bureaucracy provided by CCompiler,
    # as it really isn't necessary for this sort of single-compiler class.
    # Would be nice to have a consistent interface with UnixCCompiler,
    # though, so it's worth thinking about.
    executables = {}

    # Private class data (need to distinguish C from C++ source for compiler)
    _c_extensions = ['.c']
    _cpp_extensions = ['.cc', '.cpp', '.cxx']
    _rc_extensions = ['.rc']
    _mc_extensions = ['.mc']

    # Needed for the filename generation methods provided by the
    # base class, CCompiler.
    src_extensions = (_c_extensions + _cpp_extensions +
                      _rc_extensions + _mc_extensions)
    res_extension = '.res'
    obj_extension = '.obj'
    static_lib_extension = '.lib'
    shared_lib_extension = '.dll'
    static_lib_format = shared_lib_format = '%s%s'
    exe_extension = '.exe'

    def __init__ (self, verbose=0, dry_run=0, force=0):
        """Record the MSVC version/architecture Python was built with; the
        expensive registry/path discovery is deferred to initialize()."""
        CCompiler.__init__ (self, verbose, dry_run, force)
        self.__version = get_build_version()
        self.__arch = get_build_architecture()
        if self.__arch == "Intel":
            # x86
            if self.__version >= 7:
                self.__root = r"Software\Microsoft\VisualStudio"
                self.__macros = MacroExpander(self.__version)
            else:
                self.__root = r"Software\Microsoft\Devstudio"
            self.__product = "Visual Studio version %s" % self.__version
        else:
            # Win64. Assume this was built with the platform SDK
            self.__product = "Microsoft SDK compiler %s" % (self.__version + 6)

        self.initialized = False

    def initialize(self):
        """Locate cl/link/lib/rc/mc and build the compile/link option lists.

        Called lazily (once) before the first compile or link operation.
        """
        self.__paths = []
        if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
            # Assume that the SDK set up everything alright; don't try to be
            # smarter
            self.cc = "cl.exe"
            self.linker = "link.exe"
            self.lib = "lib.exe"
            self.rc = "rc.exe"
            self.mc = "mc.exe"
        else:
            self.__paths = self.get_msvc_paths("path")

            if len (self.__paths) == 0:
                raise DistutilsPlatformError, \
                      ("Python was built with %s, "
                       "and extensions need to be built with the same "
                       "version of the compiler, but it isn't installed." % self.__product)

            self.cc = self.find_exe("cl.exe")
            self.linker = self.find_exe("link.exe")
            self.lib = self.find_exe("lib.exe")
            self.rc = self.find_exe("rc.exe")   # resource compiler
            self.mc = self.find_exe("mc.exe")   # message compiler
            self.set_path_env_var('lib')
            self.set_path_env_var('include')

            # extend the MSVC path with the current path
            try:
                for p in string.split(os.environ['path'], ';'):
                    self.__paths.append(p)
            except KeyError:
                pass
            self.__paths = normalize_and_reduce_paths(self.__paths)
            os.environ['path'] = string.join(self.__paths, ';')

        self.preprocess_options = None
        if self.__arch == "Intel":
            self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GX' ,
                                     '/DNDEBUG']
            self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX',
                                          '/Z7', '/D_DEBUG']
        else:
            # Win64
            self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
                                     '/DNDEBUG']
            self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
                                          '/Z7', '/D_DEBUG']

        self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
        if self.__version >= 7:
            self.ldflags_shared_debug = [
                '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'
                ]
        else:
            self.ldflags_shared_debug = [
                '/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG'
                ]
        self.ldflags_static = [ '/nologo']

        self.initialized = True

    # -- Worker methods ------------------------------------------------

    def object_filenames (self,
                          source_filenames,
                          strip_dir=0,
                          output_dir=''):
        # Copied from ccompiler.py, extended to return .res as 'object'-file
        # for .rc input file
        if output_dir is None: output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            (base, ext) = os.path.splitext (src_name)
            base = os.path.splitdrive(base)[1] # Chop off the drive
            base = base[os.path.isabs(base):]  # If abs, chop off leading /
            if ext not in self.src_extensions:
                # Better to raise an exception instead of silently continuing
                # and later complain about sources and targets having
                # different lengths
                raise CompileError ("Don't know how to compile %s" % src_name)
            if strip_dir:
                base = os.path.basename (base)
            if ext in self._rc_extensions:
                obj_names.append (os.path.join (output_dir,
                                                base + self.res_extension))
            elif ext in self._mc_extensions:
                obj_names.append (os.path.join (output_dir,
                                                base + self.res_extension))
            else:
                obj_names.append (os.path.join (output_dir,
                                                base + self.obj_extension))
        return obj_names

    # object_filenames ()

    def compile(self, sources,
                output_dir=None, macros=None, include_dirs=None, debug=0,
                extra_preargs=None, extra_postargs=None, depends=None):
        """Compile each source, dispatching on extension: C/C++ go through
        cl.exe, .rc through rc.exe, .mc through mc.exe + rc.exe.  Returns
        the list of object (or .res) filenames."""
        if not self.initialized: self.initialize()
        macros, objects, extra_postargs, pp_opts, build = \
                self._setup_compile(output_dir, macros, include_dirs, sources,
                                    depends, extra_postargs)

        compile_opts = extra_preargs or []
        compile_opts.append ('/c')
        if debug:
            compile_opts.extend(self.compile_options_debug)
        else:
            compile_opts.extend(self.compile_options)

        for obj in objects:
            try:
                src, ext = build[obj]
            except KeyError:
                continue
            if debug:
                # pass the full pathname to MSVC in debug mode,
                # this allows the debugger to find the source file
                # without asking the user to browse for it
                src = os.path.abspath(src)

            if ext in self._c_extensions:
                input_opt = "/Tc" + src
            elif ext in self._cpp_extensions:
                input_opt = "/Tp" + src
            elif ext in self._rc_extensions:
                # compile .RC to .RES file
                input_opt = src
                output_opt = "/fo" + obj
                try:
                    self.spawn ([self.rc] + pp_opts +
                                [output_opt] + [input_opt])
                except DistutilsExecError, msg:
                    raise CompileError, msg
                continue
            elif ext in self._mc_extensions:

                # Compile .MC to .RC file to .RES file.
                #   * '-h dir' specifies the directory for the
                #     generated include file
                #   * '-r dir' specifies the target directory of the
                #     generated RC file and the binary message resource
                #     it includes
                #
                # For now (since there are no options to change this),
                # we use the source-directory for the include file and
                # the build directory for the RC file and message
                # resources. This works at least for win32all.

                h_dir = os.path.dirname (src)
                rc_dir = os.path.dirname (obj)
                try:
                    # first compile .MC to .RC and .H file
                    self.spawn ([self.mc] +
                                ['-h', h_dir, '-r', rc_dir] + [src])
                    base, _ = os.path.splitext (os.path.basename (src))
                    rc_file = os.path.join (rc_dir, base + '.rc')
                    # then compile .RC to .RES file
                    self.spawn ([self.rc] +
                                ["/fo" + obj] + [rc_file])

                except DistutilsExecError, msg:
                    raise CompileError, msg
                continue
            else:
                # how to handle this file?
                raise CompileError (
                    "Don't know how to compile %s to %s" % \
                    (src, obj))

            output_opt = "/Fo" + obj
            try:
                self.spawn ([self.cc] + compile_opts + pp_opts +
                            [input_opt, output_opt] +
                            extra_postargs)
            except DistutilsExecError, msg:
                raise CompileError, msg

        return objects

    # compile ()

    def create_static_lib (self,
                           objects,
                           output_libname,
                           output_dir=None,
                           debug=0,
                           target_lang=None):
        """Bundle `objects` into a static .lib using lib.exe, skipping the
        step when the output is already up to date."""
        if not self.initialized: self.initialize()
        (objects, output_dir) = self._fix_object_args (objects, output_dir)
        output_filename = \
            self.library_filename (output_libname, output_dir=output_dir)

        if self._need_link (objects, output_filename):
            lib_args = objects + ['/OUT:' + output_filename]
            if debug:
                pass # XXX what goes here?
            try:
                self.spawn ([self.lib] + lib_args)
            except DistutilsExecError, msg:
                raise LibError, msg

        else:
            log.debug("skipping %s (up-to-date)", output_filename)

    # create_static_lib ()

    def link (self,
              target_desc,
              objects,
              output_filename,
              output_dir=None,
              libraries=None,
              library_dirs=None,
              runtime_library_dirs=None,
              export_symbols=None,
              debug=0,
              extra_preargs=None,
              extra_postargs=None,
              build_temp=None,
              target_lang=None):
        """Link objects into an executable or DLL with link.exe, handling
        debug flags, /EXPORT symbols and extra pre/post arguments."""
        if not self.initialized: self.initialize()
        (objects, output_dir) = self._fix_object_args (objects, output_dir)
        (libraries, library_dirs, runtime_library_dirs) = \
            self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)

        if runtime_library_dirs:
            self.warn ("I don't know what to do with 'runtime_library_dirs': "
                       + str (runtime_library_dirs))

        lib_opts = gen_lib_options (self,
                                    library_dirs, runtime_library_dirs,
                                    libraries)
        if output_dir is not None:
            output_filename = os.path.join (output_dir, output_filename)

        if self._need_link (objects, output_filename):

            # For executables the leading '/DLL' flag is dropped ([1:]).
            if target_desc == CCompiler.EXECUTABLE:
                if debug:
                    ldflags = self.ldflags_shared_debug[1:]
                else:
                    ldflags = self.ldflags_shared[1:]
            else:
                if debug:
                    ldflags = self.ldflags_shared_debug
                else:
                    ldflags = self.ldflags_shared

            export_opts = []
            for sym in (export_symbols or []):
                export_opts.append("/EXPORT:" + sym)

            ld_args = (ldflags + lib_opts + export_opts +
                       objects + ['/OUT:' + output_filename])

            # The MSVC linker generates .lib and .exp files, which cannot be
            # suppressed by any linker switches. The .lib files may even be
            # needed! Make sure they are generated in the temporary build
            # directory. Since they have different names for debug and release
            # builds, they can go into the same directory.
            if export_symbols is not None:
                (dll_name, dll_ext) = os.path.splitext(
                    os.path.basename(output_filename))
                implib_file = os.path.join(
                    os.path.dirname(objects[0]),
                    self.library_filename(dll_name))
                ld_args.append ('/IMPLIB:' + implib_file)

            if extra_preargs:
                ld_args[:0] = extra_preargs
            if extra_postargs:
                ld_args.extend(extra_postargs)

            self.mkpath (os.path.dirname (output_filename))
            try:
                self.spawn ([self.linker] + ld_args)
            except DistutilsExecError, msg:
                raise LinkError, msg

        else:
            log.debug("skipping %s (up-to-date)", output_filename)

    # link ()

    # -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options() function, in
    # ccompiler.py.

    def library_dir_option (self, dir):
        return "/LIBPATH:" + dir

    def runtime_library_dir_option (self, dir):
        raise DistutilsPlatformError, \
              "don't know how to set runtime library search path for MSVC++"

    def library_option (self, lib):
        return self.library_filename (lib)

    def find_library_file (self, dirs, lib, debug=0):
        # Prefer a debugging library if found (and requested), but deal
        # with it if we don't have one.
        if debug:
            try_names = [lib + "_d", lib]
        else:
            try_names = [lib]
        for dir in dirs:
            for name in try_names:
                libfile = os.path.join(dir, self.library_filename (name))
                if os.path.exists(libfile):
                    return libfile
        else:
            # Oops, didn't find it in *any* of 'dirs'
            return None

    # find_library_file ()

    # Helper methods for using the MSVC registry settings

    def find_exe(self, exe):
        """Return path to an MSVC executable program.

        Tries to find the program in several places: first, one of the
        MSVC program search paths from the registry; next, the directories
        in the PATH environment variable.  If any of those work, return an
        absolute path that is known to exist.  If none of them work, just
        return the original program name, 'exe'.
        """
        for p in self.__paths:
            fn = os.path.join(os.path.abspath(p), exe)
            if os.path.isfile(fn):
                return fn

        # didn't find it; try existing path
        for p in string.split(os.environ['Path'],';'):
            fn = os.path.join(os.path.abspath(p),exe)
            if os.path.isfile(fn):
                return fn

        return exe

    def get_msvc_paths(self, path, platform='x86'):
        """Get a list of devstudio directories (include, lib or path).

        Return a list of strings.  The list will be empty if unable to
        access the registry or appropriate registry keys not found.
        """
        if not _can_read_reg:
            return []

        path = path + " dirs"
        if self.__version >= 7:
            key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories"
                   % (self.__root, self.__version))
        else:
            key = (r"%s\6.0\Build System\Components\Platforms"
                   r"\Win32 (%s)\Directories" % (self.__root, platform))

        for base in HKEYS:
            d = read_values(base, key)
            if d:
                if self.__version >= 7:
                    # VS7+ values may contain $(...) macros; expand them.
                    return string.split(self.__macros.sub(d[path]), ";")
                else:
                    return string.split(d[path], ";")
        # MSVC 6 seems to create the registry entries we need only when
        # the GUI is run.
        if self.__version == 6:
            for base in HKEYS:
                if read_values(base, r"%s\6.0" % self.__root) is not None:
                    self.warn("It seems you have Visual Studio 6 installed, "
                        "but the expected registry settings are not present.\n"
                        "You must at least run the Visual Studio GUI once "
                        "so that these entries are created.")
                    break
        return []

    def set_path_env_var(self, name):
        """Set environment variable 'name' to an MSVC path type value.

        This is equivalent to a SET command prior to execution of spawned
        commands.
        """
        # Historical quirk: the registry stores the lib dirs under 'library'.
        if name == "lib":
            p = self.get_msvc_paths("library")
        else:
            p = self.get_msvc_paths(name)
        if p:
            os.environ[name] = string.join(p, ';')
# If Python itself was built with VS 2005 or newer (build version >= 8.0),
# delegate to the newer msvc9compiler implementation, keeping the legacy
# class reachable as OldMSVCCompiler.
if get_build_version() >= 8.0:
    log.debug("Importing new compiler from distutils.msvc9compiler")
    OldMSVCCompiler = MSVCCompiler
    from distutils.msvc9compiler import MSVCCompiler
    # get_build_architecture not really relevant now we support cross-compile
    from distutils.msvc9compiler import MacroExpander
|
aminghadersohi/airflow | refs/heads/master | airflow/ti_deps/deps/dagrun_exists_dep.py | 58 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.db import provide_session
from airflow.utils.state import State
class DagrunRunningDep(BaseTIDep):
    """Dependency requiring that the task instance's DagRun exists and is
    in the RUNNING state."""
    NAME = "Dagrun Running"
    IGNOREABLE = True

    @provide_session
    def _get_dep_statuses(self, ti, session, dep_context):
        """Yield failing statuses when the TI's dagrun is missing or not
        running; yields nothing when the dependency is satisfied."""
        dag = ti.task.dag
        dagrun = ti.get_dagrun(session)
        if not dagrun:
            # The import is needed here to avoid a circular dependency
            from airflow.models import DagRun
            running_dagruns = DagRun.find(
                dag_id=dag.dag_id,
                state=State.RUNNING,
                external_trigger=False,
                session=session
            )
            # Best-effort diagnosis: the most likely cause of a missing
            # dagrun is that the DAG is at its max_active_runs cap.
            if len(running_dagruns) >= dag.max_active_runs:
                reason = ("The maximum number of active dag runs ({0}) for this task "
                          "instance's DAG '{1}' has been reached.".format(
                              dag.max_active_runs,
                              ti.dag_id))
            else:
                reason = "Unknown reason"
            yield self._failing_status(
                reason="Task instance's dagrun did not exist: {0}.".format(reason))
        else:
            if dagrun.state != State.RUNNING:
                yield self._failing_status(
                    reason="Task instance's dagrun was not in the 'running' state but in "
                           "the state '{}'.".format(dagrun.state))
|
ankurjimmy/catawampus | refs/heads/master | tr/vendor/bup/lib/tornado/ioloop.py | 8 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A level-triggered I/O loop for non-blocking sockets."""
import bisect
import errno
import os
import logging
import select
import time
import traceback
try:
import signal
except ImportError:
signal = None
try:
import fcntl
except ImportError:
if os.name == 'nt':
import win32_support
import win32_support as fcntl
else:
raise
class IOLoop(object):
"""A level-triggered I/O loop.
We use epoll if it is available, or else we fall back on select(). If
you are implementing a system that needs to handle 1000s of simultaneous
connections, you should use Linux and either compile our epoll module or
use Python 2.6+ to get epoll support.
Example usage for a simple TCP server:
import errno
import functools
import ioloop
import socket
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error, e:
if e[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.listen(128)
io_loop = ioloop.IOLoop.instance()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP | _EPOLLRDHUP
    def __init__(self, impl=None):
        # 'impl' lets tests inject a fake poller; otherwise _poll() picks
        # the best available implementation for this platform.
        self._impl = impl or _poll()
        if hasattr(self._impl, 'fileno'):
            self._set_close_exec(self._impl.fileno())
        self._handlers = {}      # fd -> handler callback (see add_handler)
        self._events = {}        # fd -> pending event mask
        self._callbacks = set()  # callbacks to run on the next loop iteration
        self._timeouts = []      # pending timeouts; start() reads index 0 as earliest
        self._running = False
        self._stopped = False
        self._blocking_log_threshold = None

        # Create a pipe that we send bogus data to when we want to wake
        # the I/O loop when it is idle
        if os.name != 'nt':
            r, w = os.pipe()
            self._set_nonblocking(r)
            self._set_nonblocking(w)
            self._set_close_exec(r)
            self._set_close_exec(w)
            self._waker_reader = os.fdopen(r, "r", 0)
            self._waker_writer = os.fdopen(w, "w", 0)
        else:
            self._waker_reader = self._waker_writer = win32_support.Pipe()
            r = self._waker_writer.reader_fd
        self.add_handler(r, self._read_waker, self.READ)
    @classmethod
    def instance(cls):
        """Returns a global IOLoop instance.

        Most single-threaded applications have a single, global IOLoop.
        Use this method instead of passing around IOLoop instances
        throughout your code.

        A common pattern for classes that depend on IOLoops is to use
        a default argument to enable programs with multiple IOLoops
        but not require the argument for simpler applications:

            class MyClass(object):
                def __init__(self, io_loop=None):
                    self.io_loop = io_loop or IOLoop.instance()
        """
        # Lazily created singleton stored on the class itself.
        # NOTE(review): the check-then-set below is not lock-protected;
        # call this once from the main thread before spawning threads.
        if not hasattr(cls, "_instance"):
            cls._instance = cls()
        return cls._instance
    @classmethod
    def initialized(cls):
        # True once instance() has been called (i.e. the singleton exists).
        return hasattr(cls, "_instance")
    def add_handler(self, fd, handler, events):
        """Registers the given handler to receive the given events for fd."""
        self._handlers[fd] = handler
        # ERROR conditions are always of interest, so OR them in.
        self._impl.register(fd, events | self.ERROR)
    def update_handler(self, fd, events):
        """Changes the events we listen for fd."""
        # Same implicit ERROR interest as in add_handler().
        self._impl.modify(fd, events | self.ERROR)
    def remove_handler(self, fd):
        """Stop listening for events on fd."""
        self._handlers.pop(fd, None)
        self._events.pop(fd, None)
        try:
            self._impl.unregister(fd)
        except (OSError, IOError):
            # The fd may already be closed or never registered; non-fatal.
            logging.debug("Error deleting fd from IOLoop", exc_info=True)
    def set_blocking_log_threshold(self, s):
        """Logs a stack trace if the ioloop is blocked for more than s seconds.

        Pass None to disable.  Requires python 2.6 on a unixy platform.
        """
        if not hasattr(signal, "setitimer"):
            logging.error("set_blocking_log_threshold requires a signal module "
                          "with the setitimer method")
            return
        self._blocking_log_threshold = s
        if s is not None:
            # The itimer itself is armed/disarmed around poll() in start().
            signal.signal(signal.SIGALRM, self._handle_alarm)
    def _handle_alarm(self, signal, frame):
        # SIGALRM handler: the loop has been busy past the configured
        # blocking threshold; log where it is currently stuck.
        logging.warning('IOLoop blocked for %f seconds in\n%s',
                        self._blocking_log_threshold,
                        ''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the I/O handlers calls stop(), which
will make the loop stop after the current event iteration completes.
"""
if self._stopped:
self._stopped = False
return
self._running = True
while True:
# Never use an infinite timeout here - it can stall epoll
poll_timeout = 0.2
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
callbacks = list(self._callbacks)
for callback in callbacks:
# A callback can add or remove other callbacks
if callback in self._callbacks:
self._callbacks.remove(callback)
self._run_callback(callback)
if self._callbacks:
poll_timeout = 0.0
if self._timeouts:
now = time.time()
while self._timeouts and self._timeouts[0].deadline <= now:
timeout = self._timeouts.pop(0)
self._run_callback(timeout.callback)
if self._timeouts:
milliseconds = self._timeouts[0].deadline - now
poll_timeout = min(milliseconds, poll_timeout)
if not self._running:
break
if self._blocking_log_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception, e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if (getattr(e, 'errno') == errno.EINTR or
(isinstance(getattr(e, 'args'), tuple) and
len(e.args) == 2 and e.args[0] == errno.EINTR)):
logging.warning("Interrupted system call", exc_info=1)
continue
else:
raise
if self._blocking_log_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_log_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that update self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
self._handlers[fd](fd, events)
except (KeyboardInterrupt, SystemExit):
raise
except (OSError, IOError), e:
if e[0] == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
logging.error("Exception in I/O handler for fd %d",
fd, exc_info=True)
except:
logging.error("Exception in I/O handler for fd %d",
fd, exc_info=True)
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_log_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
def stop(self):
"""Stop the loop after the current event loop iteration is complete.
If the event loop is not currently running, the next call to start()
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this:
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
ioloop.start() will return after async_method has run its callback,
whether that callback was invoked before or after ioloop.start.
"""
self._running = False
self._stopped = True
self._wake()
    def running(self):
        """Returns true if this IOLoop is currently running."""
        # Set by start() and cleared by stop().
        return self._running
    def add_timeout(self, deadline, callback):
        """Calls the given callback at the time deadline from the I/O loop.

        Returns a handle suitable for remove_timeout().
        """
        # Keep self._timeouts sorted by deadline so start() can pop expired
        # entries from the front.
        timeout = _Timeout(deadline, callback)
        bisect.insort(self._timeouts, timeout)
        return timeout
    def remove_timeout(self, timeout):
        """Cancel a pending timeout; *timeout* is the handle returned by
        add_timeout().  Raises ValueError if it already fired or was removed.
        """
        self._timeouts.remove(timeout)
    def add_callback(self, callback):
        """Calls the given callback on the next I/O loop iteration."""
        self._callbacks.add(callback)
        # Wake a blocked poll() so the callback runs promptly.
        self._wake()
    def remove_callback(self, callback):
        """Removes the given callback from the next I/O loop iteration.

        Raises KeyError if the callback is not pending (set semantics).
        """
        self._callbacks.remove(callback)
    def _wake(self):
        # Write one byte to the self-pipe so a poll() blocked in start()
        # returns immediately; failures are ignored (best-effort wakeup).
        try:
            self._waker_writer.write("x")
        except IOError:
            pass
    def _run_callback(self, callback):
        """Run a scheduled callback, routing errors to
        handle_callback_exception()."""
        try:
            callback()
        except (KeyboardInterrupt, SystemExit):
            # Let interpreter-exit requests propagate.
            raise
        except:
            self.handle_callback_exception(callback)
    def handle_callback_exception(self, callback):
        """This method is called whenever a callback run by the IOLoop
        throws an exception.

        By default simply logs the exception as an error. Subclasses
        may override this method to customize reporting of exceptions.
        The exception itself is not passed explicitly, but is available
        in sys.exc_info.
        """
        # exc_info=True attaches the currently-handled exception's traceback.
        logging.error("Exception in callback %r", callback, exc_info=True)
    def _read_waker(self, fd, events):
        # Drain the self-pipe filled by _wake().  Reading loops until an
        # IOError ends it (presumably the would-block error from the
        # non-blocking waker fd — see _set_nonblocking; verify on Windows).
        try:
            while True:
                self._waker_reader.read()
        except IOError:
            pass
    def _set_nonblocking(self, fd):
        """Put *fd* into non-blocking mode (Unix only, via fcntl)."""
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    def _set_close_exec(self, fd):
        """Mark *fd* close-on-exec so child processes don't inherit it."""
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
class _Timeout(object):
    """An IOLoop timeout, a UNIX timestamp and a callback"""

    # Reduce memory overhead when there are lots of pending callbacks
    __slots__ = ['deadline', 'callback']

    def __init__(self, deadline, callback):
        self.deadline = deadline
        self.callback = callback

    def __cmp__(self, other):
        # Order by deadline, tie-breaking on the callback's id() so the
        # ordering is total and deterministic.  (__cmp__/cmp are Python 2
        # only; Python 3 would need __lt__/__eq__ instead.)
        return cmp((self.deadline, id(self.callback)),
                   (other.deadline, id(other.callback)))
class PeriodicCallback(object):
    """Schedules the given callback to be called periodically.

    The callback is called every callback_time milliseconds.
    Call start() to begin scheduling and stop() to cancel it.
    """
    def __init__(self, callback, callback_time, io_loop=None):
        self.callback = callback
        self.callback_time = callback_time  # period, in milliseconds
        self.io_loop = io_loop or IOLoop.instance()
        self._running = True

    def start(self):
        """Schedule the first (or next) invocation of the callback.

        Re-arms a callback that was previously stop()ped: without resetting
        _running here, stop() followed by start() would schedule a _run()
        that returns immediately, and the callback would never fire again.
        """
        self._running = True
        timeout = time.time() + self.callback_time / 1000.0
        self.io_loop.add_timeout(timeout, self._run)

    def stop(self):
        """Cancel the schedule; any in-flight timeout becomes a no-op."""
        self._running = False

    def _run(self):
        if not self._running: return
        try:
            self.callback()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Keep the schedule alive even if one invocation fails.
            logging.error("Error in periodic callback", exc_info=True)
        self.start()
class _EPoll(object):
    """An epoll-based event loop using our C module for Python 2.5 systems"""

    # Values of the `op` argument to epoll_ctl(2).
    _EPOLL_CTL_ADD = 1
    _EPOLL_CTL_DEL = 2
    _EPOLL_CTL_MOD = 3

    def __init__(self):
        self._epoll_fd = epoll.epoll_create()

    def fileno(self):
        # Expose the epoll fd itself so the loop can be nested in another.
        return self._epoll_fd

    def register(self, fd, events):
        epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_ADD, fd, events)

    def modify(self, fd, events):
        epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_MOD, fd, events)

    def unregister(self, fd):
        epoll.epoll_ctl(self._epoll_fd, self._EPOLL_CTL_DEL, fd, 0)

    def poll(self, timeout):
        # epoll_wait takes milliseconds; `timeout` is in seconds.
        return epoll.epoll_wait(self._epoll_fd, int(timeout * 1000))
class _KQueue(object):
    """A kqueue-based event loop for BSD/Mac systems."""
    def __init__(self):
        self._kqueue = select.kqueue()
        # fd -> registered event mask.  kqueue itself can't be queried for
        # current registrations, so modify()/unregister() rely on this map.
        self._active = {}

    def fileno(self):
        return self._kqueue.fileno()

    def register(self, fd, events):
        self._control(fd, events, select.KQ_EV_ADD)
        self._active[fd] = events

    def modify(self, fd, events):
        # kqueue has no single "modify" operation: drop and re-add the fd.
        self.unregister(fd)
        self.register(fd, events)

    def unregister(self, fd):
        events = self._active.pop(fd)
        self._control(fd, events, select.KQ_EV_DELETE)

    def _control(self, fd, events, flags):
        # Translate an IOLoop event mask into one kevent per kqueue filter.
        kevents = []
        if events & IOLoop.WRITE:
            kevents.append(select.kevent(
                fd, filter=select.KQ_FILTER_WRITE, flags=flags))
        if events & IOLoop.READ or not kevents:
            # Always read when there is not a write
            kevents.append(select.kevent(
                fd, filter=select.KQ_FILTER_READ, flags=flags))
        # Even though control() takes a list, it seems to return EINVAL
        # on Mac OS X (10.6) when there is more than one event in the list.
        for kevent in kevents:
            self._kqueue.control([kevent], 0)

    def poll(self, timeout):
        # Collapse per-filter kevents back into fd -> IOLoop event-mask.
        kevents = self._kqueue.control(None, 1000, timeout)
        events = {}
        for kevent in kevents:
            fd = kevent.ident
            flags = 0  # NOTE(review): unused local, kept as-is
            if kevent.filter == select.KQ_FILTER_READ:
                events[fd] = events.get(fd, 0) | IOLoop.READ
            if kevent.filter == select.KQ_FILTER_WRITE:
                events[fd] = events.get(fd, 0) | IOLoop.WRITE
            if kevent.flags & select.KQ_EV_ERROR:
                events[fd] = events.get(fd, 0) | IOLoop.ERROR
        return events.items()
class _Select(object):
    """A simple, select()-based IOLoop implementation for non-Linux systems"""
    def __init__(self):
        self.read_fds = set()
        self.write_fds = set()
        self.error_fds = set()
        self.fd_sets = (self.read_fds, self.write_fds, self.error_fds)

    def _event_sets(self):
        # (IOLoop event bit, fd set) pairs, in read/write/error order.
        return ((IOLoop.READ, self.read_fds),
                (IOLoop.WRITE, self.write_fds),
                (IOLoop.ERROR, self.error_fds))

    def register(self, fd, events):
        for bit, fds in self._event_sets():
            if events & bit:
                fds.add(fd)

    def modify(self, fd, events):
        self.unregister(fd)
        self.register(fd, events)

    def unregister(self, fd):
        for fds in self.fd_sets:
            fds.discard(fd)

    def poll(self, timeout):
        readable, writeable, errors = select.select(
            self.read_fds, self.write_fds, self.error_fds, timeout)
        events = {}
        for bit, ready in ((IOLoop.READ, readable),
                           (IOLoop.WRITE, writeable),
                           (IOLoop.ERROR, errors)):
            for fd in ready:
                events[fd] = events.get(fd, 0) | bit
        return events.items()
# Choose a poll implementation. Use epoll if it is available, fall back to
# select() for non-Linux platforms
if hasattr(select, "epoll"):
    # Python 2.6+ on Linux: select.epoll objects have the same interface
    # (register/modify/unregister/poll) the loop expects, so use them directly.
    _poll = select.epoll
elif hasattr(select, "kqueue"):
    # Python 2.6+ on BSD or Mac
    _poll = _KQueue
else:
    try:
        # Linux systems with our C module installed
        import epoll
        _poll = _EPoll
    except:
        # All other systems
        import sys
        if "linux" in sys.platform:
            # epoll would be much cheaper than select() here; warn.
            logging.warning("epoll module not found; using select()")
        _poll = _Select
|
ctmunwebmaster/huxley | refs/heads/master | fabfile/dependencies.py | 2 | # Copyright (c) 2011-2015 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import json
from os.path import join
from fabric.api import abort, env, hide, lcd, local, task
from fabric.contrib.console import confirm
from .utils import ui
@task(default=True)
def check(lang='all'):
    '''Check installed dependencies against the requirements files.'''
    # lang may be 'python'/'py', 'js', or 'all' (default); any other value
    # is silently a no-op.
    if lang in ('python', 'py', 'all'):
        check_python()
    if lang in ('js', 'all'):
        check_js()
@task
def update(lang='all'):
    '''Update dependencies based on the requirements files.'''
    # Same lang filter as check(): 'python'/'py', 'js', or 'all'.
    if lang in ('python', 'py', 'all'):
        update_python()
    if lang in ('js', 'all'):
        update_js()
def check_python():
    '''Check/update the installed dependencies against requirements.txt.'''
    print ui.info('Checking python dependencies...')
    with open(join(env.huxley_root, 'requirements.txt'), 'r') as f:
        requirements = f.read()
    with hide('running'):
        # `pip freeze` output uses the same "name==version" lines as
        # requirements.txt, so both sides go through the same parser.
        installed = local('pip freeze', capture=True)

    def parse(text):
        # Turn "pkg==1.0\npkg2==2.0" into {'pkg': '1.0', 'pkg2': '2.0'}.
        pairs = map(lambda l: l.split('=='), text.strip().split('\n'))
        return {pair[0]: pair[1] for pair in pairs}

    expected, actual = parse(requirements), parse(installed)
    if not check_versions(expected, actual):
        # Out of date: offer to fix it interactively.
        # (sic: "dependences" typo is in the user-facing prompt)
        if confirm('Update dependences?'):
            update_python()
def update_python():
    '''Update python dependencies with pip.'''
    print 'Updating python dependencies...'
    # Run pip from the project root so the relative requirements path works.
    with lcd(env.huxley_root):
        local('pip install -r requirements.txt')
def check_js():
    '''Check the installed dependencies against package.json.'''
    print ui.info('Checking JS dependencies...')
    with hide('running'):
        # Bail out early when npm itself isn't installed.
        if not local('which npm', capture=True):
            print ui.error('npm not found! Install it with `brew install npm`.')
            return
    with open(join(env.huxley_root, 'package.json'), 'r') as p:
        package = json.loads(p.read())
        required = package['dependencies']
    with lcd(env.huxley_root), hide('running'):
        # `npm list --json` reports what's actually inside node_modules.
        npm_list = json.loads(local('npm list --json', capture=True))
        modules = npm_list.get('dependencies', {}).items()
        installed = {name: info['version'] for name, info in modules}
    if not check_versions(required, installed):
        if confirm('Update dependencies?'):
            update_js()
def update_js():
    '''Update JS dependencies with npm.'''
    print 'Updating JS dependencies...'
    # npm install reads package.json in the project root.
    with lcd(env.huxley_root):
        local('npm install')
def check_versions(expected, actual):
    '''Check module versions and print a table of mismatches.

    Both arguments are {module_name: version_string} dicts.  Returns True
    when every expected module is installed at exactly the expected
    version; otherwise prints a mismatch table and returns False.
    '''
    if all(actual.get(name) == version for name, version in expected.items()):
        print ui.success('Dependencies are up-to-date.')
        return True

    print ui.warning('Dependencies are out of date!')
    # Three fixed-width 25-character columns: module/required/installed.
    row_format ="{:<25}" * 3
    print '\n', row_format.format('module', 'required', 'installed')
    print row_format.format('------', '--------', '---------')
    for module in sorted(expected.keys()):
        expected_version = expected[module]
        actual_version = actual.get(module, ui.warning('none'))
        if expected_version != actual_version:
            print row_format.format(module, expected_version, actual_version)
    print row_format.format('------', '--------', '---------'), '\n'
    return False
|
yanggg1133/amdeb-amazon | refs/heads/master | amdeb_amazon/models/product_operation.py | 2 | # -*- coding: utf-8 -*-
from openerp import models, fields
from ..model_names.product_operation import PRODUCT_OPERATION_TABLE
class ProductOperation(models.Model):
    """Add a column to store the Amazon synchronization status."""
    _inherit = [PRODUCT_OPERATION_TABLE]

    # This is set by the Sync process regardless of the sync results.
    amazon_sync_timestamp = fields.Datetime(
        string='Amazon Synchronization Timestamp',
        readonly=True,
    )
|
pietroalbini/botogram | refs/heads/master | botogram/hooks.py | 1 | # Copyright (c) 2015-2019 The Botogram Authors (see AUTHORS)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from inspect import Parameter
import json
import logbook
import re
from time import time
from .callbacks import hashed_callback_name
from .context import Context
from .converters import _parameters_conversion
from .objects.messages import Message
logger = logbook.Logger("botogram hook")
class Hook:
    """Base class for all the hooks"""

    _only_texts = False
    _botogram_hook = True

    def __init__(self, func, component, args=None):
        # Namespace the hook's name with the component it belongs to.
        prefix = component.component_name + "::" if component.component_name \
            else ""

        self.func = func
        self.name = prefix + func.__name__
        self.component = component
        self.component_id = component._component_id

        self._args = args
        self._after_init(args)

    def __reduce__(self):
        # Pickle by re-running the constructor through rebuild().
        return rebuild, (self.__class__, self.func, self.component, self._args)

    def __repr__(self):
        return '<%s "%s">' % (self.__class__.__name__, self.name)

    def _after_init(self, args):
        """Hook-specific setup; subclasses override this as needed."""
        pass

    def call(self, bot, update):
        """Call the hook"""
        with Context(bot, self, update):
            # Text-only hooks ignore updates whose message has no text.
            if self._only_texts and update.message.text is None:
                return
            return self._call(bot, update)

    def _call(self, bot, update):
        """*Actually* call the hook"""
        msg = update.message
        return bot._call(self.func, self.component_id,
                         chat=msg.chat, message=msg)
def rebuild(cls, func, component, args):
    """Recreate a hook instance (used as the pickle constructor)."""
    return cls(func, component, args)
class BeforeProcessingHook(Hook):
    """Underlying hook for @bot.before_processing

    (The previous docstring said @bot.process_message, which is the
    docstring of ProcessMessageHook below — a copy-paste slip.)
    """
    pass
class ProcessMessageHook(Hook):
    """Underlying hook for @bot.process_message"""
    # Inherits the default _call, which forwards chat and message.
    pass
class PollUpdateHook(Hook):
    """Underlying hook for @bot.poll_update"""

    def _call(self, bot, update):
        # Poll updates carry no chat/message: forward only the poll object.
        return bot._call(self.func, self.component_id, poll=update.poll)
class MemoryPreparerHook(Hook):
    """Underlying hook for @bot.prepare_memory"""

    def call(self, memory):
        # Unlike other hooks, this one runs outside an update Context and
        # receives the shared-memory object directly.
        return self.func(memory)
class NoCommandsHook(Hook):
    """Underlying hook for an internal event"""
    # Presumably dispatched when no command hook handled the message —
    # verify against the dispatcher.  Inherits the default _call.
    pass
class MessageEqualsHook(Hook):
    """Underlying hook for @bot.message_equals"""

    _only_texts = True

    def _after_init(self, args):
        # Normalize the target string once up front when matching is
        # case-insensitive.
        target = args["string"]
        self._string = target.lower() if args["ignore_case"] else target

    def _prepare(self, update):
        # Return the message plus its text, normalized the same way the
        # target string was.
        message = update.message
        text = message.text
        if self._args["ignore_case"]:
            text = text.lower()
        return message, text

    def _call(self, bot, update):
        message, text = self._prepare(update)
        if text == self._string:
            return bot._call(self.func, self.component_id,
                             chat=message.chat, message=message)
class MessageContainsHook(MessageEqualsHook):
    """Underlying hook for @bot.message_contains"""

    _only_texts = True

    def _call(self, bot, update):
        message, text = self._prepare(update)

        matched = 0
        for word in text.split(" "):
            if word != self._string:
                continue
            bot._call(self.func, self.component_id,
                      chat=message.chat, message=message)
            matched += 1
            # Unless "multiple" was requested, stop at the first match.
            if not self._args["multiple"]:
                break
        return matched > 0
class MessageMatchesHook(Hook):
    """Underlying hook for @bot.message_matches"""

    _only_texts = True

    def _after_init(self, args):
        # Compile once; every incoming text message is scanned with this.
        self._regex = re.compile(args["regex"], flags=args["flags"])

    def _call(self, bot, update):
        message = update.message

        found = False
        for hit in self._regex.finditer(message.text):
            found = True
            bot._call(self.func, self.component_id, chat=message.chat,
                      message=message, matches=hit.groups())
            # Without "multiple", only the first match fires the hook.
            if not self._args["multiple"]:
                break
        return found
# Commands may be followed by space-separated arguments.
_command_args_split_re = re.compile(r' +')


class CommandHook(Hook):
    """Underlying hook for @bot.command"""

    _only_texts = True

    def _after_init(self, args):
        # Check if the command name is valid
        if not re.match(r'^[a-zA-Z0-9_]+$', args["name"]):
            raise ValueError("Invalid command name: %s" % args["name"])

        # This regex will match all commands pointed to this bot
        # (group 1: optional "@botname" suffix, group 2: the arguments).
        self._regex = re.compile(r'^\/' + args["name"] + r'(@[a-zA-Z0-9_]+)?'
                                 r'( .*)?$')

        self._name = args["name"]

        self._hidden = False  # hidden commands stay out of the command list
        if "hidden" in args:
            self._hidden = args["hidden"]

        self._order = 0  # sort order in the command list
        if "order" in args:
            self._order = args["order"]

        self._parameters = None  # inspect.Parameter mapping, if declared
        if "parameters" in args:
            self._parameters = args["parameters"]

    def _call(self, bot, update):
        message = update.message
        # Normalize whitespace so argument splitting behaves uniformly.
        text = message.text.replace("\n", " ").replace("\t", " ")

        # Must be the correct command for the correct bot
        match = self._regex.match(text)
        if not match:
            return
        if match.group(1) and match.group(1) != "@" + bot.itself.username:
            return

        args = _command_args_split_re.split(text)[1:]

        # Map positional args onto the function's declared parameters,
        # converting each one when its parameter carries a type annotation.
        params = {}
        if self._parameters:
            for index, parameter in enumerate(self._parameters.values()):
                try:
                    if parameter.annotation is Parameter.empty:
                        params[parameter.name] = args[index]
                    else:
                        params[parameter.name] = _parameters_conversion(
                            parameter.annotation, args[index],
                            parameter.name)
                except IndexError:
                    # Fewer args than parameters: only an error when the
                    # parameter has no default value.
                    if parameter.default is Parameter.empty:
                        logger.debug("A value for the parameter %s "
                                     "is missing" % parameter.name)
                        return True
                except Exception as error:
                    # Conversion failed; log and stop processing the command.
                    logger.debug(error)
                    return True

        bot._call(self.func, self.component_id, chat=message.chat,
                  message=message, args=args, **params)
        return True
class CallbackHook(Hook):
    """Underlying hook for @bot.callback"""

    def _after_init(self, args):
        # Callback names travel inside Telegram callback_data, which is
        # size-limited, so store a hashed, component-scoped name instead.
        self._name = hashed_callback_name(
            "%s:%s" % (self.component.component_name, args["name"])
        )

    def call(self, bot, update, name, data):
        with Context(bot, self, update):
            if not update.callback_query:
                return

            q = update.callback_query
            if name != self._name:
                # The pressed button belongs to a different callback hook.
                return

            if q.is_inline:
                # Inline messages have no full Message object: build a
                # minimal one around the inline_message_id.
                args = {
                    "inline_message_id": q.inline_message_id,
                }
                message = Message(data=args, api=q._api)
                chat = q.chat_instance
            else:
                message = q.message
                chat = q.message.chat

            bot._call(
                self.func, self.component_id, query=q, chat=chat,
                message=message, data=data, is_inline=q.is_inline
            )
            # Answer the query if the hook didn't, so the client's loading
            # indicator stops spinning.
            update.callback_query._maybe_send_noop()
            return True
class InlineHook(Hook):
    """Underlying hook for @bot.inline"""

    def _after_init(self, args):
        self.cache = args["cache"]        # Telegram-side cache time
        self.private = args["private"]    # results personal to the sender
        self.paginate = args["paginate"]  # results per answer "page"

    def _reset_pagination(self, bot, inline, sender, query):
        """(Re)start the results generator for this sender/query pair."""
        inline.cache = self.cache
        inline.private = self.private
        inline.paginate = self.paginate
        generator = bot._call(self.func,
                              self.component_id,
                              inline=inline,
                              sender=sender,
                              query=query)
        if not bot._inline_paginate.get(sender.id, False):
            bot._inline_paginate[sender.id] = dict()
        bot._inline_paginate[sender.id][query] = [
            generator,
            0,  # First offset
            time(),  # Last update time
        ]

    def _call(self, bot, update):
        inline = update.inline_query
        sender = inline.sender
        query = inline.query

        # Start over when there is no saved state for this sender/query,
        # or when Telegram asked for the first page (empty offset).
        if sender.id not in bot._inline_paginate or \
                query not in bot._inline_paginate[sender.id] or \
                inline.offset == '':
            self._reset_pagination(bot, inline, sender, query)

        generator = bot._inline_paginate[sender.id][query][0]
        offset = bot._inline_paginate[sender.id][query][1]

        # Pull one page of results from the hook's generator.  The hook may
        # tweak paginate/cache/private while running, so re-read the
        # `inline` object from the generator's frame on every iteration.
        hook_locals = None
        results = []
        i = offset
        next_offset = offset + 1
        while i < next_offset:
            try:
                element = next(generator)
            except StopIteration:
                break
            element['id'] = i
            results.append(element)
            i += 1
            hook_locals = generator.gi_frame.f_locals["inline"]
            next_offset = offset + hook_locals.paginate

        if len(results) == 0 or hook_locals is None:
            # No more results, don't do anything.  (Checked BEFORE reading
            # hook_locals: with an immediately-exhausted generator the old
            # code dereferenced an unbound local here and raised NameError.)
            return

        cache = hook_locals.cache
        is_private = hook_locals.private

        bot._inline_paginate[sender.id][query][1] = next_offset
        bot._inline_paginate[sender.id][query][2] = time()

        args = {
            "inline_query_id": inline.id,
            "cache_time": cache,
            "is_personal": is_private,
            "results": json.dumps(results),
            "next_offset": next_offset,
        }
        if hook_locals._switch_pm_text is not None:
            args["switch_pm_text"] = hook_locals._switch_pm_text
        if hook_locals._switch_pm_parameter is not None:
            args["switch_pm_parameter"] = hook_locals._switch_pm_parameter
        return bot.api.call("answerInlineQuery", args)
class ChosenInlineHook(Hook):
    """Underlying hook for @bot.inline_feedback"""

    def _call(self, bot, update):
        # Pass the component ID like every other hook in this module does:
        # bot._call takes it right after the function, and omitting it ran
        # the feedback hook without its component's context.
        bot._call(self.func, self.component_id,
                  feedback=update.chosen_inline_result)
class ChatUnavailableHook(Hook):
    """Underlying hook for @bot.chat_unavailable"""

    def call(self, bot, chat_id, reason):
        # No Update object exists for this event, so the Context gets None.
        with Context(bot, self, None):
            return bot._call(self.func, self.component_id, chat_id=chat_id,
                             reason=reason)
class MessageEditedHook(Hook):
    """Underlying hook for @bot.message_edited"""

    def _call(self, bot, update):
        # Edited messages arrive in update.edited_message, not .message.
        message = update.edited_message
        return bot._call(self.func, self.component_id, chat=message.chat,
                         message=message)
class ChannelPostHook(Hook):
    """Underlying hook for @bot.channel_post"""

    def _call(self, bot, update):
        # Channel posts arrive in update.channel_post, not .message.
        message = update.channel_post
        return bot._call(self.func, self.component_id, chat=message.chat,
                         message=message)
class EditedChannelPostHook(Hook):
    """Underlying hook for @bot.channel_post_edited"""

    def _call(self, bot, update):
        # Edited channel posts arrive in update.edited_channel_post.
        message = update.edited_channel_post
        return bot._call(self.func, self.component_id, chat=message.chat,
                         message=message)
class TimerHook(Hook):
    """Underlying hook for a timer"""

    def call(self, bot):
        # Timers fire without an Update, so the Context carries None.
        with Context(bot, self, None):
            return bot._call(self.func, self.component_id)
|
zbqf109/goodo | refs/heads/master | openerp/netsvc.py | 1 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import logging.handlers
import os
import platform
import pprint
import release
import sys
import threading
import psycopg2
import openerp
import sql_db
import tools
_logger = logging.getLogger(__name__)
def log(logger, level, prefix, msg, depth=None):
    """Pretty-print *msg* through *logger* at *level*.

    The first line carries *prefix*; continuation lines are indented by
    the prefix's width so multi-line structures stay visually aligned.
    *depth* is forwarded to pprint.pformat().
    """
    text = prefix + pprint.pformat(msg, depth=depth)
    continuation = ' ' * len(prefix)
    for i, line in enumerate(text.split('\n')):
        logger.log(level, line if i == 0 else continuation + line)
def LocalService(name):
    """
    The openerp.netsvc.LocalService() function is deprecated. It still works
    in two cases: workflows and reports. For workflows, instead of using
    LocalService('workflow'), openerp.workflow should be used (better yet,
    methods on openerp.osv.orm.Model should be used). For reports,
    openerp.report.render_report() should be used (methods on the Model should
    be provided too in the future).
    """
    assert openerp.conf.deprecation.allow_local_service
    _logger.warning("LocalService() is deprecated since march 2013 (it was called with '%s')." % name)

    if name == 'workflow':
        return openerp.workflow

    if name.startswith('report.'):
        # Serve the report from the in-memory registry when possible…
        report = openerp.report.interface.report_int._reports.get(name)
        if report:
            return report
        else:
            # …otherwise look it up in the database bound to this thread.
            dbname = getattr(threading.currentThread(), 'dbname', None)
            if dbname:
                registry = openerp.modules.registry.RegistryManager.get(dbname)
                with registry.cursor() as cr:
                    return registry['ir.actions.report.xml']._lookup_report(cr, name[len('report.'):])
# Absolute path of the source tree, used to strip the common prefix off
# file paths stored in the ir_logging table.
path_prefix = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))

class PostgreSQLHandler(logging.Handler):
    """ PostgreSQL Loggin Handler will store logs in the database, by default
    the current database, can be set using --log-db=DBNAME
    """
    def emit(self, record):
        # Resolve the target database: --log-db wins unless it's the "%d"
        # placeholder, in which case the current thread's database is used.
        ct = threading.current_thread()
        ct_db = getattr(ct, 'dbname', None)
        dbname = tools.config['log_db'] if tools.config['log_db'] and tools.config['log_db'] != '%d' else ct_db
        if not dbname:
            return
        # Best-effort: a failure to log must never break the request, hence
        # ignore(Exception); sql_db's own logger is muted to avoid the
        # logging-from-logging recursion.
        with tools.ignore(Exception), tools.mute_logger('openerp.sql_db'), sql_db.db_connect(dbname, allow_uri=True).cursor() as cr:
            try:
                cr.autocommit(True)
            except AttributeError:
                # TODO: adapt to other databases than `postgresql`
                cr.connection.set_isolation_level(0)
            msg = tools.ustr(record.msg)
            if record.args:
                msg = msg % record.args
            traceback = getattr(record, 'exc_text', '')
            if traceback:
                msg = "%s\n%s" % (msg, traceback)
            # we do not use record.levelname because it may have been changed by ColoredFormatter.
            levelname = logging.getLevelName(record.levelno)
            val = ('server', ct_db, record.name, levelname, msg, record.pathname[len(path_prefix)+1:], record.lineno, record.funcName)
            cr.execute("""
                INSERT INTO ir_logging(create_date, type, dbname, name, level, message, path, line, func)
                VALUES (NOW() at time zone 'UTC', %s, %s, %s, %s, %s, %s, %s, %s)
            """, val)
# ANSI color indices (0-7 are real colors; 9 is the terminal default).
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, _NOTHING, DEFAULT = range(10)
#The background is set with 40 plus the number of the color, and the foreground with 30
#These are the sequences need to get colored ouput
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLOR_PATTERN = "%s%s%%s%s" % (COLOR_SEQ, COLOR_SEQ, RESET_SEQ)
# Log level -> (foreground, background) pair used by ColoredFormatter.
LEVEL_COLOR_MAPPING = {
    logging.DEBUG: (BLUE, DEFAULT),
    logging.INFO: (GREEN, DEFAULT),
    logging.WARNING: (YELLOW, DEFAULT),
    logging.ERROR: (RED, DEFAULT),
    logging.CRITICAL: (WHITE, RED),
}
class DBFormatter(logging.Formatter):
    """Formatter exposing %(pid)s and %(dbname)s to the format string."""
    def format(self, record):
        # Inject the process id and the database the current thread is
        # serving (the server tags request threads with `dbname`).
        record.pid = os.getpid()
        current = threading.current_thread()
        record.dbname = getattr(current, 'dbname', '?')
        return logging.Formatter.format(self, record)
class ColoredFormatter(DBFormatter):
    """DBFormatter that wraps the level name in ANSI color escapes."""
    def format(self, record):
        # Unknown levels fall back to green-on-default.
        fg, bg = LEVEL_COLOR_MAPPING.get(record.levelno, (GREEN, DEFAULT))
        record.levelname = COLOR_PATTERN % (30 + fg, 40 + bg, record.levelname)
        return DBFormatter.format(self, record)
# Guard so init_logger() configures the root logger only once.
_logger_init = False

def init_logger():
    """Configure the root logger (called once at server startup).

    Picks a handler (stderr, syslog / NT event log, or a plain or rotating
    logfile), uses colored output when writing to a tty, optionally mirrors
    records into PostgreSQL, and applies per-logger levels derived from the
    --log-level and --log-handler options.
    """
    global _logger_init
    if _logger_init:
        return
    _logger_init = True

    logging.addLevelName(25, "INFO")  # render custom level 25 as "INFO"

    from tools.translate import resetlocale
    resetlocale()

    # create a format for log messages and dates
    format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s'

    # Normal Handler on stderr
    handler = logging.StreamHandler()

    if tools.config['syslog']:
        # SysLog Handler (or the NT event log on Windows)
        if os.name == 'nt':
            handler = logging.handlers.NTEventLogHandler("%s %s" % (release.description, release.version))
        elif platform.system() == 'Darwin':
            handler = logging.handlers.SysLogHandler('/var/run/log')
        else:
            handler = logging.handlers.SysLogHandler('/dev/log')
        # Syslog adds its own timestamp, so use a shorter format.
        format = '%s %s' % (release.description, release.version) \
                + ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'

    elif tools.config['logfile']:
        # LogFile Handler
        logf = tools.config['logfile']
        try:
            # We check we have the right location for the log files
            dirname = os.path.dirname(logf)
            if dirname and not os.path.isdir(dirname):
                os.makedirs(dirname)
            if tools.config['logrotate'] is not False:
                # Daily rotation, 30 files kept.
                handler = logging.handlers.TimedRotatingFileHandler(filename=logf, when='D', interval=1, backupCount=30)
            elif os.name == 'posix':
                handler = logging.handlers.WatchedFileHandler(logf)
            else:
                handler = logging.FileHandler(logf)
        except Exception:
            # Fall back to the stderr StreamHandler created above.
            sys.stderr.write("ERROR: couldn't create the logfile directory. Logging to the standard output.\n")

    # Check that handler.stream has a fileno() method: when running OpenERP
    # behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
    # which has no fileno() method. (mod_wsgi.Log is what is being bound to
    # sys.stderr when the logging.StreamHandler is being constructed above.)
    def is_a_tty(stream):
        return hasattr(stream, 'fileno') and os.isatty(stream.fileno())

    if os.name == 'posix' and isinstance(handler, logging.StreamHandler) and is_a_tty(handler.stream):
        formatter = ColoredFormatter(format)
    else:
        formatter = DBFormatter(format)
    handler.setFormatter(formatter)
    logging.getLogger().addHandler(handler)

    if tools.config['log_db']:
        # Mirror records into the ir_logging table as well.
        db_levels = {
            'debug': logging.DEBUG,
            'info': logging.INFO,
            'warning': logging.WARNING,
            'error': logging.ERROR,
            'critical': logging.CRITICAL,
        }
        postgresqlHandler = PostgreSQLHandler()
        # --log-db-level accepts either a name from db_levels or a number.
        postgresqlHandler.setLevel(int(db_levels.get(tools.config['log_db_level'], tools.config['log_db_level'])))
        logging.getLogger().addHandler(postgresqlHandler)

    # Configure loggers levels
    pseudo_config = PSEUDOCONFIG_MAPPER.get(tools.config['log_level'], [])

    logconfig = tools.config['log_handler']

    # Later entries win: defaults < --log-level shortcut < --log-handler.
    logging_configurations = DEFAULT_LOG_CONFIGURATION + pseudo_config + logconfig
    for logconfig_item in logging_configurations:
        loggername, level = logconfig_item.split(':')
        level = getattr(logging, level, logging.INFO)
        logger = logging.getLogger(loggername)
        logger.setLevel(level)

    for logconfig_item in logging_configurations:
        _logger.debug('logger level set: "%s"', logconfig_item)
# Baseline per-logger levels, applied before --log-level / --log-handler
# overrides (each entry is "logger_name:LEVEL"; "" is the root logger).
DEFAULT_LOG_CONFIGURATION = [
    'openerp.workflow.workitem:WARNING',
    'openerp.http.rpc.request:INFO',
    'openerp.http.rpc.response:INFO',
    'openerp.addons.web.http:INFO',
    'openerp.sql_db:INFO',
    ':INFO',
]
# Shortcut names accepted by --log-level, expanded to logger:level pairs.
PSEUDOCONFIG_MAPPER = {
    'debug_rpc_answer': ['openerp:DEBUG','openerp.http.rpc.request:DEBUG', 'openerp.http.rpc.response:DEBUG'],
    'debug_rpc': ['openerp:DEBUG','openerp.http.rpc.request:DEBUG'],
    'debug': ['openerp:DEBUG'],
    'debug_sql': ['openerp.sql_db:DEBUG'],
    'info': [],
    'warn': ['openerp:WARNING', 'werkzeug:WARNING'],
    'error': ['openerp:ERROR', 'werkzeug:ERROR'],
    'critical': ['openerp:CRITICAL', 'werkzeug:CRITICAL'],
}
|
pongem/python-bot-project | refs/heads/master | appengine/standard/botapp/env/lib/python2.7/site-packages/django/contrib/postgres/forms/hstore.py | 70 | import json
from django import forms
from django.core.exceptions import ValidationError
from django.utils import six
from django.utils.translation import ugettext_lazy as _
__all__ = ['HStoreField']
class HStoreField(forms.CharField):
    """
    A field for HStore data which accepts dictionary JSON input.
    """
    widget = forms.Textarea
    default_error_messages = {
        'invalid_json': _('Could not load JSON data.'),
        'invalid_format': _('Input must be a JSON dictionary.'),
    }

    def prepare_value(self, value):
        # Show dicts to the widget in their JSON text form.
        return json.dumps(value) if isinstance(value, dict) else value

    def to_python(self, value):
        # Empty input (None, "", etc.) means an empty hstore.
        if not value:
            return {}
        if not isinstance(value, dict):
            try:
                value = json.loads(value)
            except ValueError:
                raise ValidationError(
                    self.error_messages['invalid_json'],
                    code='invalid_json',
                )
            if not isinstance(value, dict):
                # Valid JSON, but not an object/dict (e.g. a list).
                raise ValidationError(
                    self.error_messages['invalid_format'],
                    code='invalid_format',
                )
        # Cast everything to strings for ease.
        for key in value:
            value[key] = six.text_type(value[key])
        return value

    def has_changed(self, initial, data):
        """
        Return True if data differs from initial.
        """
        # Treat None like an empty dict, so a missing initial value
        # compares equal to blank input.
        return super(HStoreField, self).has_changed(self.to_python(initial),
                                                    data)
|
illume/numpy3k | refs/heads/master | numpy/matrixlib/tests/test_regression.py | 3 | from numpy.testing import *
import numpy as np
rlevel = 1
class TestRegression(TestCase):
    """Regression tests for numpy matrix behavior.

    Each test pins a historical ticket: #71, #125, #473, #83.
    """

    def test_kron_matrix(self, level=rlevel):
        """Ticket #71: kron() of matrices must return a matrix."""
        x = np.matrix('[1 0; 1 0]')
        assert_equal(type(np.kron(x, x)), type(x))

    def test_matrix_properties(self, level=rlevel):
        """Ticket #125: .real/.imag/nonzero() must preserve the matrix type."""
        a = np.matrix([1.0], dtype=float)
        assert(type(a.real) is np.matrix)
        assert(type(a.imag) is np.matrix)
        c, d = np.matrix([0.0]).nonzero()
        assert(type(c) is np.matrix)
        assert(type(d) is np.matrix)

    def test_matrix_multiply_by_1d_vector(self, level=rlevel):
        """Ticket #473: matrix * 1d vector with mismatched shape must raise."""
        def mul():
            np.mat(np.eye(2)) * np.ones(2)
        # assertRaises is the supported spelling; failUnlessRaises was a
        # deprecated unittest alias, removed in Python 3.12.
        self.assertRaises(ValueError, mul)

    def test_matrix_std_argmax(self, level=rlevel):
        """Ticket #83: std()/argmax() on a matrix must return scalars."""
        x = np.asmatrix(np.random.uniform(0, 1, (3, 3)))
        self.assertEqual(x.std().shape, ())
        self.assertEqual(x.argmax().shape, ())
|
InakiZabala/odoomrp-wip | refs/heads/8.0 | product_pricelist_rules/models/product.py | 25 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, api
class ProductTemplate(models.Model):
    # Extends product.template with a shortcut action that opens the
    # pricelist items referencing this template.
    _inherit = "product.template"

    @api.multi
    def show_pricelists(self):
        """Return an act_window dict listing pricelist items for this
        template, pre-filtered by the first pricelist type."""
        # NOTE(review): the return value of this browse() is discarded, so
        # the call appears to have no effect — confirm it isn't dead code.
        self.with_context(
            {'search_default_pricelist_type_id': 1}).browse(self.ids)
        result = self._get_act_window_dict(
            'product_pricelist_rules.pricelist_items_action')
        # Default the search filter and the template used when creating
        # new items from the opened view.
        result['context'] = {'search_default_pricelist_type_id': 1,
                             'default_product_tmpl_id': self.id}
        result['domain'] = [('product_tmpl_id', '=', self.id)]
        return result
class ProductProduct(models.Model):
    # Variant-level counterpart of ProductTemplate.show_pricelists.
    _inherit = "product.product"

    @api.multi
    def show_pricelists(self):
        """Return an act_window dict listing pricelist items that target
        this variant, or its template with no variant set."""
        # NOTE(review): super() is invoked against the *template* record's
        # class rather than this class, presumably to reuse the template
        # action as a base — confirm this indirection is intentional.
        res = super(self.product_tmpl_id.__class__,
                    self.product_tmpl_id).show_pricelists()
        if res:
            res['context'] = {'search_default_pricelist_type_id': 1,
                              'default_product_id': self.id}
            # Items for this variant, OR template-wide items (no variant).
            res['domain'] = ['|', ('product_id', '=', self.id),
                             '&', ('product_tmpl_id', '=',
                                   self.product_tmpl_id.id),
                             ('product_id', '=', False)]
        return res
|
dgellis90/nipype | refs/heads/master | nipype/interfaces/fsl/tests/test_auto_Complex.py | 10 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..utils import Complex
def test_Complex_inputs():
    # Auto-generated interface test (see file header: DO NOT EDIT by hand).
    # Every metadata flag declared below must match what
    # Complex.input_spec() actually carries on its traits.
    input_map = dict(args=dict(argstr='%s',
    ),
    complex_cartesian=dict(argstr='-complex',
    position=1,
    xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
    ),
    complex_in_file=dict(argstr='%s',
    position=2,
    ),
    complex_in_file2=dict(argstr='%s',
    position=3,
    ),
    complex_merge=dict(argstr='-complexmerge',
    position=1,
    xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge', 'start_vol', 'end_vol'],
    ),
    complex_out_file=dict(argstr='%s',
    genfile=True,
    position=-3,
    xor=['complex_out_file', 'magnitude_out_file', 'phase_out_file', 'real_out_file', 'imaginary_out_file', 'real_polar', 'real_cartesian'],
    ),
    complex_polar=dict(argstr='-complexpolar',
    position=1,
    xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
    ),
    complex_split=dict(argstr='-complexsplit',
    position=1,
    xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
    ),
    end_vol=dict(argstr='%d',
    position=-1,
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    imaginary_in_file=dict(argstr='%s',
    position=3,
    ),
    imaginary_out_file=dict(argstr='%s',
    genfile=True,
    position=-3,
    xor=['complex_out_file', 'magnitude_out_file', 'phase_out_file', 'real_polar', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
    ),
    magnitude_in_file=dict(argstr='%s',
    position=2,
    ),
    magnitude_out_file=dict(argstr='%s',
    genfile=True,
    position=-4,
    xor=['complex_out_file', 'real_out_file', 'imaginary_out_file', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
    ),
    output_type=dict(),
    phase_in_file=dict(argstr='%s',
    position=3,
    ),
    phase_out_file=dict(argstr='%s',
    genfile=True,
    position=-3,
    xor=['complex_out_file', 'real_out_file', 'imaginary_out_file', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
    ),
    real_cartesian=dict(argstr='-realcartesian',
    position=1,
    xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
    ),
    real_in_file=dict(argstr='%s',
    position=2,
    ),
    real_out_file=dict(argstr='%s',
    genfile=True,
    position=-4,
    xor=['complex_out_file', 'magnitude_out_file', 'phase_out_file', 'real_polar', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
    ),
    real_polar=dict(argstr='-realpolar',
    position=1,
    xor=['real_polar', 'real_cartesian', 'complex_cartesian', 'complex_polar', 'complex_split', 'complex_merge'],
    ),
    start_vol=dict(argstr='%d',
    position=-2,
    ),
    terminal_output=dict(nohash=True,
    ),
    )
    inputs = Complex.input_spec()

    # Yield one assert per (trait, metadata key) pair so each mismatch is
    # reported individually by the nose-style runner.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_Complex_outputs():
    # Auto-generated interface test (see file header: DO NOT EDIT by hand).
    # Every declared output must exist (with empty metadata) on the
    # interface's output spec.
    output_map = dict(complex_out_file=dict(),
    imaginary_out_file=dict(),
    magnitude_out_file=dict(),
    phase_out_file=dict(),
    real_out_file=dict(),
    )
    outputs = Complex.output_spec()

    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
sujeet4github/MyLangUtils | refs/heads/master | LangPython/oreilly-intro-to-flask-video/venv/lib/python3.6/site-packages/sqlalchemy/orm/loading.py | 24 | # orm/loading.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to convert database
rows into object instances and associated state.
the functions here are called primarily by Query, Mapper,
as well as some of the attribute loading strategies.
"""
from __future__ import absolute_import
from .. import util
from . import attributes, exc as orm_exc
from ..sql import util as sql_util
from . import strategy_options
from .util import _none_set, state_str
from .base import _SET_DEFERRED_EXPIRED, _DEFER_FOR_STATE
from .. import exc as sa_exc
import collections
_new_runid = util.counter()
def instances(query, cursor, context):
    """Return an ORM result as an iterator."""
    context.runid = _new_runid()

    # de-duplicate rows only when the query has mapper entities
    filtered = query._has_mapper_entities

    single_entity = len(query._entities) == 1 and \
        query._entities[0].supports_single_entity

    if filtered:
        # rows hash by identity: entities by id(), plain values by value
        if single_entity:
            filter_fn = id
        else:
            def filter_fn(row):
                return tuple(
                    id(item)
                    if ent.use_id_for_hash
                    else item
                    for ent, item in zip(query._entities, row)
                )

    try:
        # one (row processor, label) pair per query entity
        (process, labels) = \
            list(zip(*[
                query_entity.row_processor(query,
                                           context, cursor)
                for query_entity in query._entities
            ]))

        if not single_entity:
            keyed_tuple = util.lightweight_named_tuple('result', labels)

        while True:
            # per-batch scratch space for partially-populated states
            context.partials = {}

            if query._yield_per:
                fetch = cursor.fetchmany(query._yield_per)
                if not fetch:
                    break
            else:
                fetch = cursor.fetchall()

            if single_entity:
                proc = process[0]
                rows = [proc(row) for row in fetch]
            else:
                rows = [keyed_tuple([proc(row) for proc in process])
                        for row in fetch]

            if filtered:
                rows = util.unique_list(rows, filter_fn)

            for row in rows:
                yield row

            if not query._yield_per:
                # without yield_per, everything was fetched in one pass
                break
    except Exception as err:
        # close the cursor before re-raising with original traceback
        cursor.close()
        util.raise_from_cause(err)
@util.dependencies("sqlalchemy.orm.query")
def merge_result(querylib, query, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session."""
session = query.session
if load:
# flush current contents if we expect to load data
session._autoflush()
autoflush = session.autoflush
try:
session.autoflush = False
single_entity = len(query._entities) == 1
if single_entity:
if isinstance(query._entities[0], querylib._MapperEntity):
result = [session._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load, _recursive={}, _resolve_conflict_map={})
for instance in iterator]
else:
result = list(iterator)
else:
mapped_entities = [i for i, e in enumerate(query._entities)
if isinstance(e, querylib._MapperEntity)]
result = []
keys = [ent._label_name for ent in query._entities]
keyed_tuple = util.lightweight_named_tuple('result', keys)
for row in iterator:
newrow = list(row)
for i in mapped_entities:
if newrow[i] is not None:
newrow[i] = session._merge(
attributes.instance_state(newrow[i]),
attributes.instance_dict(newrow[i]),
load=load, _recursive={}, _resolve_conflict_map={})
result.append(keyed_tuple(newrow))
return iter(result)
finally:
session.autoflush = autoflush
def get_from_identity(session, key, passive):
    """Look up the given key in the given session's identity map,
    check the object for expired state if found.

    """
    instance = session.identity_map.get(key)
    if instance is None:
        return None

    state = attributes.instance_state(instance)

    if not state.expired:
        return instance

    # expired - ensure it still exists before handing it out
    if not passive & attributes.SQL_OK:
        # SQL not permitted here; cannot verify
        # TODO: no coverage here
        return attributes.PASSIVE_NO_RESULT

    if not passive & attributes.RELATED_OBJECT_OK:
        # this mode is used within a flush and the instance's
        # expired state will be checked soon enough, if necessary
        return instance

    try:
        state._load_expired(state, passive)
    except orm_exc.ObjectDeletedError:
        session._remove_newly_deleted([state])
        return None
    return instance
def load_on_ident(query, key,
                  refresh_state=None, lockmode=None,
                  only_load_props=None):
    """Load the given identity key from the database."""

    # key is an identity-map key; its second element holds the PK values
    if key is not None:
        ident = key[1]
    else:
        ident = None

    if refresh_state is None:
        q = query._clone()
        q._get_condition()
    else:
        q = query._clone()

    if ident is not None:
        mapper = query._mapper_zero()

        # mapper-cached "pk = :param" criterion and its bind params
        (_get_clause, _get_params) = mapper._get_clause

        # None present in ident - turn those comparisons
        # into "IS NULL"
        if None in ident:
            nones = set([
                _get_params[col].key for col, value in
                zip(mapper.primary_key, ident) if value is None
            ])
            _get_clause = sql_util.adapt_criterion_to_null(
                _get_clause, nones)

        _get_clause = q._adapt_clause(_get_clause, True, False)
        q._criterion = _get_clause

        # bind each identity value to its primary-key parameter
        params = dict([
            (_get_params[primary_key].key, id_val)
            for id_val, primary_key in zip(ident, mapper.primary_key)
        ])

        q._params = params

    # a lock request implies the row's version must be re-checked
    if lockmode is not None:
        version_check = True
        q = q.with_lockmode(lockmode)
    elif query._for_update_arg is not None:
        version_check = True
        q._for_update_arg = query._for_update_arg
    else:
        version_check = False

    q._get_options(
        populate_existing=bool(refresh_state),
        version_check=version_check,
        only_load_props=only_load_props,
        refresh_state=refresh_state)
    # ordering is irrelevant for a single-identity fetch
    q._order_by = None

    try:
        return q.one()
    except orm_exc.NoResultFound:
        return None
def _setup_entity_query(
        context, mapper, query_entity,
        path, adapter, column_collection,
        with_polymorphic=None, only_load_props=None,
        polymorphic_discriminator=None, **kw):
    """Run setup() for each mapped property of an entity, collecting
    its columns into ``column_collection`` and memoizing fast
    column-populator lookups on the path."""

    if with_polymorphic:
        poly_properties = mapper._iterate_polymorphic_properties(
            with_polymorphic)
    else:
        poly_properties = mapper._polymorphic_properties

    # filled in by each property's setup(); read back later by
    # _instance_processor via the "memoized_setups" path key
    quick_populators = {}

    path.set(
        context.attributes,
        "memoized_setups",
        quick_populators)

    for value in poly_properties:
        if only_load_props and \
                value.key not in only_load_props:
            continue
        value.setup(
            context,
            query_entity,
            path,
            adapter,
            only_load_props=only_load_props,
            column_collection=column_collection,
            memoized_populators=quick_populators,
            **kw
        )

    # add a discriminator column only when it differs from the mapper's own
    if polymorphic_discriminator is not None and \
            polymorphic_discriminator \
            is not mapper.polymorphic_on:

        if adapter:
            pd = adapter.columns[polymorphic_discriminator]
        else:
            pd = polymorphic_discriminator
        column_collection.append(pd)
def _instance_processor(
        mapper, context, result, path, adapter,
        only_load_props=None, refresh_state=None,
        polymorphic_discriminator=None,
        _polymorphic_from=None):
    """Produce a mapper level row processor callable
    which processes rows into mapped instances."""

    # note that this method, most of which exists in a closure
    # called _instance(), resists being broken out, as
    # attempts to do so tend to add significant function
    # call overhead.  _instance() is the most
    # performance-critical section in the whole ORM.

    pk_cols = mapper.primary_key

    if adapter:
        pk_cols = [adapter.columns[c] for c in pk_cols]

    identity_class = mapper._identity_class

    # populators buckets callables by phase, consumed by
    # _populate_full / _populate_partial:
    #   "quick"/"new"/"delayed" - first row for an identity
    #   "existing"              - subsequent rows for the same identity
    #   "expire"                - attributes to mark expired
    #   "eager"                 - eager-loader callbacks
    populators = collections.defaultdict(list)

    props = mapper._prop_set
    if only_load_props is not None:
        props = props.intersection(
            mapper._props[k] for k in only_load_props)

    # fast column lookups memoized by _setup_entity_query
    quick_populators = path.get(
        context.attributes, "memoized_setups", _none_set)

    for prop in props:
        if prop in quick_populators:
            # this is an inlined path just for column-based attributes.
            col = quick_populators[prop]
            if col is _DEFER_FOR_STATE:
                populators["new"].append(
                    (prop.key, prop._deferred_column_loader))
            elif col is _SET_DEFERRED_EXPIRED:
                # note that in this path, we are no longer
                # searching in the result to see if the column might
                # be present in some unexpected way.
                populators["expire"].append((prop.key, False))
            else:
                if adapter:
                    col = adapter.columns[col]
                getter = result._getter(col, False)
                if getter:
                    populators["quick"].append((prop.key, getter))
                else:
                    # fall back to the ColumnProperty itself, which
                    # will iterate through all of its columns
                    # to see if one fits
                    prop.create_row_processor(
                        context, path, mapper, result, adapter, populators)
        else:
            prop.create_row_processor(
                context, path, mapper, result, adapter, populators)

    propagate_options = context.propagate_options
    load_path = context.query._current_path + path \
        if context.query._current_path.path else path

    # hoist frequently-used attributes to locals for the hot closure below
    session_identity_map = context.session.identity_map

    populate_existing = context.populate_existing or mapper.always_refresh
    load_evt = bool(mapper.class_manager.dispatch.load)
    refresh_evt = bool(mapper.class_manager.dispatch.refresh)
    persistent_evt = bool(context.session.dispatch.loaded_as_persistent)
    if persistent_evt:
        loaded_as_persistent = context.session.dispatch.loaded_as_persistent
    instance_state = attributes.instance_state
    instance_dict = attributes.instance_dict
    session_id = context.session.hash_key
    version_check = context.version_check
    runid = context.runid

    if refresh_state:
        refresh_identity_key = refresh_state.key
        if refresh_identity_key is None:
            # super-rare condition; a refresh is being called
            # on a non-instance-key instance; this is meant to only
            # occur within a flush()
            refresh_identity_key = \
                mapper._identity_key_from_state(refresh_state)
    else:
        refresh_identity_key = None

    # predicate deciding whether a PK tuple is too NULL to yield an entity
    if mapper.allow_partial_pks:
        is_not_primary_key = _none_set.issuperset
    else:
        is_not_primary_key = _none_set.intersection

    def _instance(row):

        # determine the state that we'll be populating
        if refresh_identity_key:
            # fixed state that we're refreshing
            state = refresh_state
            instance = state.obj()
            dict_ = instance_dict(instance)
            isnew = state.runid != runid
            currentload = True
            loaded_instance = False
        else:
            # look at the row, see if that identity is in the
            # session, or we have to create a new one
            identitykey = (
                identity_class,
                tuple([row[column] for column in pk_cols])
            )

            instance = session_identity_map.get(identitykey)

            if instance is not None:
                # existing instance
                state = instance_state(instance)
                dict_ = instance_dict(instance)

                isnew = state.runid != runid
                currentload = not isnew
                loaded_instance = False

                if version_check and not currentload:
                    _validate_version_id(mapper, state, dict_, row, adapter)

            else:
                # create a new instance

                # check for non-NULL values in the primary key columns,
                # else no entity is returned for the row
                if is_not_primary_key(identitykey[1]):
                    return None

                isnew = True
                currentload = True
                loaded_instance = True

                instance = mapper.class_manager.new_instance()

                dict_ = instance_dict(instance)
                state = instance_state(instance)
                state.key = identitykey

                # attach instance to session.
                state.session_id = session_id
                session_identity_map._add_unpresent(state, identitykey)

        # populate.  this looks at whether this state is new
        # for this load or was existing, and whether or not this
        # row is the first row with this identity.
        if currentload or populate_existing:
            # full population routines.  Objects here are either
            # just created, or we are doing a populate_existing

            # be conservative about setting load_path when populate_existing
            # is in effect; want to maintain options from the original
            # load.  see test_expire->test_refresh_maintains_deferred_options
            if isnew and (propagate_options or not populate_existing):
                state.load_options = propagate_options
                state.load_path = load_path

            _populate_full(
                context, row, state, dict_, isnew, load_path,
                loaded_instance, populate_existing, populators)

            if isnew:
                # fire load/refresh events exactly once per load
                if loaded_instance:
                    if load_evt:
                        state.manager.dispatch.load(state, context)
                    if persistent_evt:
                        loaded_as_persistent(context.session, state.obj())
                elif refresh_evt:
                    state.manager.dispatch.refresh(
                        state, context, only_load_props)

            if populate_existing or state.modified:
                if refresh_state and only_load_props:
                    state._commit(dict_, only_load_props)
                else:
                    state._commit_all(dict_, session_identity_map)
        else:
            # partial population routines, for objects that were already
            # in the Session, but a row matches them; apply eager loaders
            # on existing objects, etc.
            unloaded = state.unloaded
            isnew = state not in context.partials

            if not isnew or unloaded or populators["eager"]:
                # state is having a partial set of its attributes
                # refreshed.  Populate those attributes,
                # and add to the "context.partials" collection.

                to_load = _populate_partial(
                    context, row, state, dict_, isnew, load_path,
                    unloaded, populators)

                if isnew:
                    if refresh_evt:
                        state.manager.dispatch.refresh(
                            state, context, to_load)

                    state._commit(dict_, to_load)

        return instance

    if mapper.polymorphic_map and not _polymorphic_from and not refresh_state:
        # if we are doing polymorphic, dispatch to a different _instance()
        # method specific to the subclass mapper
        _instance = _decorate_polymorphic_switch(
            _instance, context, mapper, result, path,
            polymorphic_discriminator, adapter)

    return _instance
def _populate_full(
        context, row, state, dict_, isnew, load_path,
        loaded_instance, populate_existing, populators):
    """Apply the full set of row populators to a state that is either
    freshly created or being fully refreshed."""
    if isnew:
        # first time we are seeing a row with this identity.
        state.runid = context.runid

        for key, getter in populators["quick"]:
            dict_[key] = getter(row)
        if populate_existing:
            for key, set_callable in populators["expire"]:
                dict_.pop(key, None)
                if set_callable:
                    state.expired_attributes.add(key)
        else:
            for key, set_callable in populators["expire"]:
                if set_callable:
                    state.expired_attributes.add(key)
        for key, populator in populators["new"]:
            populator(state, dict_, row)
        for key, populator in populators["delayed"]:
            populator(state, dict_, row)
    elif load_path != state.load_path:
        # new load path, e.g. object is present in more than one
        # column position in a series of rows
        state.load_path = load_path

        # if we have data, and the data isn't in the dict, OK, let's put
        # it in.
        for key, getter in populators["quick"]:
            if key not in dict_:
                dict_[key] = getter(row)

        # otherwise treat like an "already seen" row
        for key, populator in populators["existing"]:
            populator(state, dict_, row)
            # TODO:  allow "existing" populator to know this is
            # a new path for the state:
            # populator(state, dict_, row, new_path=True)

    else:
        # have already seen rows with this identity in this same path.
        for key, populator in populators["existing"]:
            populator(state, dict_, row)

            # TODO: same path
            # populator(state, dict_, row, new_path=False)
def _populate_partial(
        context, row, state, dict_, isnew, load_path,
        unloaded, populators):
    """Apply populators only to the attributes in ``to_load`` for a state
    already present in the Session; return the set of keys loaded."""
    if not isnew:
        # continuation of a partial load started by an earlier row
        to_load = context.partials[state]
        for key, populator in populators["existing"]:
            if key in to_load:
                populator(state, dict_, row)
    else:
        # restrict loading to the currently-unloaded attributes and
        # remember that choice for subsequent rows of this identity
        to_load = unloaded
        context.partials[state] = to_load

        for key, getter in populators["quick"]:
            if key in to_load:
                dict_[key] = getter(row)
        for key, set_callable in populators["expire"]:
            if key in to_load:
                dict_.pop(key, None)
                if set_callable:
                    state.expired_attributes.add(key)
        for key, populator in populators["new"]:
            if key in to_load:
                populator(state, dict_, row)
        for key, populator in populators["delayed"]:
            if key in to_load:
                populator(state, dict_, row)
    # eager loaders run for any attribute that is not unloaded
    for key, populator in populators["eager"]:
        if key not in unloaded:
            populator(state, dict_, row)

    return to_load
def _validate_version_id(mapper, state, dict_, row, adapter):
    """Raise StaleDataError when the row's version id differs from the
    version currently recorded on the given object state."""
    version_id_col = mapper.version_id_col

    if version_id_col is None:
        return

    if adapter:
        version_id_col = adapter.columns[version_id_col]

    loaded_version = row[version_id_col]
    if mapper._get_state_attr_by_column(
            state, dict_, mapper.version_id_col) != loaded_version:
        raise orm_exc.StaleDataError(
            "Instance '%s' has version id '%s' which "
            "does not match database-loaded version id '%s'."
            % (state_str(state), mapper._get_state_attr_by_column(
                state, dict_, mapper.version_id_col),
                loaded_version))
def _decorate_polymorphic_switch(
        instance_fn, context, mapper, result, path,
        polymorphic_discriminator, adapter):
    """Wrap ``instance_fn`` so each row is dispatched to the processor of
    the sub-mapper selected by the row's discriminator column."""
    if polymorphic_discriminator is not None:
        polymorphic_on = polymorphic_discriminator
    else:
        polymorphic_on = mapper.polymorphic_on
    if polymorphic_on is None:
        # not actually polymorphic; leave the processor untouched
        return instance_fn

    if adapter:
        polymorphic_on = adapter.columns[polymorphic_on]

    def configure_subclass_mapper(discriminator):
        try:
            sub_mapper = mapper.polymorphic_map[discriminator]
        except KeyError:
            raise AssertionError(
                "No such polymorphic_identity %r is defined" %
                discriminator)
        else:
            if sub_mapper is mapper:
                # same mapper; no sub-processor needed
                return None

            return _instance_processor(
                sub_mapper, context, result,
                path, adapter, _polymorphic_from=mapper)

    # memoizes one sub-processor per discriminator value
    polymorphic_instances = util.PopulateDict(
        configure_subclass_mapper
    )

    def polymorphic_instance(row):
        discriminator = row[polymorphic_on]
        if discriminator is not None:
            _instance = polymorphic_instances[discriminator]
            if _instance:
                return _instance(row)
        # NULL discriminator or base mapper: fall back to the base processor
        return instance_fn(row)
    return polymorphic_instance
def load_scalar_attributes(mapper, state, attribute_names):
    """initiate a column-based attribute refresh operation."""

    # assert mapper is _state_mapper(state)
    session = state.session
    if not session:
        raise orm_exc.DetachedInstanceError(
            "Instance %s is not bound to a Session; "
            "attribute refresh operation cannot proceed" %
            (state_str(state)))

    has_key = bool(state.key)

    result = False

    if mapper.inherits and not mapper.concrete:
        # because we are using Core to produce a select() that we
        # pass to the Query, we aren't calling setup() for mapped
        # attributes; in 1.0 this means deferred attrs won't get loaded
        # by default
        statement = mapper._optimized_get_statement(state, attribute_names)
        if statement is not None:
            result = load_on_ident(
                session.query(mapper).
                options(
                    strategy_options.Load(mapper).undefer("*")
                ).from_statement(statement),
                None,
                only_load_props=attribute_names,
                refresh_state=state
            )

    if result is False:
        # fall back to a plain identity-key load
        if has_key:
            identity_key = state.key
        else:
            # this codepath is rare - only valid when inside a flush, and the
            # object is becoming persistent but hasn't yet been assigned
            # an identity_key.
            # check here to ensure we have the attrs we need.
            pk_attrs = [mapper._columntoproperty[col].key
                        for col in mapper.primary_key]
            if state.expired_attributes.intersection(pk_attrs):
                raise sa_exc.InvalidRequestError(
                    "Instance %s cannot be refreshed - it's not "
                    " persistent and does not "
                    "contain a full primary key." % state_str(state))
            identity_key = mapper._identity_key_from_state(state)

        if (_none_set.issubset(identity_key) and
                not mapper.allow_partial_pks) or \
                _none_set.issuperset(identity_key):
            # all-NULL (or partially-NULL, when partial PKs are disallowed)
            # identity: nothing sensible to load
            util.warn_limited(
                "Instance %s to be refreshed doesn't "
                "contain a full primary key - can't be refreshed "
                "(and shouldn't be expired, either).",
                state_str(state))
            return

        result = load_on_ident(
            session.query(mapper),
            identity_key,
            refresh_state=state,
            only_load_props=attribute_names)

    # if instance is pending, a refresh operation
    # may not complete (even if PK attributes are assigned)
    if has_key and result is None:
        raise orm_exc.ObjectDeletedError(state)
|
smi96/django-blog_website | refs/heads/master | lib/python2.7/site-packages/PIL/ImageDraw.py | 13 | #
# The Python Imaging Library
# $Id$
#
# drawing interface operations
#
# History:
# 1996-04-13 fl Created (experimental)
# 1996-08-07 fl Filled polygons, ellipses.
# 1996-08-13 fl Added text support
# 1998-06-28 fl Handle I and F images
# 1998-12-29 fl Added arc; use arc primitive to draw ellipses
# 1999-01-10 fl Added shape stuff (experimental)
# 1999-02-06 fl Added bitmap support
# 1999-02-11 fl Changed all primitives to take options
# 1999-02-20 fl Fixed backwards compatibility
# 2000-10-12 fl Copy on write, when necessary
# 2001-02-18 fl Use default ink for bitmap/text also in fill mode
# 2002-10-24 fl Added support for CSS-style color strings
# 2002-12-10 fl Added experimental support for RGBA-on-RGB drawing
# 2002-12-11 fl Refactored low-level drawing API (work in progress)
# 2004-08-26 fl Made Draw() a factory function, added getdraw() support
# 2004-09-04 fl Added width support to line primitive
# 2004-09-10 fl Added font mode handling
# 2006-06-19 fl Added font bearing support (getmask2)
#
# Copyright (c) 1997-2006 by Secret Labs AB
# Copyright (c) 1996-2006 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import numbers
import warnings
from PIL import Image, ImageColor
from PIL._util import isStringType
##
# A simple 2D drawing interface for PIL images.
# <p>
# Application code should use the <b>Draw</b> factory, instead of
# directly.
class ImageDraw(object):
    """Simple 2D drawing interface; a thin wrapper over ``Image.core.draw``."""

    ##
    # Create a drawing instance.
    #
    # @param im The image to draw in.
    # @param mode Optional mode to use for color values.  For RGB
    #    images, this argument can be RGB or RGBA (to blend the
    #    drawing into the image).  For all other modes, this argument
    #    must be the same as the image mode.  If omitted, the mode
    #    defaults to the mode of the image.

    def __init__(self, im, mode=None):
        im.load()
        if im.readonly:
            im._copy()  # make it writeable
        blend = 0
        if mode is None:
            mode = im.mode
        if mode != im.mode:
            if mode == "RGBA" and im.mode == "RGB":
                blend = 1
            else:
                raise ValueError("mode mismatch")
        if mode == "P":
            self.palette = im.palette
        else:
            self.palette = None
        self.im = im.im
        self.draw = Image.core.draw(self.im, blend)
        self.mode = mode
        if mode in ("I", "F"):
            self.ink = self.draw.draw_ink(1, mode)
        else:
            self.ink = self.draw.draw_ink(-1, mode)
        if mode in ("1", "P", "I", "F"):
            # FIXME: fix Fill2 to properly support matte for I+F images
            self.fontmode = "1"
        else:
            self.fontmode = "L"  # aliasing is okay for other modes
        self.fill = 0
        self.font = None

    def setink(self, ink):
        # removed API; kept only to give a helpful error
        raise Exception("setink() has been removed. " +
                        "Please use keyword arguments instead.")

    def setfill(self, onoff):
        # removed API; kept only to give a helpful error
        raise Exception("setfill() has been removed. " +
                        "Please use keyword arguments instead.")

    def setfont(self, font):
        # deprecated API; assign self.font directly instead
        warnings.warn("setfont() is deprecated. " +
                      "Please set the attribute directly instead.")
        # compatibility
        self.font = font

    ##
    # Get the current default font.

    def getfont(self):
        if not self.font:
            # FIXME: should add a font repository
            from PIL import ImageFont
            self.font = ImageFont.load_default()
        return self.font

    def _getink(self, ink, fill=None):
        """Resolve (ink, fill) arguments into core ink objects.

        When both are None, falls back to the instance default ink,
        used as fill or outline depending on ``self.fill``.
        """
        if ink is None and fill is None:
            if self.fill:
                fill = self.ink
            else:
                ink = self.ink
        else:
            if ink is not None:
                if isStringType(ink):
                    ink = ImageColor.getcolor(ink, self.mode)
                if self.palette and not isinstance(ink, numbers.Number):
                    ink = self.palette.getcolor(ink)
                ink = self.draw.draw_ink(ink, self.mode)
            if fill is not None:
                if isStringType(fill):
                    fill = ImageColor.getcolor(fill, self.mode)
                if self.palette and not isinstance(fill, numbers.Number):
                    fill = self.palette.getcolor(fill)
                fill = self.draw.draw_ink(fill, self.mode)
        return ink, fill

    ##
    # Draw an arc.

    def arc(self, xy, start, end, fill=None):
        ink, fill = self._getink(fill)
        if ink is not None:
            self.draw.draw_arc(xy, start, end, ink)

    ##
    # Draw a bitmap.

    def bitmap(self, xy, bitmap, fill=None):
        bitmap.load()
        ink, fill = self._getink(fill)
        if ink is None:
            ink = fill
        if ink is not None:
            self.draw.draw_bitmap(xy, bitmap.im, ink)

    ##
    # Draw a chord.

    def chord(self, xy, start, end, fill=None, outline=None):
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_chord(xy, start, end, fill, 1)
        if ink is not None:
            self.draw.draw_chord(xy, start, end, ink, 0)

    ##
    # Draw an ellipse.

    def ellipse(self, xy, fill=None, outline=None):
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_ellipse(xy, fill, 1)
        if ink is not None:
            self.draw.draw_ellipse(xy, ink, 0)

    ##
    # Draw a line, or a connected sequence of line segments.

    def line(self, xy, fill=None, width=0):
        ink, fill = self._getink(fill)
        if ink is not None:
            self.draw.draw_lines(xy, ink, width)

    ##
    # (Experimental) Draw a shape.

    def shape(self, shape, fill=None, outline=None):
        # experimental
        shape.close()
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_outline(shape, fill, 1)
        if ink is not None:
            self.draw.draw_outline(shape, ink, 0)

    ##
    # Draw a pieslice.

    def pieslice(self, xy, start, end, fill=None, outline=None):
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_pieslice(xy, start, end, fill, 1)
        if ink is not None:
            self.draw.draw_pieslice(xy, start, end, ink, 0)

    ##
    # Draw one or more individual pixels.

    def point(self, xy, fill=None):
        ink, fill = self._getink(fill)
        if ink is not None:
            self.draw.draw_points(xy, ink)

    ##
    # Draw a polygon.

    def polygon(self, xy, fill=None, outline=None):
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_polygon(xy, fill, 1)
        if ink is not None:
            self.draw.draw_polygon(xy, ink, 0)

    ##
    # Draw a rectangle.

    def rectangle(self, xy, fill=None, outline=None):
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            self.draw.draw_rectangle(xy, fill, 1)
        if ink is not None:
            self.draw.draw_rectangle(xy, ink, 0)

    ##
    # Draw text.

    def _multiline_check(self, text):
        """Return True if *text* (str or bytes) contains a newline."""
        split_character = "\n" if isinstance(text, type("")) else b"\n"
        return split_character in text

    def _multiline_split(self, text):
        """Split *text* (str or bytes) into its lines."""
        split_character = "\n" if isinstance(text, type("")) else b"\n"
        return text.split(split_character)

    def text(self, xy, text, fill=None, font=None, anchor=None):
        """Draw a (possibly multiline) string at the given position."""
        if self._multiline_check(text):
            return self.multiline_text(xy, text, fill, font, anchor)
        ink, fill = self._getink(fill)
        if font is None:
            font = self.getfont()
        if ink is None:
            ink = fill
        if ink is not None:
            try:
                # getmask2 also returns the glyph offset (bearing)
                mask, offset = font.getmask2(text, self.fontmode)
                xy = xy[0] + offset[0], xy[1] + offset[1]
            except AttributeError:
                try:
                    mask = font.getmask(text, self.fontmode)
                except TypeError:
                    mask = font.getmask(text)
            self.draw.draw_bitmap(xy, mask, ink)

    def multiline_text(self, xy, text, fill=None, font=None, anchor=None,
                       spacing=4, align="left"):
        """Draw each line of *text*, applying horizontal alignment."""
        widths = []
        max_width = 0
        lines = self._multiline_split(text)
        # line height is measured from a reference character plus spacing
        line_spacing = self.textsize('A', font=font)[1] + spacing
        for line in lines:
            line_width, line_height = self.textsize(line, font)
            widths.append(line_width)
            max_width = max(max_width, line_width)
        left, top = xy
        for idx, line in enumerate(lines):
            if align == "left":
                pass  # left = x
            elif align == "center":
                left += (max_width - widths[idx]) / 2.0
            elif align == "right":
                left += (max_width - widths[idx])
            else:
                assert False, 'align must be "left", "center" or "right"'
            self.text((left, top), line, fill, font, anchor)
            top += line_spacing
            # reset the x origin for the next line
            left = xy[0]

    ##
    # Get the size of a given string, in pixels.

    def textsize(self, text, font=None):
        """Return (width, height) of *text* rendered with *font*."""
        if self._multiline_check(text):
            return self.multiline_textsize(text, font)
        if font is None:
            font = self.getfont()
        return font.getsize(text)

    def multiline_textsize(self, text, font=None, spacing=4):
        """Return (width, height) of multiline *text* with given spacing."""
        max_width = 0
        lines = self._multiline_split(text)
        line_spacing = self.textsize('A', font=font)[1] + spacing
        for line in lines:
            line_width, line_height = self.textsize(line, font)
            max_width = max(max_width, line_width)
        return max_width, len(lines)*line_spacing
##
# A simple 2D drawing interface for PIL images.
#
# @param im The image to draw in.
# @param mode Optional mode to use for color values. For RGB
# images, this argument can be RGB or RGBA (to blend the
# drawing into the image). For all other modes, this argument
# must be the same as the image mode. If omitted, the mode
# defaults to the mode of the image.
def Draw(im, mode=None):
    """Return a drawing context for *im*.

    If the image object provides its own getdraw() hook (e.g. to blend
    drawing into the image) defer to it; otherwise build a standard
    ImageDraw instance.
    NOTE(review): the except clause also hides AttributeErrors raised
    *inside* a custom getdraw() — confirm before refactoring.
    """
    try:
        return im.getdraw(mode)
    except AttributeError:
        return ImageDraw(im, mode)
# experimental access to the outline API
try:
    Outline = Image.core.outline
except AttributeError:
    # Core C module built without outline support; expose None so callers
    # can feature-test with `if Outline is not None`.
    Outline = None
##
# (Experimental) A more advanced 2D drawing interface for PIL images,
# based on the WCK interface.
#
# @param im The image to draw in.
# @param hints An optional list of hints.
# @return A (drawing context, drawing resource factory) tuple.
def getdraw(im=None, hints=None):
    """Return a (drawing context, drawing resource factory) pair for *im*.

    Experimental WCK-style interface.  With no hints (or the "nicest"
    hint) the C aggdraw backend is preferred when importable; otherwise
    the pure-Python ImageDraw2 module is used as the handler.
    """
    # FIXME: this needs more work!
    # FIXME: come up with a better 'hints' scheme.
    handler = None
    if not hints or "nicest" in hints:
        try:
            from PIL import _imagingagg as handler
        except ImportError:
            pass
    if handler is None:
        from PIL import ImageDraw2 as handler
    if im:
        im = handler.Draw(im)
    return im, handler
##
# (experimental) Fills a bounded region with a given color.
#
# @param image Target image.
# @param xy Seed position (a 2-item coordinate tuple).
# @param value Fill color.
# @param border Optional border value. If given, the region consists of
# pixels with a color different from the border color. If not given,
# the region consists of pixels having the same color as the seed
# pixel.
def floodfill(image, xy, value, border=None):
    """Fill bounded region.

    Starting from seed *xy*, recolour with *value* either (a) every
    4-connected pixel matching the seed colour when *border* is None, or
    (b) every pixel that is neither *value* nor *border* when a border
    colour is given.  Out-of-image or already-filled seeds are no-ops.
    """
    # based on an implementation by Eric S. Raymond
    pixel = image.load()
    x, y = xy
    try:
        background = pixel[x, y]
        if background == value:
            return  # seed point already has fill color
        pixel[x, y] = value
    except IndexError:
        return  # seed point outside image
    # Single spread predicate replaces the two near-duplicate BFS loops
    # of the original implementation.
    if border is None:
        def _spreads(p):
            return p == background
    else:
        def _spreads(p):
            return p != value and p != border
    edge = [(x, y)]
    while edge:
        newedge = []
        for (x, y) in edge:
            for (s, t) in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
                try:
                    p = pixel[s, t]
                except IndexError:
                    pass  # neighbour outside the image
                else:
                    if _spreads(p):
                        pixel[s, t] = value
                        newedge.append((s, t))
        edge = newedge
|
fengbaicanhe/intellij-community | refs/heads/master | python/helpers/docutils/writers/odf_odt/pygmentsformatter.py | 244 | # $Id: pygmentsformatter.py 5853 2009-01-19 21:02:02Z dkuhlman $
# Author: Dave Kuhlman <dkuhlman@rexx.com>
# Copyright: This module has been placed in the public domain.
"""
Additional support for Pygments formatter.
"""
import pygments
import pygments.formatter
class OdtPygmentsFormatter(pygments.formatter.Formatter):
    """Base Pygments formatter for ODF/ODT output.

    Holds the writer-supplied style-name resolver and XML-escape callable
    that concrete subclasses use to emit <text:span> markup.
    """
    def __init__(self, rststyle_function, escape_function):
        pygments.formatter.Formatter.__init__(self)
        self.rststyle_function = rststyle_function
        self.escape_function = escape_function
    def rststyle(self, name, parameters=( )):
        # Delegate style-name lookup (e.g. 'codeblock-keyword') to the writer.
        return self.rststyle_function(name, parameters)
class OdtPygmentsProgFormatter(OdtPygmentsFormatter):
    """Formatter for program-source code blocks.

    Wraps each recognised token in a styled <text:span>; unrecognised
    tokens are written through unstyled.
    """
    def format(self, tokensource, outfile):
        tokenclass = pygments.token.Token
        # Token type -> rststyle name.  Replaces the original long
        # if/elif chain; lookup is by exact token type, as before.
        style_names = {
            tokenclass.Keyword: 'codeblock-keyword',
            tokenclass.Literal.String: 'codeblock-string',
            tokenclass.Literal.Number.Integer: 'codeblock-number',
            tokenclass.Literal.Number.Integer.Long: 'codeblock-number',
            tokenclass.Literal.Number.Float: 'codeblock-number',
            tokenclass.Literal.Number.Hex: 'codeblock-number',
            tokenclass.Literal.Number.Oct: 'codeblock-number',
            tokenclass.Literal.Number: 'codeblock-number',
            tokenclass.Operator: 'codeblock-operator',
            tokenclass.Comment: 'codeblock-comment',
            tokenclass.Name.Class: 'codeblock-classname',
            tokenclass.Name.Function: 'codeblock-functionname',
            tokenclass.Name: 'codeblock-name',
        }
        for ttype, value in tokensource:
            value = self.escape_function(value)
            style_name = style_names.get(ttype)
            if style_name is not None:
                s1 = '<text:span text:style-name="%s">%s</text:span>' % \
                    (self.rststyle(style_name), value, )
            else:
                s1 = value
            outfile.write(s1)
class OdtPygmentsLaTeXFormatter(OdtPygmentsFormatter):
    """Formatter for LaTeX code blocks (smaller set of token styles)."""
    def format(self, tokensource, outfile):
        tokenclass = pygments.token.Token
        # Token type -> rststyle name; replaces the duplicated if/elif
        # chain of the original implementation.
        style_names = {
            tokenclass.Keyword: 'codeblock-keyword',
            tokenclass.Literal.String: 'codeblock-string',
            tokenclass.Literal.String.Backtick: 'codeblock-string',
            tokenclass.Name.Attribute: 'codeblock-operator',
            tokenclass.Comment: 'codeblock-comment',
            tokenclass.Name.Builtin: 'codeblock-name',
        }
        for ttype, value in tokensource:
            value = self.escape_function(value)
            style_name = style_names.get(ttype)
            if style_name is None:
                s1 = value
            elif ttype == tokenclass.Comment and value[-1] == '\n':
                # Keep a trailing newline outside the span so line breaks
                # in the generated document stay intact.
                s1 = '<text:span text:style-name="%s">%s</text:span>\n' % \
                    (self.rststyle(style_name), value[:-1], )
            else:
                s1 = '<text:span text:style-name="%s">%s</text:span>' % \
                    (self.rststyle(style_name), value, )
            outfile.write(s1)
|
matiaskotlik/1900-Titles-Reloaded | refs/heads/master | src/vector.py | 1 | import math
from collections import namedtuple
class vec2(namedtuple('vec2', ['x', 'y'])):
    """Immutable 2D vector built on a namedtuple.

    Binary operators accept either another indexable pair (componentwise)
    or a scalar (applied to both components).  True/floor division by
    zero deliberately yields vec2(0, 0) rather than raising.
    """
    def length(self):
        """Return the magnitude of the vector."""
        return math.sqrt(self.length_squared())

    def length_squared(self):
        """Return the squared magnitude (avoids the sqrt)."""
        return (self[0]**2)+(self[1]**2)

    def __len__(self):
        # NOTE(review): len() must return an int but this returns the
        # float magnitude, so calling len(v) raises TypeError at runtime.
        # Left unchanged: "fixing" it would silently change semantics for
        # callers expecting tuple length — confirm intent before touching.
        return self.length()

    def normalize(self):
        """Return a unit-length vector in the same direction."""
        return self / self.length()

    def dist(self, other):
        """Return the Euclidean distance to *other*."""
        return math.sqrt(self.dist_squared(other))

    def dist_squared(self, other):
        """Return the squared distance to *other*."""
        return (self[0]-other[0])**2+(self[1]-other[1])**2

    def mod_all(self, func):
        """Return a new vector with *func* applied to both components."""
        return vec2(func(self[0]), func(self[1]))

    def integer(self):
        """Return a copy with both components truncated to int."""
        return self.mod_all(int)

    def float(self):
        """Return a copy with both components converted to float."""
        return self.mod_all(float)

    def to_angle(self):
        """Return the vector's direction in degrees, in [0, 360)."""
        angle = math.degrees(math.atan2(self[1], self[0]))
        if angle < 0:
            angle += 360
        return angle

    def __mul__(self, other):
        try:
            other.__getitem__
            return vec2(self[0] * other[0], self[1] * other[1])
        except AttributeError:
            return self.mod_all(lambda x: x * other)

    def __add__(self, other):
        try:
            other.__getitem__
            return vec2(self[0] + other[0], self[1] + other[1])
        except AttributeError:
            return self.mod_all(lambda x: x + other)

    def __sub__(self, other):
        try:
            other.__getitem__
            return vec2(self[0] - other[0], self[1] - other[1])
        except AttributeError:
            return self.mod_all(lambda x: x - other)

    def __mod__(self, other):
        try:
            other.__getitem__
            return vec2(self[0] % other[0], self[1] % other[1])
        except AttributeError:
            # BUG FIX: the scalar branch previously applied subtraction
            # (copy-paste from __sub__); scalar modulo must use %.
            return self.mod_all(lambda x: x % other)

    def __floordiv__(self, other):
        try:
            try:
                other.__getitem__
                return vec2(self[0] // other[0], self[1] // other[1])
            except AttributeError:
                return self.mod_all(lambda x: x // other)
        except ZeroDivisionError:
            # Deliberate convention: division by zero yields the origin.
            return vec2(0, 0)

    def __truediv__(self, other):
        try:
            try:
                other.__getitem__
                return vec2(self[0] / other[0], self[1] / other[1])
            except AttributeError:
                return self.mod_all(lambda x: x / other)
        except ZeroDivisionError:
            return vec2(0, 0)

    def __neg__(self):
        return vec2(-self[0], -self[1])

    def __bool__(self):
        # Truthy only when *neither* component is zero.
        return 0 not in self

    def __repr__(self):
        return 'vec2({}, {})'.format(self[0], self[1])

    # Reflected operators reuse the left-operand logic (safe for the
    # commutative-enough componentwise semantics used here).
    __rmul__ = __mul__
    __rtruediv__ = __truediv__
    __rmod__ = __mod__
    __radd__ = __add__
    __rsub__ = __sub__
def toVector(angle):
    """Return the unit vector pointing in direction *angle* (degrees)."""
    radians = math.radians(angle)
    direction = vec2(math.cos(radians), math.sin(radians))
    return direction.normalize()
|
proversity-org/edx-platform | refs/heads/master | cms/djangoapps/contentstore/views/tests/test_credit_eligibility.py | 25 | """
Unit tests for credit eligibility UI in Studio.
"""
import mock
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url
from openedx.core.djangoapps.credit.api import get_credit_requirements
from openedx.core.djangoapps.credit.models import CreditCourse
from openedx.core.djangoapps.credit.signals import on_course_publish
from xmodule.modulestore.tests.factories import CourseFactory
class CreditEligibilityTest(CourseTestCase):
    """
    Base class to test the course settings details view in Studio for credit
    eligibility requirements.
    """
    def setUp(self):
        super(CreditEligibilityTest, self).setUp()
        # Course under test and the URL of its settings (details) page.
        self.course = CourseFactory.create(org='edX', number='dummy', display_name='Credit Course')
        self.course_details_url = reverse_course_url('settings_handler', unicode(self.course.id))
    @mock.patch.dict("django.conf.settings.FEATURES", {'ENABLE_CREDIT_ELIGIBILITY': False})
    def test_course_details_with_disabled_setting(self):
        """
        Test that user don't see credit eligibility requirements in response
        if the feature flag 'ENABLE_CREDIT_ELIGIBILITY' is not enabled.
        """
        response = self.client.get_html(self.course_details_url)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, "Course Credit Requirements")
        self.assertNotContains(response, "Steps required to earn course credit")
    @mock.patch.dict("django.conf.settings.FEATURES", {'ENABLE_CREDIT_ELIGIBILITY': True})
    def test_course_details_with_enabled_setting(self):
        """
        Test that credit eligibility requirements are present in
        response if the feature flag 'ENABLE_CREDIT_ELIGIBILITY' is enabled.
        """
        # verify that credit eligibility requirements block don't show if the
        # course is not set as credit course
        response = self.client.get_html(self.course_details_url)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, "Course Credit Requirements")
        self.assertNotContains(response, "Steps required to earn course credit")
        # verify that credit eligibility requirements block shows if the
        # course is set as credit course and it has eligibility requirements
        credit_course = CreditCourse(course_key=unicode(self.course.id), enabled=True)
        credit_course.save()
        self.assertEqual(len(get_credit_requirements(self.course.id)), 0)
        # test that after publishing course, minimum grade requirement is added
        on_course_publish(self.course.id)
        self.assertEqual(len(get_credit_requirements(self.course.id)), 1)
        response = self.client.get_html(self.course_details_url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Course Credit Requirements")
        self.assertContains(response, "Steps required to earn course credit")
|
learningequality/kolibri | refs/heads/develop | kolibri/core/auth/utils.py | 2 | import sys
from django.utils.six.moves import input
from kolibri.core.auth.models import AdHocGroup
from kolibri.core.auth.models import FacilityDataset
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.models import Membership
from kolibri.core.logger.models import AttemptLog
from kolibri.core.logger.models import ContentSessionLog
from kolibri.core.logger.models import ContentSummaryLog
from kolibri.core.logger.models import MasteryLog
from kolibri.core.logger.models import UserSessionLog
def confirm_or_exit(message):
    """Prompt the user for confirmation; exit(1) unless they type 'yes'.

    Keeps re-prompting until one of 'yes', 'n' or 'no' is entered
    (case-insensitive).
    """
    response = ""
    accepted = ("yes", "n", "no")
    while response not in accepted:
        response = input("{} [Type 'yes' or 'no'.] ".format(message)).lower()
    if response != "yes":
        print("Canceled! Exiting without touching the database.")
        sys.exit(1)
def create_adhoc_group_for_learners(classroom, learners):
    """Create an unnamed AdHocGroup under *classroom* and enroll *learners*.

    A Membership row is created for each learner; returns the new group.
    """
    adhoc_group = AdHocGroup.objects.create(name="", parent=classroom)
    for learner in learners:
        Membership.objects.create(user=learner, collection=adhoc_group)
    return adhoc_group
def _merge_user_models(source_user, target_user):
for f in ["gender", "birth_year", "id_number"]:
source_value = getattr(source_user, f, None)
target_value = getattr(target_user, f, None)
if not target_value and source_value:
setattr(target_user, f, source_value)
target_user.save()
# Serialized fields never copied verbatim when cloning a log row to the
# target user: "id" gets regenerated and "_morango_partition" recomputed
# for the target's partition (see merge_users below).
blocklist = set(["id", "_morango_partition"])
def merge_users(source_user, target_user):
    """
    Utility to merge two users. It makes no assumptions about whether
    the users are in the same facility and does raw copies of all
    associated user data, rather than try to do anything clever.

    Raises ValueError when source and target are the same user.
    """
    if source_user.id == target_user.id:
        raise ValueError("Cannot merge a user with themselves")
    _merge_user_models(source_user, target_user)
    # Maps model class -> {old id: new id}; used below to rewrite foreign
    # keys on copied log rows so they point at the target user's data.
    id_map = {
        FacilityUser: {source_user.id: target_user.id},
        FacilityDataset: {
            source_user.dataset_id: target_user.dataset_id,
        },
    }
    def _merge_log_data(LogModel):
        # Copy every LogModel row of source_user over to target_user,
        # remapping relations through id_map and skipping rows whose
        # (recomputed) id already exists on the target.
        log_map = {}
        id_map[LogModel] = log_map
        new_logs = []
        related_fields = [f for f in LogModel._meta.concrete_fields if f.is_relation]
        source_logs = LogModel.objects.filter(user=source_user)
        target_log_ids = set(
            LogModel.objects.filter(user=target_user).values_list("id", flat=True)
        )
        for log in source_logs:
            # Get all serialializable fields
            data = log.serialize()
            # Remove fields that we explicitly know we don't want to copy
            for f in blocklist:
                if f in data:
                    del data[f]
            # Iterate through each relation and map the old id to the new id for the foreign key
            for relation in related_fields:
                data[relation.attname] = id_map[relation.related_model][
                    data[relation.attname]
                ]
            # If this is a randomly created source id, preserve it, so we can stop the same logs
            # being copied in repeatedly. If it is not random, remove it, so we can recreate
            # it on the target.
            if log.calculate_source_id() is not None:
                del data["_morango_source_id"]
            new_log = LogModel.deserialize(data)
            if not new_log._morango_source_id:
                new_log.id = new_log.calculate_uuid()
            else:
                # Have to do this, otherwise morango will overwrite the current source id on the model
                new_log.id = new_log.compute_namespaced_id(
                    new_log.calculate_partition(),
                    new_log._morango_source_id,
                    new_log.morango_model_name,
                )
            new_log._morango_partition = new_log.calculate_partition().replace(
                new_log.ID_PLACEHOLDER, new_log.id
            )
            log_map[log.id] = new_log.id
            if new_log.id not in target_log_ids:
                new_logs.append(new_log)
        LogModel.objects.bulk_create(new_logs, batch_size=750)
    # NOTE(review): ordering presumably matters — earlier models populate
    # id_map entries that later models' foreign keys are remapped through;
    # confirm against the log models' FK definitions before reordering.
    _merge_log_data(ContentSessionLog)
    _merge_log_data(ContentSummaryLog)
    _merge_log_data(UserSessionLog)
    _merge_log_data(MasteryLog)
    _merge_log_data(AttemptLog)
|
laurent-george/weboob | refs/heads/master | modules/ehentai/pages.py | 3 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Roger Philibert
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.deprecated.browser import Page
from weboob.tools.html import html2text
from weboob.capabilities.image import BaseImage
from datetime import datetime
import re
from .gallery import EHentaiGallery
class LoginPage(Page):
    """Page returned after submitting the login form."""
    def is_logged(self):
        # The site confirms a successful login with this exact paragraph.
        matches = self.document.xpath(
            '//p[text() = "Login Successful. You will be returned momentarily."]')
        return len(matches) > 0
class HomePage(Page):
    """Site landing page; nothing is scraped from it, used for navigation."""
    pass
class IndexPage(Page):
    """Search/listing page containing rows of galleries."""
    def iter_galleries(self):
        """Yield an EHentaiGallery for each result row in the listing table."""
        lines = self.document.xpath('//table[@class="itg"]//tr[@class="gtr0" or @class="gtr1"]')
        for line in lines:
            # The last link in the title cell points at the gallery page.
            a = line.xpath('.//div[@class="it3"]/a')[-1]
            url = a.attrib["href"]
            title = a.text.strip()
            yield EHentaiGallery(re.search('(?<=/g/)\d+/[\dabcdef]+', url).group(0), title=title)
class GalleryPage(Page):
    """Gallery detail page: metadata plus links to the per-image pages."""
    def image_pages(self):
        # URLs of the single-image viewer pages (one per thumbnail).
        return self.document.xpath('//div[@class="gdtm"]//a/attribute::href')
    def _page_numbers(self):
        # Numeric pagination labels from the first pager table.
        return [n for n in self.document.xpath("(//table[@class='ptt'])[1]//td/text()") if re.match(r"\d+", n)]
    def gallery_exists(self, gallery):
        # Presence of an <h1> marks a real gallery page.
        # NOTE(review): the `gallery` argument is unused here.
        if self.document.xpath("//h1"):
            return True
        else:
            return False
    def fill_gallery(self, gallery):
        """Populate *gallery* fields from the page's metadata markup."""
        gallery.title = self.document.xpath("//h1[@id='gn']/text()")[0]
        try:
            gallery.original_title = self.document.xpath("//h1[@id='gj']/text()")[0]
        except IndexError:
            # No secondary heading -> no original (untranslated) title.
            gallery.original_title = None
        description_div = self.document.xpath("//div[@id='gd71']")[0]
        description_html = self.parser.tostring(description_div)
        gallery.description = html2text(description_html)
        cardinality_string = self.document.xpath("//div[@id='gdd']//tr[td[@class='gdt1']/text()='Images:']/td[@class='gdt2']/text()")[0]
        gallery.cardinality = int(re.match(r"\d+", cardinality_string).group(0))
        date_string = self.document.xpath("//div[@id='gdd']//tr[td[@class='gdt1']/text()='Posted:']/td[@class='gdt2']/text()")[0]
        gallery.date = datetime.strptime(date_string, "%Y-%m-%d %H:%M")
        rating_string = self.document.xpath("//td[@id='rating_label']/text()")[0]
        rating_match = re.search(r"\d+\.\d+", rating_string)
        if rating_match is None:
            # Label contains no decimal number (e.g. not yet rated).
            gallery.rating = None
        else:
            gallery.rating = float(rating_match.group(0))
        gallery.rating_max = 5
        try:
            thumbnail_url = self.document.xpath("//div[@class='gdtm']/a/img/attribute::src")[0]
        except IndexError:
            # Thumbnail rendered as a CSS background; pull the URL out of
            # the inline style attribute instead.
            thumbnail_style = self.document.xpath("//div[@class='gdtm']/div/attribute::style")[0]
            thumbnail_url = re.search(r"background:[^;]+url\((.+?)\)", thumbnail_style).group(1)
        gallery.thumbnail = BaseImage(thumbnail_url)
        gallery.thumbnail.url = gallery.thumbnail.id
class ImagePage(Page):
    """Single-image viewer page."""
    def get_url(self):
        # Direct URL of the displayed full-size image.
        return self.document.xpath('//div[@class="sni"]/a/img/attribute::src')[0]
|
lzw120/django | refs/heads/master | build/lib/django/contrib/sites/models.py | 387 | from django.db import models
from django.utils.translation import ugettext_lazy as _
SITE_CACHE = {}
class SiteManager(models.Manager):
    def get_current(self):
        """
        Returns the current ``Site`` based on the SITE_ID in the
        project's settings. The ``Site`` object is cached the first
        time it's retrieved from the database.
        """
        # Imported lazily so the model module stays importable before
        # settings are configured.
        from django.conf import settings
        try:
            sid = settings.SITE_ID
        except AttributeError:
            from django.core.exceptions import ImproperlyConfigured
            raise ImproperlyConfigured("You're using the Django \"sites framework\" without having set the SITE_ID setting. Create a site in your database and set the SITE_ID setting to fix this error.")
        try:
            # Process-wide cache keyed by SITE_ID; avoids one query per call.
            current_site = SITE_CACHE[sid]
        except KeyError:
            current_site = self.get(pk=sid)
            SITE_CACHE[sid] = current_site
        return current_site
    def clear_cache(self):
        """Clears the ``Site`` object cache."""
        global SITE_CACHE
        SITE_CACHE = {}
class Site(models.Model):
    """A website known to this project, identified by its domain name."""
    domain = models.CharField(_('domain name'), max_length=100)
    name = models.CharField(_('display name'), max_length=50)
    objects = SiteManager()
    class Meta:
        db_table = 'django_site'
        verbose_name = _('site')
        verbose_name_plural = _('sites')
        ordering = ('domain',)
    def __unicode__(self):
        return self.domain
    def save(self, *args, **kwargs):
        super(Site, self).save(*args, **kwargs)
        # Cached information will likely be incorrect now.
        if self.id in SITE_CACHE:
            del SITE_CACHE[self.id]
    def delete(self):
        pk = self.pk
        super(Site, self).delete()
        # Drop the stale cache entry, if any, after the row is removed.
        try:
            del SITE_CACHE[pk]
        except KeyError:
            pass
class RequestSite(object):
    """
    A class that shares the primary interface of Site (i.e., it has
    ``domain`` and ``name`` attributes) but gets its data from a Django
    HttpRequest object rather than from a database.
    The save() and delete() methods raise NotImplementedError.
    """
    def __init__(self, request):
        host = request.get_host()
        self.domain = host
        self.name = host
    def __unicode__(self):
        return self.domain
    def save(self, force_insert=False, force_update=False):
        raise NotImplementedError('RequestSite cannot be saved.')
    def delete(self):
        raise NotImplementedError('RequestSite cannot be deleted.')
def get_current_site(request):
    """
    Checks if contrib.sites is installed and returns either the current
    ``Site`` object or a ``RequestSite`` object based on the request.
    """
    if Site._meta.installed:
        return Site.objects.get_current()
    return RequestSite(request)
|
ptitjes/quodlibet | refs/heads/master | quodlibet/formats/_audio.py | 1 | # Copyright 2004-2005 Joe Wreschnig, Michael Urman
# 2012-2017 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# Much of this code is highly optimized, because many of the functions
# are called in tight loops. Don't change things just to make them
# more readable, unless they're also faster.
import os
import re
import shutil
import time
from typing import List
from collections import OrderedDict
from itertools import zip_longest
from senf import fsn2uri, fsnative, fsn2text, devnull, bytes2fsn, path2fsn
from quodlibet import _, print_d
from quodlibet import util
from quodlibet import config
from quodlibet.util.path import mkdir, mtime, expanduser, normalize_path, \
ismount, get_home_dir, RootPathFile
from quodlibet.util.string import encode, decode, isascii
from quodlibet.util.environment import is_windows
from quodlibet.util import iso639
from quodlibet.util import human_sort_key as human, capitalize
from quodlibet.util.tags import TAG_ROLES, TAG_TO_SORT
from ._image import ImageContainer
from ._misc import AudioFileError, translate_errors
translate_errors
MIGRATE = {"~#playcount", "~#laststarted", "~#lastplayed", "~#added",
"~#skipcount", "~#rating", "~bookmark"}
"""These get migrated if a song gets reloaded"""
PEOPLE = ["artist", "albumartist", "author", "composer", "~performers",
"originalartist", "lyricist", "arranger", "conductor"]
"""Sources of the ~people tag, most important first"""
TIME_TAGS = {"~#lastplayed", "~#laststarted", "~#added", "~#mtime"}
"""Time in seconds since epoch, defaults to 0"""
SIZE_TAGS = {"~#filesize"}
"""Size in bytes, defaults to 0"""
NUMERIC_ZERO_DEFAULT = {"~#skipcount", "~#playcount", "~#length", "~#bitrate"}
"""Defaults to 0"""
NUMERIC_ZERO_DEFAULT.update(TIME_TAGS)
NUMERIC_ZERO_DEFAULT.update(SIZE_TAGS)
FILESYSTEM_TAGS = {"~filename", "~basename", "~dirname", "~mountpoint"}
"""Values are bytes in Linux instead of unicode"""
SORT_TO_TAG = dict([(v, k) for (k, v) in TAG_TO_SORT.items()])
"""Reverse map, so sort tags can fall back to the normal ones"""
PEOPLE_SORT = [TAG_TO_SORT.get(k, k) for k in PEOPLE]
"""Sources for ~peoplesort, most important first"""
VARIOUS_ARTISTS_VALUES = 'V.A.', 'various artists', 'Various Artists'
"""Values for ~people representing lots of people, most important last"""
def decode_value(tag, value):
    """Returns a unicode representation of the passed value, based on
    the type and the tag it originated from.
    Not reversible.
    """
    if tag in FILESYSTEM_TAGS:
        return fsn2text(value)
    if tag.startswith("~#") and isinstance(value, float):
        # Numeric float tags get a fixed two-decimal rendering.
        return u"%.2f" % value
    return str(value)
class AudioFile(dict, ImageContainer):
"""An audio file. It looks like a dict, but implements synthetic
and tied tags via __call__ rather than __getitem__. This means
__getitem__, get, and so on can be used for efficiency.
If you need to sort many AudioFiles, you can use their sort_key
attribute as a decoration.
Keys are either ASCII str or unicode.
Values are always unicode except if the tag is part of FILESYSTEM_TAGS,
then the value is of the path type (str on UNIX, unicode on Windows)
Some methods will make sure the returned values are always unicode, see
their description.
"""
fill_metadata = False
"""New tags received from the backend will update the song"""
fill_length = False
"""New song duration from the backend will update the song"""
multisong = False
"""Is a container for multiple songs
i.e. while played new songs can start / end"""
streamsong = False
"""Is part of a multisong"""
can_add = True
"""Can be added to the queue, playlists"""
is_file = True
"""Is a real (local) file"""
format = "Unknown Audio File"
"""The underlying file format"""
mimes: List[str] = []
"""MIME types this class can represent"""
def __init__(self, default=tuple(), **kwargs):
for key, value in dict(default).items():
self[key] = value
for key, value in kwargs.items():
self[key] = value
def __song_key(self):
return (self("~#disc", 0), self("~#track", 0),
human(self("artistsort")),
self.get("musicbrainz_artistid", ""),
human(self.get("title", "")),
self.get("~filename"))
@util.cached_property
def album_key(self):
return (human(self("albumsort", "")),
human(self("albumartistsort", "")),
self.get("album_grouping_key") or self.get("labelid") or
self.get("musicbrainz_albumid") or "")
@util.cached_property
def sort_key(self):
return [self.album_key, self.__song_key()]
@staticmethod
def sort_by_func(tag):
"""Returns a fast sort function for a specific tag (or pattern).
Some keys are already in the sort cache, so we can use them."""
def artist_sort(song):
return song.sort_key[1][2]
if callable(tag):
return lambda song: human(tag(song))
elif tag == "artistsort":
return artist_sort
elif tag in FILESYSTEM_TAGS:
return lambda song: fsn2text(song(tag))
elif tag.startswith("~#") and "~" not in tag[2:]:
return lambda song: song(tag, 0)
return lambda song: human(song(tag))
def __getstate__(self):
"""Don't pickle anything from __dict__"""
pass
def __setstate__(self, state):
"""Needed because we have defined getstate"""
pass
def __setitem__(self, key, value):
# validate key
if not isinstance(key, str):
raise TypeError("key has to be str")
# validate value
if key.startswith("~#"):
if not isinstance(value, (int, float)):
raise TypeError
elif key in FILESYSTEM_TAGS:
if not isinstance(value, fsnative):
value = path2fsn(value)
else:
value = str(value)
dict.__setitem__(self, key, value)
pop = self.__dict__.pop
pop("album_key", None)
pop("sort_key", None)
def __delitem__(self, key):
dict.__delitem__(self, key)
pop = self.__dict__.pop
pop("album_key", None)
pop("sort_key", None)
@property
def key(self):
return self["~filename"]
@property
def mountpoint(self):
return self["~mountpoint"]
def __hash__(self):
# Dicts aren't hashable by default, so we need a hash
# function. Previously this used ~filename. That created a
# situation when an object could end up in two buckets by
# renaming files. So now it uses identity.
return hash(id(self))
def __eq__(self, other):
# And to preserve Python hash rules, we need a strict __eq__.
return self is other
def __lt__(self, other):
return self.sort_key < other.sort_key
def __ne__(self, other):
return self is not other
def reload(self):
"""Reload an audio file from disk. If reloading fails nothing will
change.
Raises:
AudioFileError: if the file fails to load
"""
backup = dict(self)
fn = self["~filename"]
saved = {}
for key in self:
if key in MIGRATE:
saved[key] = self[key]
self.clear()
self["~filename"] = fn
try:
self.__init__(fn)
except AudioFileError:
self.update(backup)
raise
else:
self.update(saved)
def realkeys(self):
"""Returns a list of keys that are not internal, i.e. they don't
have '~' in them."""
return list(filter(lambda s: s[:1] != "~", self.keys()))
def prefixkeys(self, prefix):
"""Returns a list of dict keys that either match prefix or start
with prefix + ':'.
"""
l = []
for k in self:
if k.startswith(prefix):
if k == prefix or k.startswith(prefix + ":"):
l.append(k)
return l
def _prefixvalue(self, tag):
return "\n".join(self.list_unique(sorted(self.prefixkeys(tag))))
def iterrealitems(self):
return ((k, v) for (k, v) in self.items() if k[:1] != "~")
def __call__(self, key, default=u"", connector=" - ", joiner=', '):
"""Return the value(s) for a key, synthesizing if necessary.
Multiple values for a key are delimited by newlines.
A default value may be given (like `dict.get`);
the default default is an empty unicode string
(even if the tag is numeric).
If a tied tag ('a~b') is requested, the `connector` keyword
argument may be used to specify what it is tied with.
In case the tied tag contains numeric and file path tags, the result
will still be a unicode string.
The `joiner` keyword specifies how multiple *values* will be joined
within that tied tag output, e.g.
~people~title = "Kanye West, Jay Z - New Day"
For details on tied tags, see the documentation for `util.tagsplit`.
"""
if key[:1] == "~":
key = key[1:]
if "~" in key:
real_key = "~" + key
values = []
sub_tags = util.tagsplit(real_key)
# If it's genuinely a tied tag (not ~~people etc), we want
# to delimit the multi-values separately from the tying
j = joiner if len(sub_tags) > 1 else "\n"
for t in sub_tags:
vs = [decode_value(real_key, v) for v in (self.list(t))]
v = j.join(vs)
if v:
values.append(v)
return connector.join(values) or default
elif key == "#track":
try:
return int(self["tracknumber"].split("/")[0])
except (ValueError, TypeError, KeyError):
return default
elif key == "#disc":
try:
return int(self["discnumber"].split("/")[0])
except (ValueError, TypeError, KeyError):
return default
elif key == "length":
length = self.get("~#length")
if length is None:
return default
else:
return util.format_time_display(length)
elif key == "#rating":
return dict.get(self, "~" + key, config.RATINGS.default)
elif key == "rating":
return util.format_rating(self("~#rating"))
elif key == "people":
return "\n".join(self.list_unique(PEOPLE)) or default
elif key == "people:real":
# Issue 1034: Allow removal of V.A. if others exist.
unique = self.list_unique(PEOPLE)
# Order is important, for (unlikely case): multiple removals
for val in VARIOUS_ARTISTS_VALUES:
if len(unique) > 1 and val in unique:
unique.remove(val)
return "\n".join(unique) or default
elif key == "people:roles":
return (self._role_call("performer", PEOPLE)
or default)
elif key == "peoplesort":
return ("\n".join(self.list_unique(PEOPLE_SORT)) or
self("~people", default, connector))
elif key == "peoplesort:roles":
# Ignores non-sort tags if there are any sort tags (e.g. just
# returns "B" for {artist=A, performersort=B}).
# TODO: figure out the "correct" behavior for mixed sort tags
return (self._role_call("performersort", PEOPLE_SORT)
or self("~peoplesort", default, connector))
elif key in ("performers", "performer"):
return self._prefixvalue("performer") or default
elif key in ("performerssort", "performersort"):
return (self._prefixvalue("performersort") or
self("~" + key[-4:], default, connector))
elif key in ("performers:roles", "performer:roles"):
return (self._role_call("performer") or default)
elif key in ("performerssort:roles", "performersort:roles"):
return (self._role_call("performersort")
or self("~" + key.replace("sort", ""), default,
connector))
elif key == "basename":
return os.path.basename(self["~filename"]) or self["~filename"]
elif key == "dirname":
return os.path.dirname(self["~filename"]) or self["~filename"]
elif key == "uri":
try:
return self["~uri"]
except KeyError:
return fsn2uri(self["~filename"])
elif key == "format":
return self.get("~format", str(self.format))
elif key == "codec":
codec = self.get("~codec")
if codec is None:
return self("~format")
return codec
elif key == "encoding":
parts = filter(None,
[self.get("~encoding"), self.get("encodedby")])
encoding = u"\n".join(parts)
return encoding or default
elif key == "language":
codes = self.list("language")
if not codes:
return default
return u"\n".join(iso639.translate(c) or c for c in codes)
elif key == "bitrate":
return util.format_bitrate(self("~#bitrate"))
elif key == "#date":
date = self.get("date")
if date is None:
return default
return util.date_key(date)
elif key == "year":
return self.get("date", default)[:4]
elif key == "#year":
try:
return int(self.get("date", default)[:4])
except (ValueError, TypeError, KeyError):
return default
elif key == "originalyear":
return self.get("originaldate", default)[:4]
elif key == "#originalyear":
try:
return int(self.get("originaldate", default)[:4])
except (ValueError, TypeError, KeyError):
return default
elif key == "#tracks":
try:
return int(self["tracknumber"].split("/")[1])
except (ValueError, IndexError, TypeError, KeyError):
return default
elif key == "#discs":
try:
return int(self["discnumber"].split("/")[1])
except (ValueError, IndexError, TypeError, KeyError):
return default
elif key == "lyrics":
# First, try the embedded lyrics.
try:
return self["lyrics"]
except KeyError:
pass
try:
return self["unsyncedlyrics"]
except KeyError:
pass
# If there are no embedded lyrics, try to read them from
# the external file.
fn = self.lyric_filename
try:
fileobj = open(fn, "rb")
except EnvironmentError:
return default
else:
print_d("Reading lyrics from %s" % fn)
return fileobj.read().decode("utf-8", "replace")
elif key == "filesize":
return util.format_size(self("~#filesize", 0))
elif key == "playlists":
# See Issue 876
# Avoid circular references from formats/__init__.py
from quodlibet.util.collection import Playlist
playlists = Playlist.playlists_featuring(self)
return "\n".join([s.name for s in playlists]) or default
elif key.startswith("#replaygain_"):
try:
val = self.get(key[1:], default)
return round(float(val.split(" ")[0]), 2)
except (ValueError, TypeError, AttributeError):
return default
elif key[:1] == "#":
key = "~" + key
if key in self:
return self[key]
elif key in NUMERIC_ZERO_DEFAULT:
return 0
else:
try:
val = self[key[2:]]
except KeyError:
return default
try:
return int(val)
except ValueError:
try:
return float(val)
except ValueError:
return default
else:
return dict.get(self, "~" + key, default)
elif key == "title":
title = dict.get(self, "title")
if title is None:
basename = self("~basename")
return "%s [%s]" % (
decode_value("~basename", basename), _("Unknown"))
else:
return title
elif key in SORT_TO_TAG:
try:
return self[key]
except KeyError:
key = SORT_TO_TAG[key]
return dict.get(self, key, default)
    def _role_call(self, role_tag, sub_keys=None):
        """Build the "Name (Role1, Role2)" display text for role tags.

        role_tag is the base tag (e.g. "performer"); every tag starting
        with that prefix contributes its suffix after ":" as a role.
        sub_keys, when given, is a list of additional tags (e.g. for
        ~people:roles) whose values are also included, each labelled
        with its TAG_ROLES description.

        Returns a newline-joined string, one entry per unique name.
        """
        role_tag_keys = self.prefixkeys(role_tag)
        # name -> list of role strings collected for that name
        role_map = {}
        for key in role_tag_keys:
            if key == role_tag:
                # #2986: don't add a role description for the bare tag, unless
                # this is a composite tag (e.g. only show "(Performance)" for
                # ~people:roles and not ~performer:roles).
                if sub_keys is None:
                    continue
                else:
                    role = TAG_ROLES.get(role_tag, role_tag)
            else:
                # "performer:vocals" -> role "vocals"
                role = key.split(":", 1)[-1]
            for name in self.list(key):
                role_map.setdefault(name, []).append(role)
        if sub_keys is None:
            names = self.list_unique(role_tag_keys)
        else:
            names = self.list_unique(sub_keys)
            for tag in sub_keys:
                if tag in TAG_ROLES:
                    for name in self.list(tag):
                        role_map.setdefault(name, []).append(TAG_ROLES[tag])
        descs = []
        for name in names:
            roles = role_map.get(name, [])
            if not roles:
                descs.append(name)
            else:
                # roles are shown capitalized and alphabetically sorted
                roles = sorted(map(capitalize, roles))
                descs.append("%s (%s)" % (name, ", ".join(roles)))
        return "\n".join(descs)
    @property
    def lyric_filename(self):
        """Returns the validated, or default, lyrics filename for this
        file. User defined '[memory] lyric_rootpaths' and
        '[memory] lyric_filenames' matches take precedence.

        The search tries every (root path, filename) combination,
        expanding patterns and user home references, and falls back to
        alternative file extensions before returning the first default
        candidate if nothing exists on disk.
        """
        from quodlibet.pattern \
            import ArbitraryExtensionFileFromPattern as expand_patterns
        # Detects unescaped <...> pattern markers (not containing a path
        # separator) so we only run the pattern expander when needed.
        rx_params = re.compile(r'[^\\]<[^' + re.escape(os.sep) + r']*[^\\]>')
        def expand_pathfile(rpf):
            """Return the expanded RootPathFile(s) for one candidate"""
            expanded = []
            root = expanduser(rpf.root)
            pathfile = expanduser(rpf.pathfile)
            if rx_params.search(pathfile):
                root = expand_patterns(root).format(self)
                pathfile = expand_patterns(pathfile).format(self)
            rpf = RootPathFile(root, pathfile)
            expanded.append(rpf)
            if not os.path.exists(pathfile) and is_windows():
                # prioritise a special character encoded version
                #
                # most 'alien' chars are supported for 'nix fs paths, and we
                # only pass the proposed path through 'escape_filename' (which
                # apparently doesn't respect case) if we don't care about case!
                #
                # FIX: assumes 'nix build used on a case-sensitive fs, nt case
                # insensitive. clearly this is not biting anyone though (yet!)
                pathfile = os.path.sep.join([rpf.root, rpf.end_escaped])
                rpf = RootPathFile(rpf.root, pathfile)
                expanded.insert(len(expanded) - 1, rpf)
            return expanded
        def sanitise(sep, parts):
            """Return a sanitised version of a path's parts"""
            # strip path separators from each part and cap its length
            return sep.join(part.replace(os.path.sep, u'')[:128]
                            for part in parts)
        # setup defaults (user-defined take precedence)
        # root search paths
        lyric_paths = \
            config.getstringlist("memory", "lyric_rootpaths", [])
        # ensure default paths
        lyric_paths.append(os.path.join(get_home_dir(), ".lyrics"))
        lyric_paths.append(
            os.path.join(os.path.dirname(self.comma('~filename'))))
        # search pathfile names
        lyric_filenames = \
            config.getstringlist("memory", "lyric_filenames", [])
        # ensure some default pathfile names
        lyric_filenames.append(
            sanitise(os.sep, [(self.comma("lyricist") or
                               self.comma("artist")),
                              self.comma("title")]) + u'.lyric')
        lyric_filenames.append(
            sanitise(' - ', [(self.comma("lyricist") or
                              self.comma("artist")),
                             self.comma("title")]) + u'.lyric')
        # generate all potential paths (unresolved/unexpanded)
        pathfiles = OrderedDict()
        for r in lyric_paths:
            for f in lyric_filenames:
                pathfile = os.path.join(r, os.path.dirname(f),
                                        fsnative(os.path.basename(f)))
                rpf = RootPathFile(r, pathfile)
                if not pathfile in pathfiles:
                    pathfiles[pathfile] = rpf
        #print_d("searching for lyrics in:\n%s" % '\n'.join(pathfiles.keys()))
        # expand each raw pathfile in turn and test for existence
        match_ = ""
        pathfiles_expanded = OrderedDict()
        for pf, rpf in pathfiles.items():
            for rpf in expand_pathfile(rpf):  # resolved as late as possible
                pathfile = rpf.pathfile
                pathfiles_expanded[pathfile] = rpf
                if os.path.exists(pathfile):
                    match_ = pathfile
                    break
            if match_ != "":
                break
        if not match_:
            # search even harder!
            lyric_extensions = ['lyric', 'lyrics', '', 'txt']
            #print_d("extending search to extensions: %s" % lyric_extensions)
            def generate_mod_ext_paths(pathfile):
                # separate pathfile's extension (if any)
                ext = os.path.splitext(pathfile)[1][1:]
                path = pathfile[:-1 * len(ext)].strip('.') if ext else pathfile
                # skip the proposed lyric extension if it is the same as
                # the original for a given search pathfile stub - it has
                # already been tested without success!
                extra_extensions = [x for x in lyric_extensions if x != ext]
                # join valid new extensions to pathfile stub and return
                return ['.'.join([path, ext]) if ext else path
                        for ext in extra_extensions]
            # look for a match by modifying the extension for each of the
            # (now fully resolved) 'pathfiles_expanded' search items
            for pathfile in pathfiles_expanded.keys():
                # get alternatives for existence testing
                paths_mod_ext = generate_mod_ext_paths(pathfile)
                for path_ext in paths_mod_ext:
                    if os.path.exists(path_ext):
                        # persistence has paid off!
                        #print_d("extended search match!")
                        match_ = path_ext
                        break
                if match_:
                    break
        if not match_:
            # nothing exists on disk: fall back to the first candidate
            match_ = list(pathfiles_expanded.keys())[0]
        return match_
@property
def has_rating(self):
"""True if the song has a rating set.
In case this is False song('~#rating') would return the default value
"""
return self.get("~#rating") is not None
def remove_rating(self):
"""Removes the set rating so the default will be returned"""
self.pop("~#rating", None)
    def comma(self, key):
        """Get all values of a tag, separated by commas. Synthetic
        tags are supported, but will be slower. All list items
        will be unicode.

        If the value is numeric, that is returned rather than a list.
        """
        if "~" in key or key == "title":
            if key in FILESYSTEM_TAGS:
                # file-path tags carry fs-native values; decode to text
                v = fsn2text(self(key, fsnative()))
            else:
                v = self(key, u"")
        else:
            v = self.get(key, u"")
        if isinstance(v, (int, float)):
            return v
        else:
            # multi-valued tags are stored newline-separated internally
            return v.replace("\n", ", ")
def list(self, key):
"""Get all values of a tag, as a list. Synthetic tags are supported,
but will be slower. Numeric tags will give their one value.
For file path keys the returned list might contain path items
(non-unicode).
An empty synthetic tag cannot be distinguished from a non-existent
synthetic tag; both result in [].
"""
if "~" in key or key == "title":
v = self(key)
if v == "":
return []
else:
return v.split("\n") if isinstance(v, str) else [v]
else:
v = self.get(key)
return [] if v is None else v.split("\n")
    def list_sort(self, key):
        """Like list but return display,sort pairs when appropriate
        and work on all tags.

        In case no sort value exists the display one is returned. The sort
        value is only an empty string if the display one is empty as well.
        """
        display = decode_value(key, self(key))
        display = display.split("\n") if display else []
        sort = []
        if key in TAG_TO_SORT:
            sort = decode_value(TAG_TO_SORT[key],
                                self(TAG_TO_SORT[key]))
            # it would be better to use something that doesn't fall back
            # to the key itself, but what?
            sort = sort.split("\n") if sort else []
        result = []
        # zip_longest pads the shorter list with None; surplus sort
        # values (d is None) are dropped, missing/empty sort values
        # fall back to the display value.
        for d, s in zip_longest(display, sort):
            if d is not None:
                result.append((d, (s if s is not None and s != "" else d)))
        return result
def list_separate(self, key):
"""For tied tags return the list union of the display,sort values
otherwise just do list_sort
"""
if key[:1] == "~" and "~" in key[1:]: # tied tag
vals = [self.list_sort(tag) for tag in util.tagsplit(key)]
r = [j for i in vals for j in i]
return r
else:
return self.list_sort(key)
def list_unique(self, keys):
"""Returns a combined value of all values in keys; duplicate values
will be ignored.
Returns the same as list().
"""
l = []
seen = set()
for k in keys:
for v in self.list(k):
if v not in seen:
l.append(v)
seen.add(v)
return l
def as_lowercased(self):
"""Returns a new AudioFile with all keys lowercased / values merged.
Useful for tag writing for case insensitive tagging formats like
APEv2 or VorbisComment.
"""
merged = AudioFile()
text = {}
for key, value in self.items():
lower = key.lower()
if key.startswith("~#"):
merged[lower] = value
else:
text.setdefault(lower, []).extend(value.split("\n"))
for key, values in text.items():
merged[key] = "\n".join(values)
return merged
def exists(self):
"""Return true if the file still exists (or we can't tell)."""
return os.path.exists(self["~filename"])
def valid(self):
"""Return true if the file cache is up-to-date (checked via
mtime), or we can't tell."""
return (bool(self.get("~#mtime", 0)) and
self["~#mtime"] == mtime(self["~filename"]))
def mounted(self):
"""Return true if the disk the file is on is mounted, or
the file is not on a disk."""
return ismount(self.get("~mountpoint", "/"))
    def can_multiple_values(self, key=None):
        """If no arguments are given, return a list of tags that can
        have multiple values, or True if 'any' tags can.

        This base implementation says every tag supports multiple
        values; format subclasses may restrict that.
        """
        return True
def can_change(self, k=None):
"""See if this file supports changing the given tag. This may
be a limitation of the file type or QL's design.
The writing code should handle all kinds of keys, so this is
just a suggestion.
If no arguments are given, return a list of tags that can be
changed, or True if 'any' tags can be changed (specific tags
should be checked before adding)."""
if k is None:
return True
if not isascii(k):
return False
if not k or "=" in k or "~" in k:
return False
return True
def is_writable(self):
return os.access(self["~filename"], os.W_OK)
    def rename(self, newname):
        """Rename a file. Errors are not handled. This shouldn't be used
        directly; use library.rename instead.

        A relative newname is resolved against the song's directory.
        Raises ValueError if a different file already exists at the
        destination.
        """
        if os.path.isabs(newname):
            # make sure the destination directory exists
            mkdir(os.path.dirname(newname))
        else:
            newname = os.path.join(self('~dirname'), newname)
        if not os.path.exists(newname):
            shutil.move(self['~filename'], newname)
        elif normalize_path(newname, canonicalise=True) != self['~filename']:
            # destination exists and is not this very file
            raise ValueError
        # update internal state (~filename, mtime, size, ...)
        self.sanitize(newname)
    def sanitize(self, filename=None):
        """Fill in metadata defaults. Find ~mountpoint, ~#mtime, ~#filesize
        and ~#added. Check for null bytes in tags.

        Does not raise (except ValueError when no filename is known).
        """
        # Replace nulls with newlines, trimming zero-length segments
        for key, val in list(self.items()):
            # NOTE(review): this assignment looks redundant (val was just
            # read from self) -- confirm it isn't needed for a dict subclass
            # side effect before removing.
            self[key] = val
            if isinstance(val, str) and '\0' in val:
                self[key] = '\n'.join(filter(lambda s: s, val.split('\0')))
            # Remove unnecessary defaults
            if key in NUMERIC_ZERO_DEFAULT and val == 0:
                del self[key]
        if filename:
            self["~filename"] = filename
        elif "~filename" not in self:
            raise ValueError("Unknown filename!")
        assert isinstance(self["~filename"], fsnative)
        if self.is_file:
            self["~filename"] = normalize_path(
                self["~filename"], canonicalise=True)
            # Find mount point (terminating at "/" if necessary)
            head = self["~filename"]
            while "~mountpoint" not in self:
                head, tail = os.path.split(head)
                # Prevent infinite loop without a fully-qualified filename
                # (the unit tests use these).
                head = head or fsnative(u"/")
                if ismount(head):
                    self["~mountpoint"] = head
        else:
            self["~mountpoint"] = fsnative(u"/")
        # Fill in necessary values.
        self.setdefault("~#added", int(time.time()))
        # For efficiency, do a single stat here. See Issue 504
        try:
            stat = os.stat(self['~filename'])
            self["~#mtime"] = stat.st_mtime
            self["~#filesize"] = stat.st_size
            # Issue 342. This is a horrible approximation (due to headers) but
            # on FLACs, the most common case, this should be close enough
            if "~#bitrate" not in self:
                try:
                    # kbps = bytes * 8 / seconds / 1000
                    self["~#bitrate"] = int(stat.st_size /
                                            (self["~#length"] * (1000 / 8)))
                except (KeyError, ZeroDivisionError):
                    pass
        except OSError:
            # file vanished or is unreadable: mark the cache stale
            self["~#mtime"] = 0
    def to_dump(self):
        """A string of 'key=value' lines, similar to vorbiscomment output.

        Numeric defaults and the rating are always included so the dump
        round-trips through from_dump.

        Returns:
            bytes
        """
        def encode_key(k):
            # keys may already be bytes; only encode text keys
            return encode(k) if isinstance(k, str) else k
        s = []
        for k in self.keys():
            enc_key = encode_key(k)
            assert isinstance(enc_key, bytes)
            if isinstance(self[k], int):
                l = enc_key + encode("=%d" % self[k])
                s.append(l)
            elif isinstance(self[k], float):
                l = enc_key + encode("=%f" % self[k])
                s.append(l)
            else:
                # one line per value for multi-valued text tags
                for v2 in self.list(k):
                    if not isinstance(v2, bytes):
                        v2 = encode(v2)
                    s.append(enc_key + b"=" + v2)
        # emit the implicit zero defaults explicitly
        for k in (NUMERIC_ZERO_DEFAULT - set(self.keys())):
            enc_key = encode_key(k)
            l = enc_key + encode("=%d" % self.get(k, 0))
            s.append(l)
        if "~#rating" not in self:
            s.append(encode("~#rating=%f" % self("~#rating")))
        s.append(encode("~format=%s" % self.format))
        s.append(b"")
        return b"\n".join(s)
    def from_dump(self, text):
        """Parses the text created with to_dump and adds the found tags.

        Args:
            text (bytes)
        """
        for line in text.split(b"\n"):
            if not line:
                continue
            parts = line.split(b"=")
            key = decode(parts[0])
            # the value itself may contain "=": re-join the remainder
            val = b"=".join(parts[1:])
            if key == "~format":
                # informational only; the format comes from the file type
                pass
            elif key in FILESYSTEM_TAGS:
                self.add(key, bytes2fsn(val, "utf-8"))
            elif key.startswith("~#"):
                # numeric tags: try int first, then float, else drop
                try:
                    self.add(key, int(val))
                except ValueError:
                    try:
                        self.add(key, float(val))
                    except ValueError:
                        pass
            else:
                self.add(key, decode(val))
def change(self, key, old_value, new_value):
"""Change 'old_value' to 'new_value' for the given metadata key.
If the old value is not found, set the key to the new value."""
try:
parts = self.list(key)
try:
parts[parts.index(old_value)] = new_value
except ValueError:
self[key] = new_value
else:
self[key] = "\n".join(parts)
except KeyError:
self[key] = new_value
def add(self, key, value):
"""Add a value for the given metadata key."""
if key not in self:
self[key] = value
else:
self[key] += "\n" + value
def remove(self, key, value=None):
"""Remove a value from the given key.
If value is None remove all values for that key, if it exists.
If the key or value is not found do nothing.
"""
if key not in self:
return
elif value is None or self[key] == value:
del self[key]
else:
try:
parts = self.list(key)
parts.remove(value)
self[key] = u"\n".join(parts)
except ValueError:
pass
def replay_gain(self, profiles, pre_amp_gain=0, fallback_gain=0):
"""Return the computed Replay Gain scale factor.
profiles is a list of Replay Gain profile names ('album',
'track') to try before giving up. The special profile name
'none' will cause no scaling to occur. pre_amp_gain will be
applied before checking for clipping. fallback_gain will be
used when the song does not have replaygain information.
"""
for profile in profiles:
if profile == "none":
return 1.0
try:
db = float(self["replaygain_%s_gain" % profile].split()[0])
peak = float(self.get("replaygain_%s_peak" % profile, 1))
except (KeyError, ValueError, IndexError):
continue
else:
db += pre_amp_gain
try:
scale = 10. ** (db / 20)
except OverflowError:
scale = 1.0 / peak
else:
if scale * peak > 1:
scale = 1.0 / peak # don't clip
return min(15, scale)
else:
try:
scale = 10. ** ((fallback_gain + pre_amp_gain) / 20)
except OverflowError:
scale = 1.0
else:
if scale > 1:
scale = 1.0 # don't clip
return min(15, scale)
    def write(self):
        """Write metadata back to the file.

        Abstract: concrete audio format subclasses must override this.

        Raises:
            AudioFileError: in case writing fails
        """
        raise NotImplementedError
@property
def bookmarks(self):
"""Parse and return song position bookmarks, or set them.
Accessing this returns a copy, so song.bookmarks.append(...)
will not work; you need to do
marks = song.bookmarks
marks.append(...)
song.bookmarks = marks
"""
marks = []
invalid = []
for line in self.list("~bookmark"):
try:
time, mark = line.split(" ", 1)
except:
invalid.append((-1, line))
else:
try:
time = util.parse_time(time, None)
except:
invalid.append((-1, line))
else:
if time >= 0:
marks.append((time, mark))
else:
invalid.append((-1, line))
marks.sort()
marks.extend(invalid)
return marks
@bookmarks.setter
def bookmarks(self, marks):
result = []
for time_, mark in marks:
if time_ < 0:
raise ValueError("mark times must be positive")
result.append(u"%s %s" % (util.format_time(time_), mark))
result = u"\n".join(result)
if result:
self["~bookmark"] = result
elif "~bookmark" in self:
del(self["~bookmark"])
# Looks like the real thing: a fully-populated stand-in AudioFile for code
# that needs a plausible song (e.g. pattern previews) without touching a
# real audio file on disk.
DUMMY_SONG = AudioFile({
    '~#length': 234, '~filename': devnull,
    'artist': 'The Artist', 'album': 'An Example Album',
    'title': 'First Track', 'tracknumber': 1,
    'date': '2010-12-31',
})
|
macressler/parallella-linux | refs/heads/main | scripts/tracing/draw_functrace.py | 14679 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """Tree representation of the function call stack.

    A function with no visible parent in the kernel (interrupt,
    syscall, kernel thread...) is attached to a single virtual parent,
    CallTree.ROOT.
    """
    ROOT = None

    def __init__(self, func, time=None, parent=None):
        self._func = func
        self._time = time
        self._parent = CallTree.ROOT if parent is None else parent
        self._children = []

    def calls(self, func, calltime):
        """Record that this function called *func* at *calltime*.

        @return: A reference to the newly created child node.
        """
        node = CallTree(func, calltime, self)
        self._children.append(node)
        return node

    def getParent(self, func):
        """Walk up to the closest ancestor named *func*.

        If no such ancestor exists, a new child of ROOT is created
        and returned instead.
        @return: A reference to the parent.
        """
        node = self
        while node != CallTree.ROOT and node._func != func:
            node = node._parent
        if node == CallTree.ROOT:
            return CallTree.ROOT.calls(func, None)
        return node

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        # render this node, then recurse into children with an extended
        # branch prefix; the last child gets a blank continuation line
        if self._time is None:
            s = "%s----%s\n" % (branch, self._func)
        else:
            s = "%s----%s (%s)\n" % (branch, self._func, self._time)
        if lastChild:
            branch = branch[:-1] + " "
        last_index = len(self._children) - 1
        for i, child in enumerate(self._children):
            s += "%s" % child.__toString(branch + " |", i == last_index)
        return s
class BrokenLineException(Exception):
    """Raised for a line left incomplete by the pipe breakage; the line
    is ignored and processing stops."""
class CommentLineException(Exception):
    """Raised for comment lines (as at the beginning of the trace
    file); such lines are simply skipped."""
def parseLine(line):
    """Split one ftrace line into a (timestamp, callee, caller) tuple.

    Raises CommentLineException for header/comment lines and
    BrokenLineException for lines that do not match the expected
    "...] <time>: <callee> <-<caller>" layout (e.g. the final line
    truncated when the pipe broke).
    """
    line = line.strip()
    if line.startswith("#"):
        raise CommentLineException
    # raw string instead of double-escaped backslashes for readability
    m = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", line)
    if m is None:
        raise BrokenLineException
    return (m.group(1), m.group(2), m.group(3))
def main():
    """Read a raw function trace from stdin and print it as a tree.

    NOTE: this is Python 2 code (print statement below).
    """
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT
    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # truncated final line: stop processing
            break
        except CommentLineException:
            # header/comment lines are skipped
            continue
        # attach the call under the closest matching caller node
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)
    print CallTree.ROOT

if __name__ == "__main__":
    main()
|
bravominski/PennApps2015-HeartMates | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/distlib/__init__.py | 185 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import logging
__version__ = '0.1.8'
class DistlibException(Exception):
    """Base class for all exceptions raised by distlib."""
    pass
try:
    from logging import NullHandler
except ImportError: # pragma: no cover
    # logging.NullHandler was only added in Python 2.7; provide a
    # minimal do-nothing stand-in for older interpreters.
    class NullHandler(logging.Handler):
        def handle(self, record): pass
        def emit(self, record): pass
        def createLock(self): self.lock = None

# Attach a no-op handler so applications that have not configured
# logging do not get "No handlers could be found" warnings.
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())
|
mahak/nova | refs/heads/master | nova/privsep/__init__.py | 7 | # Copyright 2016 Red Hat, Inc
# Copyright 2017 Rackspace Australia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Setup privsep decorator."""
from oslo_privsep import capabilities
from oslo_privsep import priv_context
# Privsep context used to run selected nova helpers with elevated
# privileges via a separate privileged daemon.  The capability list is
# the set these helpers need: file ownership/permission overrides plus
# network and general system administration.
sys_admin_pctxt = priv_context.PrivContext(
    'nova',
    cfg_section='nova_sys_admin',
    pypath=__name__ + '.sys_admin_pctxt',
    capabilities=[capabilities.CAP_CHOWN,
                  capabilities.CAP_DAC_OVERRIDE,
                  capabilities.CAP_DAC_READ_SEARCH,
                  capabilities.CAP_FOWNER,
                  capabilities.CAP_NET_ADMIN,
                  capabilities.CAP_SYS_ADMIN],
)
|
hurricanerix/swift | refs/heads/master | test/unit/common/middleware/test_tempauth.py | 5 | # -*- coding: utf-8 -*-
# Copyright (c) 2011-2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from contextlib import contextmanager
from base64 import b64encode
from time import time
import mock
from swift.common.middleware import tempauth as auth
from swift.common.middleware.acl import format_acl
from swift.common.swob import Request, Response
from swift.common.utils import split_path
NO_CONTENT_RESP = (('204 No Content', {}, ''),) # mock server response
class FakeMemcache(object):
    """In-memory stand-in for swift's memcache client used by tempauth."""

    def __init__(self):
        self.store = {}

    def get(self, key):
        """Return the cached value, or None when absent."""
        return self.store.get(key)

    def set(self, key, value, time=0):
        """Store a value; the expiry time is accepted but ignored."""
        self.store[key] = value
        return True

    def incr(self, key, time=0):
        """Increment a counter, creating it at 0 first if missing."""
        new_value = self.store.setdefault(key, 0) + 1
        self.store[key] = new_value
        return new_value

    @contextmanager
    def soft_lock(self, key, timeout=0, retries=5):
        """Pretend to take a lock; always succeeds immediately."""
        yield True

    def delete(self, key):
        """Remove a key if present; always reports success."""
        self.store.pop(key, None)
        return True
class FakeApp(object):
    """Fake downstream WSGI app that replays scripted responses.

    Records the number of calls and the last Request, and runs any
    swift.authorize callback installed in the environ so tests exercise
    the same code path the real proxy would.
    """
    def __init__(self, status_headers_body_iter=None, acl=None, sync_key=None):
        self.calls = 0
        self.status_headers_body_iter = status_headers_body_iter
        if not self.status_headers_body_iter:
            # default: always answer 404
            self.status_headers_body_iter = iter([('404 Not Found', {}, '')])
        self.acl = acl
        self.sync_key = sync_key

    def __call__(self, env, start_response):
        self.calls += 1
        self.request = Request(env)
        if self.acl:
            self.request.acl = self.acl
        if self.sync_key:
            self.request.environ['swift_sync_key'] = self.sync_key
        # honor the authorization callback tempauth installed, if any
        if 'swift.authorize' in env:
            resp = env['swift.authorize'](self.request)
            if resp:
                return resp(env, start_response)
        status, headers, body = next(self.status_headers_body_iter)
        return Response(status=status, headers=headers,
                        body=body)(env, start_response)
class FakeConn(object):
    """Minimal fake HTTP connection that replays canned responses."""

    def __init__(self, status_headers_body_iter=None):
        self.calls = 0
        provided = status_headers_body_iter
        if not provided:
            # default: always answer 404
            provided = iter([('404 Not Found', {}, '')])
        self.status_headers_body_iter = provided

    def request(self, method, path, headers):
        """Consume the next canned response and record the path."""
        self.calls += 1
        self.request_path = path
        status, self.headers, self.body = next(self.status_headers_body_iter)
        code, self.reason = status.split(' ', 1)
        self.status = int(code)

    def getresponse(self):
        """The connection doubles as its own response object."""
        return self

    def read(self):
        """Return the body once; subsequent reads give ''."""
        body, self.body = self.body, ''
        return body
class TestAuth(unittest.TestCase):
    def setUp(self):
        # Fresh tempauth middleware (default config) over a FakeApp backend.
        self.test_auth = auth.filter_factory({})(FakeApp())
    def _make_request(self, path, **kwargs):
        # Build a blank request carrying a FakeMemcache, since tempauth
        # expects a cache in the WSGI environment for token lookups.
        req = Request.blank(path, **kwargs)
        req.environ['swift.cache'] = FakeMemcache()
        return req
def test_reseller_prefix_init(self):
app = FakeApp()
ath = auth.filter_factory({})(app)
self.assertEqual(ath.reseller_prefix, 'AUTH_')
self.assertEqual(ath.reseller_prefixes, ['AUTH_'])
ath = auth.filter_factory({'reseller_prefix': 'TEST'})(app)
self.assertEqual(ath.reseller_prefix, 'TEST_')
self.assertEqual(ath.reseller_prefixes, ['TEST_'])
ath = auth.filter_factory({'reseller_prefix': 'TEST_'})(app)
self.assertEqual(ath.reseller_prefix, 'TEST_')
self.assertEqual(ath.reseller_prefixes, ['TEST_'])
ath = auth.filter_factory({'reseller_prefix': ''})(app)
self.assertEqual(ath.reseller_prefix, '')
self.assertEqual(ath.reseller_prefixes, [''])
ath = auth.filter_factory({'reseller_prefix': ' '})(app)
self.assertEqual(ath.reseller_prefix, '')
self.assertEqual(ath.reseller_prefixes, [''])
ath = auth.filter_factory({'reseller_prefix': ' '' '})(app)
self.assertEqual(ath.reseller_prefix, '')
self.assertEqual(ath.reseller_prefixes, [''])
ath = auth.filter_factory({'reseller_prefix': " '', TEST"})(app)
self.assertEqual(ath.reseller_prefix, '')
self.assertTrue('' in ath.reseller_prefixes)
self.assertTrue('TEST_' in ath.reseller_prefixes)
def test_auth_prefix_init(self):
app = FakeApp()
ath = auth.filter_factory({})(app)
self.assertEqual(ath.auth_prefix, '/auth/')
ath = auth.filter_factory({'auth_prefix': ''})(app)
self.assertEqual(ath.auth_prefix, '/auth/')
ath = auth.filter_factory({'auth_prefix': '/'})(app)
self.assertEqual(ath.auth_prefix, '/auth/')
ath = auth.filter_factory({'auth_prefix': '/test/'})(app)
self.assertEqual(ath.auth_prefix, '/test/')
ath = auth.filter_factory({'auth_prefix': '/test'})(app)
self.assertEqual(ath.auth_prefix, '/test/')
ath = auth.filter_factory({'auth_prefix': 'test/'})(app)
self.assertEqual(ath.auth_prefix, '/test/')
ath = auth.filter_factory({'auth_prefix': 'test'})(app)
self.assertEqual(ath.auth_prefix, '/test/')
def test_top_level_deny(self):
req = self._make_request('/')
resp = req.get_response(self.test_auth)
self.assertEqual(resp.status_int, 401)
self.assertEqual(req.environ['swift.authorize'],
self.test_auth.denied_response)
self.assertEqual(resp.headers.get('Www-Authenticate'),
'Swift realm="unknown"')
def test_anon(self):
req = self._make_request('/v1/AUTH_account')
resp = req.get_response(self.test_auth)
self.assertEqual(resp.status_int, 401)
self.assertEqual(req.environ['swift.authorize'],
self.test_auth.authorize)
self.assertEqual(resp.headers.get('Www-Authenticate'),
'Swift realm="AUTH_account"')
def test_anon_badpath(self):
req = self._make_request('/v1')
resp = req.get_response(self.test_auth)
self.assertEqual(resp.status_int, 401)
self.assertEqual(resp.headers.get('Www-Authenticate'),
'Swift realm="unknown"')
def test_override_asked_for_but_not_allowed(self):
self.test_auth = \
auth.filter_factory({'allow_overrides': 'false'})(FakeApp())
req = self._make_request('/v1/AUTH_account',
environ={'swift.authorize_override': True})
resp = req.get_response(self.test_auth)
self.assertEqual(resp.status_int, 401)
self.assertEqual(resp.headers.get('Www-Authenticate'),
'Swift realm="AUTH_account"')
self.assertEqual(req.environ['swift.authorize'],
self.test_auth.authorize)
def test_override_asked_for_and_allowed(self):
self.test_auth = \
auth.filter_factory({'allow_overrides': 'true'})(FakeApp())
req = self._make_request('/v1/AUTH_account',
environ={'swift.authorize_override': True})
resp = req.get_response(self.test_auth)
self.assertEqual(resp.status_int, 404)
self.assertTrue('swift.authorize' not in req.environ)
def test_override_default_allowed(self):
req = self._make_request('/v1/AUTH_account',
environ={'swift.authorize_override': True})
resp = req.get_response(self.test_auth)
self.assertEqual(resp.status_int, 404)
self.assertTrue('swift.authorize' not in req.environ)
def test_auth_deny_non_reseller_prefix(self):
req = self._make_request('/v1/BLAH_account',
headers={'X-Auth-Token': 'BLAH_t'})
resp = req.get_response(self.test_auth)
self.assertEqual(resp.status_int, 401)
self.assertEqual(resp.headers.get('Www-Authenticate'),
'Swift realm="BLAH_account"')
self.assertEqual(req.environ['swift.authorize'],
self.test_auth.denied_response)
def test_auth_deny_non_reseller_prefix_no_override(self):
fake_authorize = lambda x: Response(status='500 Fake')
req = self._make_request('/v1/BLAH_account',
headers={'X-Auth-Token': 'BLAH_t'},
environ={'swift.authorize': fake_authorize}
)
resp = req.get_response(self.test_auth)
self.assertEqual(resp.status_int, 500)
self.assertEqual(req.environ['swift.authorize'], fake_authorize)
    def test_auth_no_reseller_prefix_deny(self):
        # Ensures that when we have no reseller prefix, we don't deny a request
        # outright but set up a denial swift.authorize and pass the request on
        # down the chain.
        local_app = FakeApp()
        local_auth = auth.filter_factory({'reseller_prefix': ''})(local_app)
        req = self._make_request('/v1/account',
                                 headers={'X-Auth-Token': 't'})
        resp = req.get_response(local_auth)
        self.assertEqual(resp.status_int, 401)
        self.assertEqual(resp.headers.get('Www-Authenticate'),
                         'Swift realm="account"')
        # The request still reached the backend app once; only the deferred
        # swift.authorize callback produced the 401.
        self.assertEqual(local_app.calls, 1)
        self.assertEqual(req.environ['swift.authorize'],
                         local_auth.denied_response)

    def test_auth_reseller_prefix_with_s3_deny(self):
        # Ensures that when we have a reseller prefix and using a middleware
        # relying on Http-Authorization (for example swift3), we don't deny a
        # request outright but set up a denial swift.authorize and pass the
        # request on down the chain.
        local_app = FakeApp()
        local_auth = auth.filter_factory({'reseller_prefix': 'PRE'})(local_app)
        req = self._make_request('/v1/account',
                                 headers={'X-Auth-Token': 't',
                                          'Authorization': 'AWS user:pw'})
        resp = req.get_response(local_auth)
        self.assertEqual(resp.status_int, 401)
        self.assertEqual(local_app.calls, 1)
        self.assertEqual(req.environ['swift.authorize'],
                         local_auth.denied_response)

    def test_auth_with_s3_authorization(self):
        # An S3-style Authorization header is validated via base64 helpers;
        # both are mocked so the signature check passes.
        local_app = FakeApp()
        local_auth = auth.filter_factory(
            {'user_s3_s3': 's3 .admin'})(local_app)
        req = self._make_request('/v1/AUTH_s3',
                                 headers={'X-Auth-Token': 't',
                                          'AUTHORIZATION': 'AWS s3:s3:pass'})
        with mock.patch('base64.urlsafe_b64decode') as msg, \
                mock.patch('base64.encodestring') as sign:
            msg.return_value = ''
            sign.return_value = 'pass'
            resp = req.get_response(local_auth)
        self.assertEqual(resp.status_int, 404)
        self.assertEqual(local_app.calls, 1)
        self.assertEqual(req.environ['swift.authorize'],
                         local_auth.authorize)

    def test_auth_no_reseller_prefix_no_token(self):
        # Check that normally we set up a call back to our authorize.
        local_auth = auth.filter_factory({'reseller_prefix': ''})(FakeApp())
        req = self._make_request('/v1/account')
        resp = req.get_response(local_auth)
        self.assertEqual(resp.status_int, 401)
        self.assertEqual(resp.headers.get('Www-Authenticate'),
                         'Swift realm="account"')
        self.assertEqual(req.environ['swift.authorize'],
                         local_auth.authorize)
        # Now make sure we don't override an existing swift.authorize when we
        # have no reseller prefix.
        local_auth = \
            auth.filter_factory({'reseller_prefix': ''})(FakeApp())
        local_authorize = lambda req: Response('test')
        req = self._make_request('/v1/account', environ={'swift.authorize':
                                 local_authorize})
        resp = req.get_response(local_auth)
        self.assertEqual(req.environ['swift.authorize'], local_authorize)
        self.assertEqual(resp.status_int, 200)
    def test_auth_fail(self):
        # An unknown token under our reseller prefix is rejected outright.
        resp = self._make_request(
            '/v1/AUTH_cfa',
            headers={'X-Auth-Token': 'AUTH_t'}).get_response(self.test_auth)
        self.assertEqual(resp.status_int, 401)
        self.assertEqual(resp.headers.get('Www-Authenticate'),
                         'Swift realm="AUTH_cfa"')

    def test_authorize_bad_path(self):
        # Unparseable paths: 401 when anonymous, 403 when authenticated.
        req = self._make_request('/badpath')
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 401)
        self.assertEqual(resp.headers.get('Www-Authenticate'),
                         'Swift realm="unknown"')
        req = self._make_request('/badpath')
        req.remote_user = 'act:usr,act,AUTH_cfa'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)

    def test_authorize_account_access(self):
        # Membership in the account's group (AUTH_cfa) grants access (None =
        # allowed); without it the user is forbidden.
        req = self._make_request('/v1/AUTH_cfa')
        req.remote_user = 'act:usr,act,AUTH_cfa'
        self.assertEqual(self.test_auth.authorize(req), None)
        req = self._make_request('/v1/AUTH_cfa')
        req.remote_user = 'act:usr,act'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)

    def test_authorize_acl_group_access(self):
        self.test_auth = auth.filter_factory({})(
            FakeApp(iter(NO_CONTENT_RESP * 3)))
        # No ACL at all: denied.
        req = self._make_request('/v1/AUTH_cfa')
        req.remote_user = 'act:usr,act'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)
        # ACL matching the account group or the exact user: allowed.
        req = self._make_request('/v1/AUTH_cfa')
        req.remote_user = 'act:usr,act'
        req.acl = 'act'
        self.assertEqual(self.test_auth.authorize(req), None)
        req = self._make_request('/v1/AUTH_cfa')
        req.remote_user = 'act:usr,act'
        req.acl = 'act:usr'
        self.assertEqual(self.test_auth.authorize(req), None)
        # ACL naming some other group or user: denied.
        req = self._make_request('/v1/AUTH_cfa')
        req.remote_user = 'act:usr,act'
        req.acl = 'act2'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)
        req = self._make_request('/v1/AUTH_cfa')
        req.remote_user = 'act:usr,act'
        req.acl = 'act:usr2'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)

    def test_deny_cross_reseller(self):
        # Tests that cross-reseller is denied, even if ACLs/group names match
        req = self._make_request('/v1/OTHER_cfa')
        req.remote_user = 'act:usr,act,AUTH_cfa'
        req.acl = 'act'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)
    def test_authorize_acl_referer_after_user_groups(self):
        # A user entry later in the ACL still matches even when a .r: referrer
        # entry comes first.
        req = self._make_request('/v1/AUTH_cfa/c')
        req.remote_user = 'act:usr'
        req.acl = '.r:*,act:usr'
        self.assertEqual(self.test_auth.authorize(req), None)

    def test_authorize_acl_referrer_access(self):
        self.test_auth = auth.filter_factory({})(
            FakeApp(iter(NO_CONTENT_RESP * 6)))
        # Authenticated user, no ACL: denied.
        req = self._make_request('/v1/AUTH_cfa/c')
        req.remote_user = 'act:usr,act'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)
        # .r:* plus .rlistings grants full read access.
        req = self._make_request('/v1/AUTH_cfa/c')
        req.remote_user = 'act:usr,act'
        req.acl = '.r:*,.rlistings'
        self.assertEqual(self.test_auth.authorize(req), None)
        req = self._make_request('/v1/AUTH_cfa/c')
        req.remote_user = 'act:usr,act'
        req.acl = '.r:*'  # No listings allowed
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)
        # Referrer-restricted ACL without a matching Referer header: denied.
        req = self._make_request('/v1/AUTH_cfa/c')
        req.remote_user = 'act:usr,act'
        req.acl = '.r:.example.com,.rlistings'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)
        req = self._make_request('/v1/AUTH_cfa/c')
        req.remote_user = 'act:usr,act'
        req.referer = 'http://www.example.com/index.html'
        req.acl = '.r:.example.com,.rlistings'
        self.assertEqual(self.test_auth.authorize(req), None)
        # Same matrix again for fully anonymous requests (401 instead of 403).
        req = self._make_request('/v1/AUTH_cfa/c')
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 401)
        self.assertEqual(resp.headers.get('Www-Authenticate'),
                         'Swift realm="AUTH_cfa"')
        req = self._make_request('/v1/AUTH_cfa/c')
        req.acl = '.r:*,.rlistings'
        self.assertEqual(self.test_auth.authorize(req), None)
        req = self._make_request('/v1/AUTH_cfa/c')
        req.acl = '.r:*'  # No listings allowed
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 401)
        self.assertEqual(resp.headers.get('Www-Authenticate'),
                         'Swift realm="AUTH_cfa"')
        req = self._make_request('/v1/AUTH_cfa/c')
        req.acl = '.r:.example.com,.rlistings'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 401)
        self.assertEqual(resp.headers.get('Www-Authenticate'),
                         'Swift realm="AUTH_cfa"')
        req = self._make_request('/v1/AUTH_cfa/c')
        req.referer = 'http://www.example.com/index.html'
        req.acl = '.r:.example.com,.rlistings'
        self.assertEqual(self.test_auth.authorize(req), None)

    def test_detect_reseller_request(self):
        # A cached .reseller_admin token should flag the environ so other
        # middlewares can see this is a reseller request.
        req = self._make_request('/v1/AUTH_admin',
                                 headers={'X-Auth-Token': 'AUTH_t'})
        cache_key = 'AUTH_/token/AUTH_t'
        cache_entry = (time() + 3600, '.reseller_admin')
        req.environ['swift.cache'].set(cache_key, cache_entry)
        req.get_response(self.test_auth)
        self.assertTrue(req.environ.get('reseller_request', False))
    def test_account_put_permissions(self):
        # Only .reseller_admin may PUT accounts; ordinary users, other-account
        # admins, and even the target account's own admin are all denied.
        self.test_auth = auth.filter_factory({})(
            FakeApp(iter(NO_CONTENT_RESP * 4)))
        req = self._make_request('/v1/AUTH_new',
                                 environ={'REQUEST_METHOD': 'PUT'})
        req.remote_user = 'act:usr,act'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)
        req = self._make_request('/v1/AUTH_new',
                                 environ={'REQUEST_METHOD': 'PUT'})
        req.remote_user = 'act:usr,act,AUTH_other'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)
        # Even PUTs to your own account as account admin should fail
        req = self._make_request('/v1/AUTH_old',
                                 environ={'REQUEST_METHOD': 'PUT'})
        req.remote_user = 'act:usr,act,AUTH_old'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)
        req = self._make_request('/v1/AUTH_new',
                                 environ={'REQUEST_METHOD': 'PUT'})
        req.remote_user = 'act:usr,act,.reseller_admin'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp, None)
        # .super_admin is not something the middleware should ever see or care
        # about
        req = self._make_request('/v1/AUTH_new',
                                 environ={'REQUEST_METHOD': 'PUT'})
        req.remote_user = 'act:usr,act,.super_admin'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)

    def test_account_delete_permissions(self):
        # Mirror of the PUT permission matrix for account DELETEs.
        self.test_auth = auth.filter_factory({})(
            FakeApp(iter(NO_CONTENT_RESP * 4)))
        req = self._make_request('/v1/AUTH_new',
                                 environ={'REQUEST_METHOD': 'DELETE'})
        req.remote_user = 'act:usr,act'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)
        req = self._make_request('/v1/AUTH_new',
                                 environ={'REQUEST_METHOD': 'DELETE'})
        req.remote_user = 'act:usr,act,AUTH_other'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)
        # Even DELETEs to your own account as account admin should fail
        req = self._make_request('/v1/AUTH_old',
                                 environ={'REQUEST_METHOD': 'DELETE'})
        req.remote_user = 'act:usr,act,AUTH_old'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)
        req = self._make_request('/v1/AUTH_new',
                                 environ={'REQUEST_METHOD': 'DELETE'})
        req.remote_user = 'act:usr,act,.reseller_admin'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp, None)
        # .super_admin is not something the middleware should ever see or care
        # about
        req = self._make_request('/v1/AUTH_new',
                                 environ={'REQUEST_METHOD': 'DELETE'})
        req.remote_user = 'act:usr,act,.super_admin'
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp.status_int, 403)
    def test_get_token_success(self):
        # Example of how to simulate the auth transaction
        test_auth = auth.filter_factory({'user_ac_user': 'testing'})(FakeApp())
        req = self._make_request(
            '/auth/v1.0',
            headers={'X-Auth-User': 'ac:user', 'X-Auth-Key': 'testing'})
        resp = req.get_response(test_auth)
        self.assertEqual(resp.status_int, 200)
        self.assertTrue(resp.headers['x-storage-url'].endswith('/v1/AUTH_ac'))
        self.assertTrue(resp.headers['x-auth-token'].startswith('AUTH_'))
        # Storage token mirrors the auth token for legacy clients.
        self.assertEqual(resp.headers['x-auth-token'],
                         resp.headers['x-storage-token'])
        # Expiry is reported relative to now, so allow a half-second slop.
        self.assertAlmostEqual(int(resp.headers['x-auth-token-expires']),
                               auth.DEFAULT_TOKEN_LIFE - 0.5, delta=0.5)
        self.assertGreater(len(resp.headers['x-auth-token']), 10)
def test_get_token_success_other_auth_prefix(self):
test_auth = auth.filter_factory({'user_ac_user': 'testing',
'auth_prefix': '/other/'})(FakeApp())
req = self._make_request(
'/other/v1.0',
headers={'X-Auth-User': 'ac:user', 'X-Auth-Key': 'testing'})
resp = req.get_response(test_auth)
self.assertEqual(resp.status_int, 200)
self.assertTrue(resp.headers['x-storage-url'].endswith('/v1/AUTH_ac'))
self.assertTrue(resp.headers['x-auth-token'].startswith('AUTH_'))
self.assertTrue(len(resp.headers['x-auth-token']) > 10)
    def test_use_token_success(self):
        # Example of how to simulate an authorized request
        test_auth = auth.filter_factory({'user_acct_user': 'testing'})(
            FakeApp(iter(NO_CONTENT_RESP * 1)))
        req = self._make_request('/v1/AUTH_acct',
                                 headers={'X-Auth-Token': 'AUTH_t'})
        # Pre-populate the token cache so the token validates without a
        # round-trip to the auth account.
        cache_key = 'AUTH_/token/AUTH_t'
        cache_entry = (time() + 3600, 'AUTH_acct')
        req.environ['swift.cache'].set(cache_key, cache_entry)
        resp = req.get_response(test_auth)
        self.assertEqual(resp.status_int, 204)
def test_get_token_fail(self):
resp = self._make_request('/auth/v1.0').get_response(self.test_auth)
self.assertEqual(resp.status_int, 401)
self.assertEqual(resp.headers.get('Www-Authenticate'),
'Swift realm="unknown"')
resp = self._make_request(
'/auth/v1.0',
headers={'X-Auth-User': 'act:usr',
'X-Auth-Key': 'key'}).get_response(self.test_auth)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual(resp.headers.get('Www-Authenticate'),
'Swift realm="act"')
    def test_get_token_fail_invalid_x_auth_user_format(self):
        # X-Auth-User must be "account:user"; a bare user name is rejected.
        resp = self._make_request(
            '/auth/v1/act/auth',
            headers={'X-Auth-User': 'usr',
                     'X-Auth-Key': 'key'}).get_response(self.test_auth)
        self.assertEqual(resp.status_int, 401)
        self.assertEqual(resp.headers.get('Www-Authenticate'),
                         'Swift realm="act"')

    def test_get_token_fail_non_matching_account_in_request(self):
        # The account in X-Auth-User must match the account in the URL path.
        resp = self._make_request(
            '/auth/v1/act/auth',
            headers={'X-Auth-User': 'act2:usr',
                     'X-Auth-Key': 'key'}).get_response(self.test_auth)
        self.assertEqual(resp.status_int, 401)
        self.assertEqual(resp.headers.get('Www-Authenticate'),
                         'Swift realm="act"')

    def test_get_token_fail_bad_path(self):
        # Extra path segments after the auth endpoint are a client error.
        resp = self._make_request(
            '/auth/v1/act/auth/invalid',
            headers={'X-Auth-User': 'act:usr',
                     'X-Auth-Key': 'key'}).get_response(self.test_auth)
        self.assertEqual(resp.status_int, 400)

    def test_get_token_fail_missing_key(self):
        # X-Auth-Key is required to obtain a token.
        resp = self._make_request(
            '/auth/v1/act/auth',
            headers={'X-Auth-User': 'act:usr'}).get_response(self.test_auth)
        self.assertEqual(resp.status_int, 401)
        self.assertEqual(resp.headers.get('Www-Authenticate'),
                         'Swift realm="act"')

    def test_object_name_containing_slash(self):
        # Slashes in object names must not confuse path parsing / auth.
        test_auth = auth.filter_factory({'user_acct_user': 'testing'})(
            FakeApp(iter(NO_CONTENT_RESP * 1)))
        req = self._make_request('/v1/AUTH_acct/cont/obj/name/with/slash',
                                 headers={'X-Auth-Token': 'AUTH_t'})
        cache_key = 'AUTH_/token/AUTH_t'
        cache_entry = (time() + 3600, 'AUTH_acct')
        req.environ['swift.cache'].set(cache_key, cache_entry)
        resp = req.get_response(test_auth)
        self.assertEqual(resp.status_int, 204)
    def test_storage_url_default(self):
        # Without an HTTP Host header, the storage URL is built from
        # SERVER_NAME/SERVER_PORT.
        self.test_auth = \
            auth.filter_factory({'user_test_tester': 'testing'})(FakeApp())
        req = self._make_request(
            '/auth/v1.0',
            headers={'X-Auth-User': 'test:tester', 'X-Auth-Key': 'testing'})
        del req.environ['HTTP_HOST']
        req.environ['SERVER_NAME'] = 'bob'
        req.environ['SERVER_PORT'] = '1234'
        resp = req.get_response(self.test_auth)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['x-storage-url'],
                         'http://bob:1234/v1/AUTH_test')

    def test_storage_url_based_on_host(self):
        # An HTTP Host header takes precedence over SERVER_NAME/SERVER_PORT.
        self.test_auth = \
            auth.filter_factory({'user_test_tester': 'testing'})(FakeApp())
        req = self._make_request(
            '/auth/v1.0',
            headers={'X-Auth-User': 'test:tester', 'X-Auth-Key': 'testing'})
        req.environ['HTTP_HOST'] = 'somehost:5678'
        req.environ['SERVER_NAME'] = 'bob'
        req.environ['SERVER_PORT'] = '1234'
        resp = req.get_response(self.test_auth)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['x-storage-url'],
                         'http://somehost:5678/v1/AUTH_test')

    def test_storage_url_overridden_scheme(self):
        # storage_url_scheme in the filter conf overrides the URL scheme.
        self.test_auth = \
            auth.filter_factory({'user_test_tester': 'testing',
                                 'storage_url_scheme': 'fake'})(FakeApp())
        req = self._make_request(
            '/auth/v1.0',
            headers={'X-Auth-User': 'test:tester', 'X-Auth-Key': 'testing'})
        req.environ['HTTP_HOST'] = 'somehost:5678'
        req.environ['SERVER_NAME'] = 'bob'
        req.environ['SERVER_PORT'] = '1234'
        resp = req.get_response(self.test_auth)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['x-storage-url'],
                         'fake://somehost:5678/v1/AUTH_test')

    def test_use_old_token_from_memcached(self):
        # A still-valid cached token is reused rather than minting a new one,
        # and its remaining lifetime is reported.
        self.test_auth = \
            auth.filter_factory({'user_test_tester': 'testing',
                                 'storage_url_scheme': 'fake'})(FakeApp())
        req = self._make_request(
            '/auth/v1.0',
            headers={'X-Auth-User': 'test:tester', 'X-Auth-Key': 'testing'})
        req.environ['HTTP_HOST'] = 'somehost:5678'
        req.environ['SERVER_NAME'] = 'bob'
        req.environ['SERVER_PORT'] = '1234'
        req.environ['swift.cache'].set('AUTH_/user/test:tester', 'uuid_token')
        expires = time() + 180
        req.environ['swift.cache'].set('AUTH_/token/uuid_token',
                                       (expires, 'test,test:tester'))
        resp = req.get_response(self.test_auth)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['x-auth-token'], 'uuid_token')
        self.assertEqual(resp.headers['x-auth-token'],
                         resp.headers['x-storage-token'])
        self.assertAlmostEqual(int(resp.headers['x-auth-token-expires']),
                               179.5, delta=0.5)

    def test_old_token_overdate(self):
        # An expired cached token (expires=0) is discarded and a fresh
        # AUTH_tk* token is issued with the full default lifetime.
        self.test_auth = \
            auth.filter_factory({'user_test_tester': 'testing',
                                 'storage_url_scheme': 'fake'})(FakeApp())
        req = self._make_request(
            '/auth/v1.0',
            headers={'X-Auth-User': 'test:tester', 'X-Auth-Key': 'testing'})
        req.environ['HTTP_HOST'] = 'somehost:5678'
        req.environ['SERVER_NAME'] = 'bob'
        req.environ['SERVER_PORT'] = '1234'
        req.environ['swift.cache'].set('AUTH_/user/test:tester', 'uuid_token')
        req.environ['swift.cache'].set('AUTH_/token/uuid_token',
                                       (0, 'test,test:tester'))
        resp = req.get_response(self.test_auth)
        self.assertEqual(resp.status_int, 200)
        self.assertNotEqual(resp.headers['x-auth-token'], 'uuid_token')
        self.assertEqual(resp.headers['x-auth-token'][:7], 'AUTH_tk')
        self.assertAlmostEqual(int(resp.headers['x-auth-token-expires']),
                               auth.DEFAULT_TOKEN_LIFE - 0.5, delta=0.5)

    def test_old_token_with_old_data(self):
        # A cached token whose group data no longer matches the current user
        # config (stale '.role') is replaced with a fresh token.
        self.test_auth = \
            auth.filter_factory({'user_test_tester': 'testing',
                                 'storage_url_scheme': 'fake'})(FakeApp())
        req = self._make_request(
            '/auth/v1.0',
            headers={'X-Auth-User': 'test:tester', 'X-Auth-Key': 'testing'})
        req.environ['HTTP_HOST'] = 'somehost:5678'
        req.environ['SERVER_NAME'] = 'bob'
        req.environ['SERVER_PORT'] = '1234'
        req.environ['swift.cache'].set('AUTH_/user/test:tester', 'uuid_token')
        req.environ['swift.cache'].set('AUTH_/token/uuid_token',
                                       (time() + 99, 'test,test:tester,.role'))
        resp = req.get_response(self.test_auth)
        self.assertEqual(resp.status_int, 200)
        self.assertNotEqual(resp.headers['x-auth-token'], 'uuid_token')
        self.assertEqual(resp.headers['x-auth-token'][:7], 'AUTH_tk')
        self.assertAlmostEqual(int(resp.headers['x-auth-token-expires']),
                               auth.DEFAULT_TOKEN_LIFE - 0.5, delta=0.5)
    def test_reseller_admin_is_owner(self):
        # Wrap authorize to record the swift_owner flag it sets in the
        # environ; .reseller_admin must be treated as an owner.
        orig_authorize = self.test_auth.authorize
        owner_values = []

        def mitm_authorize(req):
            rv = orig_authorize(req)
            owner_values.append(req.environ.get('swift_owner', False))
            return rv

        self.test_auth.authorize = mitm_authorize
        req = self._make_request('/v1/AUTH_cfa',
                                 headers={'X-Auth-Token': 'AUTH_t'})
        req.remote_user = '.reseller_admin'
        self.test_auth.authorize(req)
        self.assertEqual(owner_values, [True])

    def test_admin_is_owner(self):
        # Membership in the account's own group (AUTH_cfa) makes the user an
        # owner of that account.
        orig_authorize = self.test_auth.authorize
        owner_values = []

        def mitm_authorize(req):
            rv = orig_authorize(req)
            owner_values.append(req.environ.get('swift_owner', False))
            return rv

        self.test_auth.authorize = mitm_authorize
        req = self._make_request(
            '/v1/AUTH_cfa',
            headers={'X-Auth-Token': 'AUTH_t'})
        req.remote_user = 'AUTH_cfa'
        self.test_auth.authorize(req)
        self.assertEqual(owner_values, [True])

    def test_regular_is_not_owner(self):
        # A plain user of some other account is never an owner.
        orig_authorize = self.test_auth.authorize
        owner_values = []

        def mitm_authorize(req):
            rv = orig_authorize(req)
            owner_values.append(req.environ.get('swift_owner', False))
            return rv

        self.test_auth.authorize = mitm_authorize
        req = self._make_request(
            '/v1/AUTH_cfa/c',
            headers={'X-Auth-Token': 'AUTH_t'})
        req.remote_user = 'act:usr'
        self.test_auth.authorize(req)
        self.assertEqual(owner_values, [False])
    def test_sync_request_success(self):
        # A container-sync DELETE with the right sync key and a timestamp is
        # allowed through without a token.
        self.test_auth.app = FakeApp(iter(NO_CONTENT_RESP * 1),
                                     sync_key='secret')
        req = self._make_request(
            '/v1/AUTH_cfa/c/o',
            environ={'REQUEST_METHOD': 'DELETE'},
            headers={'x-container-sync-key': 'secret',
                     'x-timestamp': '123.456'})
        req.remote_addr = '127.0.0.1'
        resp = req.get_response(self.test_auth)
        self.assertEqual(resp.status_int, 204)

    def test_sync_request_fail_key(self):
        # Wrong client key, wrong container key, or no container key at all:
        # every mismatch is a 401 challenge.
        self.test_auth.app = FakeApp(sync_key='secret')
        req = self._make_request(
            '/v1/AUTH_cfa/c/o',
            environ={'REQUEST_METHOD': 'DELETE'},
            headers={'x-container-sync-key': 'wrongsecret',
                     'x-timestamp': '123.456'})
        req.remote_addr = '127.0.0.1'
        resp = req.get_response(self.test_auth)
        self.assertEqual(resp.status_int, 401)
        self.assertEqual(resp.headers.get('Www-Authenticate'),
                         'Swift realm="AUTH_cfa"')
        self.test_auth.app = FakeApp(sync_key='othersecret')
        req = self._make_request(
            '/v1/AUTH_cfa/c/o',
            environ={'REQUEST_METHOD': 'DELETE'},
            headers={'x-container-sync-key': 'secret',
                     'x-timestamp': '123.456'})
        req.remote_addr = '127.0.0.1'
        resp = req.get_response(self.test_auth)
        self.assertEqual(resp.status_int, 401)
        self.assertEqual(resp.headers.get('Www-Authenticate'),
                         'Swift realm="AUTH_cfa"')
        self.test_auth.app = FakeApp(sync_key=None)
        req = self._make_request(
            '/v1/AUTH_cfa/c/o',
            environ={'REQUEST_METHOD': 'DELETE'},
            headers={'x-container-sync-key': 'secret',
                     'x-timestamp': '123.456'})
        req.remote_addr = '127.0.0.1'
        resp = req.get_response(self.test_auth)
        self.assertEqual(resp.status_int, 401)
        self.assertEqual(resp.headers.get('Www-Authenticate'),
                         'Swift realm="AUTH_cfa"')

    def test_sync_request_fail_no_timestamp(self):
        # Sync requests without x-timestamp are not trusted.
        self.test_auth.app = FakeApp(sync_key='secret')
        req = self._make_request(
            '/v1/AUTH_cfa/c/o',
            environ={'REQUEST_METHOD': 'DELETE'},
            headers={'x-container-sync-key': 'secret'})
        req.remote_addr = '127.0.0.1'
        resp = req.get_response(self.test_auth)
        self.assertEqual(resp.status_int, 401)
        self.assertEqual(resp.headers.get('Www-Authenticate'),
                         'Swift realm="AUTH_cfa"')

    def test_sync_request_success_lb_sync_host(self):
        # Behind a load balancer the client address may arrive via
        # x-forwarded-for or x-cluster-client-ip; both must work.
        self.test_auth.app = FakeApp(iter(NO_CONTENT_RESP * 1),
                                     sync_key='secret')
        req = self._make_request(
            '/v1/AUTH_cfa/c/o',
            environ={'REQUEST_METHOD': 'DELETE'},
            headers={'x-container-sync-key': 'secret',
                     'x-timestamp': '123.456',
                     'x-forwarded-for': '127.0.0.1'})
        req.remote_addr = '127.0.0.2'
        resp = req.get_response(self.test_auth)
        self.assertEqual(resp.status_int, 204)
        self.test_auth.app = FakeApp(iter(NO_CONTENT_RESP * 1),
                                     sync_key='secret')
        req = self._make_request(
            '/v1/AUTH_cfa/c/o',
            environ={'REQUEST_METHOD': 'DELETE'},
            headers={'x-container-sync-key': 'secret',
                     'x-timestamp': '123.456',
                     'x-cluster-client-ip': '127.0.0.1'})
        req.remote_addr = '127.0.0.2'
        resp = req.get_response(self.test_auth)
        self.assertEqual(resp.status_int, 204)

    def test_options_call(self):
        # OPTIONS (CORS preflight) requests are always allowed (None).
        req = self._make_request('/v1/AUTH_cfa/c/o',
                                 environ={'REQUEST_METHOD': 'OPTIONS'})
        resp = self.test_auth.authorize(req)
        self.assertEqual(resp, None)
    def test_get_user_group(self):
        # More tests in TestGetUserGroups class
        app = FakeApp()
        ath = auth.filter_factory({})(app)
        # .admin users get the account group (AUTH_test) appended.
        ath.users = {'test:tester': {'groups': ['.admin']}}
        groups = ath._get_user_groups('test', 'test:tester', 'AUTH_test')
        self.assertEqual(groups, 'test,test:tester,AUTH_test')
        # Non-admin users only get account and user groups.
        ath.users = {'test:tester': {'groups': []}}
        groups = ath._get_user_groups('test', 'test:tester', 'AUTH_test')
        self.assertEqual(groups, 'test,test:tester')
def test_auth_scheme(self):
req = self._make_request('/v1/BLAH_account',
headers={'X-Auth-Token': 'BLAH_t'})
resp = req.get_response(self.test_auth)
self.assertEqual(resp.status_int, 401)
self.assertTrue('Www-Authenticate' in resp.headers)
self.assertEqual(resp.headers.get('Www-Authenticate'),
'Swift realm="BLAH_account"')
class TestAuthWithMultiplePrefixes(TestAuth):
    """
    Re-runs the entire TestAuth suite, but with the middleware configured
    for several reseller_prefix values instead of the default single one.
    """

    def setUp(self):
        conf = {'reseller_prefix': 'AUTH_, SOMEOTHER_, YETANOTHER_'}
        self.test_auth = auth.filter_factory(conf)(FakeApp())
class TestGetUserGroups(unittest.TestCase):
    """Focused tests for TempAuth._get_user_groups."""

    def test_custom_url_config(self):
        # A per-user storage URL in the conf does not stop the configured
        # account group from being included alongside the URL's account.
        test_auth = auth.filter_factory({
            'user_test_tester':
            'testing .admin http://saio:8080/v1/AUTH_monkey'})(FakeApp())
        self.assertEqual(
            test_auth._get_user_groups('test', 'test:tester', 'AUTH_monkey'),
            'test,test:tester,AUTH_test,AUTH_monkey')

    def test_no_prefix_reseller(self):
        test_auth = auth.filter_factory({'reseller_prefix': ''})(FakeApp())
        # With an empty prefix, admin and non-admin users get the same
        # minimal group string.
        for user_groups in (['.admin'], []):
            test_auth.users = {'test:tester': {'groups': user_groups}}
            self.assertEqual(
                test_auth._get_user_groups('test', 'test:tester', 'test'),
                'test,test:tester')

    def test_single_reseller(self):
        test_auth = auth.filter_factory({})(FakeApp())
        # Only .admin users pick up the account group.
        test_auth.users = {'test:tester': {'groups': ['.admin']}}
        self.assertEqual(
            test_auth._get_user_groups('test', 'test:tester', 'AUTH_test'),
            'test,test:tester,AUTH_test')
        test_auth.users = {'test:tester': {'groups': []}}
        self.assertEqual(
            test_auth._get_user_groups('test', 'test:tester', 'AUTH_test'),
            'test,test:tester')

    def test_multiple_reseller(self):
        test_auth = auth.filter_factory(
            {'reseller_prefix': 'AUTH_, SOMEOTHER_, YETANOTHER_'})(FakeApp())
        self.assertEqual(test_auth.reseller_prefixes,
                         ['AUTH_', 'SOMEOTHER_', 'YETANOTHER_'])
        # Admins get the account group under every configured prefix.
        test_auth.users = {'test:tester': {'groups': ['.admin']}}
        self.assertEqual(
            test_auth._get_user_groups('test', 'test:tester', 'AUTH_test'),
            'test,test:tester,AUTH_test,SOMEOTHER_test,YETANOTHER_test')
        test_auth.users = {'test:tester': {'groups': []}}
        self.assertEqual(
            test_auth._get_user_groups('test', 'test:tester', 'AUTH_test'),
            'test,test:tester')
class TestDefinitiveAuth(unittest.TestCase):
    """Tests for TempAuth._is_definitive_auth prefix matching."""

    def setUp(self):
        self.test_auth = auth.filter_factory(
            {'reseller_prefix': 'AUTH_, SOMEOTHER_'})(FakeApp())

    def test_noreseller_prefix(self):
        # With no reseller prefix configured, no path is definitively ours.
        test_auth = auth.filter_factory({'reseller_prefix': ''})(FakeApp())
        for path in ('/v1/test', '/v1/AUTH_test', '/v1/BLAH_test'):
            self.assertEqual(
                test_auth._is_definitive_auth(path=path), False)

    def test_blank_prefix(self):
        # A quoted blank prefix never matches definitively; the named prefix
        # matches only with the underscore separator.
        test_auth = auth.filter_factory({'reseller_prefix':
                                         " '', SOMEOTHER"})(FakeApp())
        self.assertEqual(
            test_auth._is_definitive_auth(path='/v1/test'), False)
        self.assertEqual(
            test_auth._is_definitive_auth(path='/v1/SOMEOTHER_test'), True)
        self.assertEqual(
            test_auth._is_definitive_auth(path='/v1/SOMEOTHERtest'), False)

    def test_default_prefix(self):
        # An empty conf and an explicit 'AUTH' prefix behave identically.
        for conf in ({}, {'reseller_prefix': 'AUTH'}):
            test_auth = auth.filter_factory(conf)(FakeApp())
            self.assertEqual(
                test_auth._is_definitive_auth(path='/v1/AUTH_test'), True)
            self.assertEqual(
                test_auth._is_definitive_auth(path='/v1/BLAH_test'), False)

    def test_multiple_prefixes(self):
        test_auth = auth.filter_factory({'reseller_prefix':
                                         'AUTH, SOMEOTHER'})(FakeApp())
        self.assertEqual(
            test_auth._is_definitive_auth(path='/v1/AUTH_test'), True)
        self.assertEqual(
            test_auth._is_definitive_auth(path='/v1/SOMEOTHER_test'), True)
        self.assertEqual(
            test_auth._is_definitive_auth(path='/v1/BLAH_test'), False)
class TestParseUserCreation(unittest.TestCase):
    """Tests parsing of user_* / user64_* conf entries into the users map."""

    def test_parse_user_creation(self):
        auth_filter = auth.filter_factory({
            'reseller_prefix': 'ABC',
            'user_test_tester3': 'testing',
            'user_has_url': 'urlly .admin http://a.b/v1/DEF_has',
            'user_admin_admin': 'admin .admin .reseller_admin',
        })(FakeApp())
        # Default storage URLs use $HOST plus the reseller prefix; an explicit
        # URL in the conf value overrides that.
        self.assertEqual(auth_filter.users, {
            'admin:admin': {
                'url': '$HOST/v1/ABC_admin',
                'groups': ['.admin', '.reseller_admin'],
                'key': 'admin'
            }, 'test:tester3': {
                'url': '$HOST/v1/ABC_test',
                'groups': [],
                'key': 'testing'
            }, 'has:url': {
                'url': 'http://a.b/v1/DEF_has',
                'groups': ['.admin'],
                'key': 'urlly'
            },
        })

    def test_base64_encoding(self):
        # user64_* entries carry base64-encoded (padding stripped) account
        # and user names, for names that aren't valid in conf keys.
        auth_filter = auth.filter_factory({
            'reseller_prefix': 'ABC',
            'user64_%s_%s' % (
                b64encode('test').rstrip('='),
                b64encode('tester3').rstrip('=')):
            'testing .reseller_admin',
            'user64_%s_%s' % (
                b64encode('user_foo').rstrip('='),
                b64encode('ab').rstrip('=')):
            'urlly .admin http://a.b/v1/DEF_has',
        })(FakeApp())
        self.assertEqual(auth_filter.users, {
            'test:tester3': {
                'url': '$HOST/v1/ABC_test',
                'groups': ['.reseller_admin'],
                'key': 'testing'
            }, 'user_foo:ab': {
                'url': 'http://a.b/v1/DEF_has',
                'groups': ['.admin'],
                'key': 'urlly'
            },
        })

    def test_key_with_no_value(self):
        # A user entry with an empty key is a configuration error.
        self.assertRaises(ValueError, auth.filter_factory({
            'user_test_tester3': 'testing',
            'user_bob_bobby': '',
            'user_admin_admin': 'admin .admin .reseller_admin',
        }), FakeApp())
class TestAccountAcls(unittest.TestCase):
"""
These tests use a single reseller prefix (AUTH_) and the
target paths are /v1/AUTH_<blah>
"""
    def setUp(self):
        # Default (single-prefix) configuration; subclasses may override.
        self.reseller_prefix = {}
        self.accpre = 'AUTH'

    def _make_request(self, path, **kwargs):
        # Our TestAccountAcls default request will have a valid auth token
        version, acct, _ = split_path(path, 1, 3, True)
        headers = kwargs.pop('headers', {'X-Auth-Token': 'AUTH_t'})
        user_groups = kwargs.pop('user_groups', 'AUTH_firstacct')

        # The account being accessed will have account ACLs
        acl = {'admin': ['AUTH_admin'], 'read-write': ['AUTH_rw'],
               'read-only': ['AUTH_ro']}
        header_data = {'core-access-control':
                       format_acl(version=2, acl_dict=acl)}
        acls = kwargs.pop('acls', header_data)

        req = Request.blank(path, headers=headers, **kwargs)

        # Authorize the token by populating the request's cache
        req.environ['swift.cache'] = FakeMemcache()
        cache_key = 'AUTH_/token/AUTH_t'
        cache_entry = (time() + 3600, user_groups)
        req.environ['swift.cache'].set(cache_key, cache_entry)

        # Pretend get_account_info returned ACLs in sysmeta, and we cached that
        cache_key = 'account/%s' % acct
        cache_entry = {'sysmeta': acls}
        req.environ['swift.cache'].set(cache_key, cache_entry)
        return req

    def _conf(self, moreconf):
        # Merge extra settings into the base reseller_prefix conf.
        conf = self.reseller_prefix
        conf.update(moreconf)
        return conf
    def test_account_acl_success(self):
        test_auth = auth.filter_factory(
            self._conf({'user_admin_user': 'testing'}))(
                FakeApp(iter(NO_CONTENT_RESP * 1)))
        # admin (not a swift admin) wants to read from otheracct
        req = self._make_request('/v1/%s_otheract' % self.accpre,
                                 user_groups="AUTH_admin")
        # The request returned by _make_request should be allowed
        resp = req.get_response(test_auth)
        self.assertEqual(resp.status_int, 204)

    def test_account_acl_failures(self):
        test_auth = auth.filter_factory(
            self._conf({'user_admin_user': 'testing'}))(
                FakeApp())
        # If I'm not authed as anyone on the ACLs, I shouldn't get in
        req = self._make_request('/v1/%s_otheract' % self.accpre,
                                 user_groups="AUTH_bob")
        resp = req.get_response(test_auth)
        self.assertEqual(resp.status_int, 403)
        # If the target account has no ACLs, a non-owner shouldn't get in
        req = self._make_request('/v1/%s_otheract' % self.accpre,
                                 user_groups="AUTH_admin",
                                 acls={})
        resp = req.get_response(test_auth)
        self.assertEqual(resp.status_int, 403)

    def test_admin_privileges(self):
        test_auth = auth.filter_factory(
            self._conf({'user_admin_user': 'testing'}))(
                FakeApp(iter(NO_CONTENT_RESP * 18)))
        for target in (
                '/v1/%s_otheracct' % self.accpre,
                '/v1/%s_otheracct/container' % self.accpre,
                '/v1/%s_otheracct/container/obj' % self.accpre):
            for method in ('GET', 'HEAD', 'OPTIONS', 'PUT', 'POST', 'DELETE'):
                # Admin ACL user can do anything
                req = self._make_request(target, user_groups="AUTH_admin",
                                         environ={'REQUEST_METHOD': method})
                resp = req.get_response(test_auth)
                self.assertEqual(resp.status_int, 204)

                # swift_owner should be set to True
                if method != 'OPTIONS':
                    self.assertTrue(req.environ.get('swift_owner'))

    def test_readwrite_privileges(self):
        test_auth = auth.filter_factory(
            self._conf({'user_rw_user': 'testing'}))(
                FakeApp(iter(NO_CONTENT_RESP * 15)))
        for target in ('/v1/%s_otheracct' % self.accpre,):
            for method in ('GET', 'HEAD', 'OPTIONS'):
                # Read-Write user can read account data
                req = self._make_request(target, user_groups="AUTH_rw",
                                         environ={'REQUEST_METHOD': method})
                resp = req.get_response(test_auth)
                self.assertEqual(resp.status_int, 204)

                # swift_owner should NOT be set to True
                self.assertFalse(req.environ.get('swift_owner'))

            # RW user should NOT be able to PUT, POST, or DELETE to the account
            for method in ('PUT', 'POST', 'DELETE'):
                req = self._make_request(target, user_groups="AUTH_rw",
                                         environ={'REQUEST_METHOD': method})
                resp = req.get_response(test_auth)
                self.assertEqual(resp.status_int, 403)

        # RW user should be able to GET, PUT, POST, or DELETE to containers
        # and objects
        for target in ('/v1/%s_otheracct/c' % self.accpre,
                       '/v1/%s_otheracct/c/o' % self.accpre):
            for method in ('GET', 'HEAD', 'OPTIONS', 'PUT', 'POST', 'DELETE'):
                req = self._make_request(target, user_groups="AUTH_rw",
                                         environ={'REQUEST_METHOD': method})
                resp = req.get_response(test_auth)
                self.assertEqual(resp.status_int, 204)

    def test_readonly_privileges(self):
        test_auth = auth.filter_factory(
            self._conf({'user_ro_user': 'testing'}))(
                FakeApp(iter(NO_CONTENT_RESP * 9)))
        # ReadOnly user should NOT be able to PUT, POST, or DELETE to account,
        # container, or object
        for target in ('/v1/%s_otheracct' % self.accpre,
                       '/v1/%s_otheracct/cont' % self.accpre,
                       '/v1/%s_otheracct/cont/obj' % self.accpre):
            for method in ('GET', 'HEAD', 'OPTIONS'):
                req = self._make_request(target, user_groups="AUTH_ro",
                                         environ={'REQUEST_METHOD': method})
                resp = req.get_response(test_auth)
                self.assertEqual(resp.status_int, 204)
                # swift_owner should NOT be set to True for the ReadOnly ACL
                self.assertFalse(req.environ.get('swift_owner'))
            for method in ('PUT', 'POST', 'DELETE'):
                req = self._make_request(target, user_groups="AUTH_ro",
                                         environ={'REQUEST_METHOD': method})
                resp = req.get_response(test_auth)
                self.assertEqual(resp.status_int, 403)
                # swift_owner should NOT be set to True for the ReadOnly ACL
                self.assertFalse(req.environ.get('swift_owner'))

    def test_user_gets_best_acl(self):
        # When the user's groups match several ACL tiers, the most
        # privileged one (admin) wins.
        test_auth = auth.filter_factory(
            self._conf({'user_acct_username': 'testing'}))(
                FakeApp(iter(NO_CONTENT_RESP * 18)))
        mygroups = "AUTH_acct,AUTH_ro,AUTH_something,AUTH_admin"
        for target in ('/v1/%s_otheracct' % self.accpre,
                       '/v1/%s_otheracct/container' % self.accpre,
                       '/v1/%s_otheracct/container/obj' % self.accpre):
            for method in ('GET', 'HEAD', 'OPTIONS', 'PUT', 'POST', 'DELETE'):
                # Admin ACL user can do anything
                req = self._make_request(target, user_groups=mygroups,
                                         environ={'REQUEST_METHOD': method})
                resp = req.get_response(test_auth)
                self.assertEqual(
                    resp.status_int, 204, "%s (%s) - expected 204, got %d" %
                    (target, method, resp.status_int))

                # swift_owner should be set to True
                if method != 'OPTIONS':
                    self.assertTrue(req.environ.get('swift_owner'))
def test_acl_syntax_verification(self):
    """Account-ACL headers are validated: good JSON passes, bad JSON is 400.

    Only the 5 requests with valid (or absent/empty) ACLs reach the fake
    app; every malformed ACL must be rejected by tempauth itself.
    """
    test_auth = auth.filter_factory(
        self._conf({'user_admin_user': 'testing .admin'}))(
        FakeApp(iter(NO_CONTENT_RESP * 5)))
    user_groups = test_auth._get_user_groups('admin', 'admin:user',
                                             'AUTH_admin')
    good_headers = {'X-Auth-Token': 'AUTH_t'}
    # Non-ASCII member exercises unicode round-tripping through the ACL.
    good_acl = json.dumps({"read-only": [u"á", "b"]})
    bad_list_types = '{"read-only": ["a", 99]}'
    bad_acl = 'syntactically invalid acl -- this does not parse as JSON'
    wrong_acl = '{"other-auth-system":["valid","json","but","wrong"]}'
    bad_value_acl = '{"read-write":["fine"],"admin":"should be a list"}'
    not_dict_acl = '["read-only"]'
    not_dict_acl2 = 1
    empty_acls = ['{}', '', '{ }']
    target = '/v1/%s_firstacct' % self.accpre

    # no acls -- no problem!
    req = self._make_request(target, headers=good_headers,
                             user_groups=user_groups)
    resp = req.get_response(test_auth)
    self.assertEqual(resp.status_int, 204)

    # syntactically valid acls should go through
    update = {'x-account-access-control': good_acl}
    req = self._make_request(target, user_groups=user_groups,
                             headers=dict(good_headers, **update))
    resp = req.get_response(test_auth)
    self.assertEqual(resp.status_int, 204,
                     'Expected 204, got %s, response body: %s'
                     % (resp.status_int, resp.body))

    # syntactically valid empty acls should go through
    for acl in empty_acls:
        update = {'x-account-access-control': acl}
        req = self._make_request(target, user_groups=user_groups,
                                 headers=dict(good_headers, **update))
        resp = req.get_response(test_auth)
        self.assertEqual(resp.status_int, 204)

    errmsg = 'X-Account-Access-Control invalid: %s'
    # syntactically invalid acls get a 400
    # [:46] is exactly len(errmsg % "Syntax error"); any trailing detail
    # appended to the body is deliberately ignored.
    update = {'x-account-access-control': bad_acl}
    req = self._make_request(target, headers=dict(good_headers, **update))
    resp = req.get_response(test_auth)
    self.assertEqual(resp.status_int, 400)
    self.assertEqual(errmsg % "Syntax error", resp.body[:46])

    # syntactically valid acls with bad keys also get a 400
    update = {'x-account-access-control': wrong_acl}
    req = self._make_request(target, headers=dict(good_headers, **update))
    resp = req.get_response(test_auth)
    self.assertEqual(resp.status_int, 400)
    self.assertTrue(resp.body.startswith(
        errmsg % "Key 'other-auth-system' not recognized"), resp.body)

    # acls with good keys but bad values also get a 400
    update = {'x-account-access-control': bad_value_acl}
    req = self._make_request(target, headers=dict(good_headers, **update))
    resp = req.get_response(test_auth)
    self.assertEqual(resp.status_int, 400)
    self.assertTrue(resp.body.startswith(
        errmsg % "Value for key 'admin' must be a list"), resp.body)

    # acls with non-string-types in list also get a 400
    update = {'x-account-access-control': bad_list_types}
    req = self._make_request(target, headers=dict(good_headers, **update))
    resp = req.get_response(test_auth)
    self.assertEqual(resp.status_int, 400)
    self.assertTrue(resp.body.startswith(
        errmsg % "Elements of 'read-only' list must be strings"),
        resp.body)

    # acls with wrong json structure also get a 400
    update = {'x-account-access-control': not_dict_acl}
    req = self._make_request(target, headers=dict(good_headers, **update))
    resp = req.get_response(test_auth)
    self.assertEqual(resp.status_int, 400)
    self.assertEqual(errmsg % "Syntax error", resp.body[:46])

    # acls with wrong json structure also get a 400
    update = {'x-account-access-control': not_dict_acl2}
    req = self._make_request(target, headers=dict(good_headers, **update))
    resp = req.get_response(test_auth)
    self.assertEqual(resp.status_int, 400)
    self.assertEqual(errmsg % "Syntax error", resp.body[:46])
def test_acls_propagate_to_sysmeta(self):
    """A valid account ACL header is copied verbatim into core sysmeta."""
    test_auth = auth.filter_factory({'user_admin_user': 'testing'})(
        FakeApp(iter(NO_CONTENT_RESP * 3)))
    sysmeta_hdr = 'x-account-sysmeta-core-access-control'
    target = '/v1/AUTH_firstacct'
    base_headers = {'X-Auth-Token': 'AUTH_t'}
    acl_json = '{"read-only":["a","b"]}'

    # Without an ACL header, nothing is written into sysmeta.
    req = self._make_request(target, headers=base_headers)
    resp = req.get_response(test_auth)
    self.assertEqual(resp.status_int, 204)
    self.assertIsNone(req.headers.get(sysmeta_hdr))

    # A well-formed ACL must be propagated unchanged.
    headers_with_acl = dict(base_headers)
    headers_with_acl['x-account-access-control'] = acl_json
    req = self._make_request(target, headers=headers_with_acl)
    resp = req.get_response(test_auth)
    self.assertEqual(resp.status_int, 204)
    self.assertEqual(acl_json, req.headers.get(sysmeta_hdr))
def test_bad_acls_get_denied(self):
    """Each malformed account ACL header draws a 400 from tempauth."""
    test_auth = auth.filter_factory({'user_admin_user': 'testing'})(
        FakeApp(iter(NO_CONTENT_RESP * 3)))
    target = '/v1/AUTH_firstacct'
    base_headers = {'X-Auth-Token': 'AUTH_t'}
    malformed_acls = (
        'syntax error',
        '{"bad_key":"should_fail"}',
        '{"admin":"not a list, should fail"}',
        '{"admin":["valid"],"read-write":"not a list, should fail"}',
    )
    for acl in malformed_acls:
        headers = dict(base_headers)
        headers['x-account-access-control'] = acl
        req = self._make_request(target, headers=headers)
        resp = req.get_response(test_auth)
        self.assertEqual(resp.status_int, 400)
class TestAuthMultiplePrefixes(TestAccountAcls):
    """
    These tests repeat the same tests as TestAccountACLs,
    but use multiple reseller prefix items (AUTH_ and SOMEOTHER_).
    The target paths are /v1/SOMEOTHER_<blah>
    """

    def setUp(self):
        # NOTE(review): this override does not call super().setUp();
        # presumably the base class only reads these two attributes during
        # each test -- confirm against TestAccountAcls.setUp.
        self.reseller_prefix = {'reseller_prefix': 'AUTH_, SOMEOTHER_'}
        self.accpre = 'SOMEOTHER'
class PrefixAccount(unittest.TestCase):
    """Exercise tempauth's reseller-prefix extraction for account names."""

    def test_default(self):
        # Default config recognizes only AUTH_-prefixed accounts.
        middleware = auth.filter_factory({})(FakeApp())
        self.assertEqual(middleware._get_account_prefix('AUTH_1234'), 'AUTH_')
        self.assertIsNone(middleware._get_account_prefix('JUNK_1234'))

    def test_same_as_default(self):
        # Spelling out 'AUTH' explicitly behaves exactly like the default.
        middleware = auth.filter_factory({'reseller_prefix': 'AUTH'})(FakeApp())
        self.assertEqual(middleware._get_account_prefix('AUTH_1234'), 'AUTH_')
        self.assertIsNone(middleware._get_account_prefix('JUNK_1234'))

    def test_blank_reseller(self):
        # With a blank prefix every account matches the empty prefix.
        middleware = auth.filter_factory({'reseller_prefix': ''})(FakeApp())
        self.assertEqual(middleware._get_account_prefix('1234'), '')
        self.assertEqual(middleware._get_account_prefix(
            'JUNK_1234'), '')  # yes, it should return ''

    def test_multiple_resellers(self):
        # Unknown prefixes still yield None when several are configured.
        middleware = auth.filter_factory(
            {'reseller_prefix': 'AUTH, PRE2'})(FakeApp())
        self.assertEqual(middleware._get_account_prefix('AUTH_1234'), 'AUTH_')
        self.assertIsNone(middleware._get_account_prefix('JUNK_1234'))
class ServiceTokenFunctionality(unittest.TestCase):
    """End-to-end checks of tempauth's per-prefix require_group rules.

    Covers the composite-token (X-Auth-Token + X-Service-Token) behavior:
    a prefix configured with ``<prefix>_require_group`` demands that the
    combined group set contain both an operator role and the required group.
    """

    def _make_authed_request(self, conf, remote_user, path, method='GET'):
        """Make a request with tempauth as auth.

        Acts as though the user had presented a token
        granting groups as described in remote_user.
        If remote_user contains the .service group, it emulates presenting
        X-Service-Token containing a .service group.

        :param conf: configuration for tempauth
        :param remote_user: the groups the user belongs to. Examples:
            acct:joe,acct                        user joe, no .admin
            acct:joe,acct,AUTH_joeacct           user joe, has .admin group
            acct:joe,acct,AUTH_joeacct,.service  adds .service group
        :param path: the path of the request
        :param method: the method (defaults to GET)
        :returns: response object
        """
        self.req = Request.blank(path)
        self.req.method = method
        self.req.remote_user = remote_user
        fake_app = FakeApp(iter([('200 OK', {}, '')]))
        test_auth = auth.filter_factory(conf)(fake_app)
        resp = self.req.get_response(test_auth)
        return resp

    def test_authed_for_path_single(self):
        # Operator and reseller-admin roles all succeed on a single prefix.
        resp = self._make_authed_request({}, 'acct:joe,acct,AUTH_acct',
                                         '/v1/AUTH_acct')
        self.assertEqual(resp.status_int, 200)
        resp = self._make_authed_request(
            {'reseller_prefix': 'AUTH'}, 'acct:joe,acct,AUTH_acct',
            '/v1/AUTH_acct/c', method='PUT')
        self.assertEqual(resp.status_int, 200)
        resp = self._make_authed_request(
            {'reseller_prefix': 'AUTH'},
            'admin:mary,admin,AUTH_admin,.reseller_admin',
            '/v1/AUTH_acct', method='GET')
        self.assertEqual(resp.status_int, 200)
        resp = self._make_authed_request(
            {'reseller_prefix': 'AUTH'},
            'admin:mary,admin,AUTH_admin,.reseller_admin',
            '/v1/AUTH_acct', method='DELETE')
        self.assertEqual(resp.status_int, 200)

    def test_denied_for_path_single(self):
        # Wrong account, missing operator role, or DELETE by a mere
        # operator are all rejected with 403.
        resp = self._make_authed_request(
            {'reseller_prefix': 'AUTH'},
            'fredacc:fred,fredacct,AUTH_fredacc',
            '/v1/AUTH_acct')
        self.assertEqual(resp.status_int, 403)
        resp = self._make_authed_request(
            {'reseller_prefix': 'AUTH'},
            'acct:joe,acct',
            '/v1/AUTH_acct',
            method='PUT')
        self.assertEqual(resp.status_int, 403)
        resp = self._make_authed_request(
            {'reseller_prefix': 'AUTH'},
            'acct:joe,acct,AUTH_acct',
            '/v1/AUTH_acct',
            method='DELETE')
        self.assertEqual(resp.status_int, 403)

    def test_authed_for_primary_path_multiple(self):
        resp = self._make_authed_request(
            {'reseller_prefix': 'AUTH, PRE2'},
            'acct:joe,acct,AUTH_acct,PRE2_acct',
            '/v1/PRE2_acct')
        self.assertEqual(resp.status_int, 200)

    def test_denied_for_second_path_with_only_operator_role(self):
        # User only presents a token in X-Auth-Token (or in X-Service-Token)
        resp = self._make_authed_request(
            {'reseller_prefix': 'AUTH, PRE2',
             'PRE2_require_group': '.service'},
            'acct:joe,acct,AUTH_acct,PRE2_acct',
            '/v1/PRE2_acct')
        self.assertEqual(resp.status_int, 403)
        # User puts token in both X-Auth-Token and X-Service-Token
        resp = self._make_authed_request(
            {'reseller_prefix': 'AUTH, PRE2',
             'PRE2_require_group': '.service'},
            'acct:joe,acct,AUTH_acct,PRE2_acct,AUTH_acct,PRE2_acct',
            '/v1/PRE2_acct')
        self.assertEqual(resp.status_int, 403)

    def test_authed_for_second_path_with_operator_role_and_service(self):
        # Operator groups plus a .service group satisfies PRE2_require_group.
        resp = self._make_authed_request(
            {'reseller_prefix': 'AUTH, PRE2',
             'PRE2_require_group': '.service'},
            'acct:joe,acct,AUTH_acct,PRE2_acct,'
            'admin:mary,admin,AUTH_admin,PRE2_admin,.service',
            '/v1/PRE2_acct')
        self.assertEqual(resp.status_int, 200)

    def test_denied_for_second_path_with_only_service(self):
        resp = self._make_authed_request(
            {'reseller_prefix': 'AUTH, PRE2',
             'PRE2_require_group': '.service'},
            'admin:mary,admin,AUTH_admin,PRE2_admin,.service',
            '/v1/PRE2_acct')
        self.assertEqual(resp.status_int, 403)

    def test_denied_for_second_path_for_service_user(self):
        # NOTE(review): the first case here duplicates
        # test_denied_for_second_path_with_only_service above.
        # User presents token with 'service' role in X-Auth-Token
        resp = self._make_authed_request(
            {'reseller_prefix': 'AUTH, PRE2',
             'PRE2_require_group': '.service'},
            'admin:mary,admin,AUTH_admin,PRE2_admin,.service',
            '/v1/PRE2_acct')
        self.assertEqual(resp.status_int, 403)
        # User presents token with 'service' role in X-Auth-Token
        # and also in X-Service-Token
        resp = self._make_authed_request(
            {'reseller_prefix': 'AUTH, PRE2',
             'PRE2_require_group': '.service'},
            'admin:mary,admin,AUTH_admin,PRE2_admin,.service,'
            'admin:mary,admin,AUTH_admin,PRE2_admin,.service',
            '/v1/PRE2_acct')
        self.assertEqual(resp.status_int, 403)

    def test_delete_denied_for_second_path(self):
        # DELETE on an account needs reseller-admin, not just operator+service.
        resp = self._make_authed_request(
            {'reseller_prefix': 'AUTH, PRE2',
             'PRE2_require_group': '.service'},
            'acct:joe,acct,AUTH_acct,PRE2_acct,'
            'admin:mary,admin,AUTH_admin,PRE2_admin,.service',
            '/v1/PRE2_acct',
            method='DELETE')
        self.assertEqual(resp.status_int, 403)

    def test_delete_of_second_path_by_reseller_admin(self):
        resp = self._make_authed_request(
            {'reseller_prefix': 'AUTH, PRE2',
             'PRE2_require_group': '.service'},
            'acct:joe,acct,AUTH_acct,PRE2_acct,'
            'admin:mary,admin,AUTH_admin,PRE2_admin,.reseller_admin',
            '/v1/PRE2_acct',
            method='DELETE')
        self.assertEqual(resp.status_int, 200)
class TestTokenHandling(unittest.TestCase):
    """Tests of X-Auth-Token / X-Service-Token validation against memcache."""

    def _make_request(self, conf, path, headers, method='GET'):
        """Make a request with tempauth as auth.

        It sets up AUTH_t and AUTH_s as tokens in memcache, where "joe"
        has .admin role on /v1/AUTH_acct and user "glance" has .service
        role on /v1/AUTH_admin.

        :param conf: configuration for tempauth
        :param path: the path of the request
        :param headers: allows you to pass X-Auth-Token, etc.
        :param method: the method (defaults to GET)
        :returns: response object
        """
        fake_app = FakeApp(iter([('200 OK', {}, '')]))
        self.test_auth = auth.filter_factory(conf)(fake_app)
        self.req = Request.blank(path, headers=headers)
        self.req.method = method
        self.req.environ['swift.cache'] = FakeMemcache()
        self._setup_user_and_token('AUTH_t', 'acct', 'acct:joe',
                                   '.admin')
        self._setup_user_and_token('AUTH_s', 'admin', 'admin:glance',
                                   '.service')
        resp = self.req.get_response(self.test_auth)
        return resp

    def _setup_user_and_token(self, token_name, account, account_user,
                              groups):
        """Setup named token in memcache.

        :param token_name: name of token
        :param account: example: acct
        :param account_user: example: acct_joe
        :param groups: example: .admin
        """
        self.test_auth.users[account_user] = dict(groups=[groups])
        account_id = 'AUTH_%s' % account
        cache_key = 'AUTH_/token/%s' % token_name
        # Cache entry is (expiry-timestamp, comma-joined groups string).
        cache_entry = (time() + 3600,
                       self.test_auth._get_user_groups(account,
                                                       account_user,
                                                       account_id))
        self.req.environ['swift.cache'].set(cache_key, cache_entry)

    def test_tokens_set_remote_user(self):
        conf = {}  # Default conf
        resp = self._make_request(conf, '/v1/AUTH_acct',
                                  {'x-auth-token': 'AUTH_t'})
        self.assertEqual(self.req.environ['REMOTE_USER'],
                         'acct,acct:joe,AUTH_acct')
        self.assertEqual(resp.status_int, 200)
        # Add x-service-token
        resp = self._make_request(conf, '/v1/AUTH_acct',
                                  {'x-auth-token': 'AUTH_t',
                                   'x-service-token': 'AUTH_s'})
        self.assertEqual(self.req.environ['REMOTE_USER'],
                         'acct,acct:joe,AUTH_acct,admin,admin:glance,.service')
        self.assertEqual(resp.status_int, 200)
        # Put x-auth-token value into x-service-token
        resp = self._make_request(conf, '/v1/AUTH_acct',
                                  {'x-auth-token': 'AUTH_t',
                                   'x-service-token': 'AUTH_t'})
        self.assertEqual(self.req.environ['REMOTE_USER'],
                         'acct,acct:joe,AUTH_acct,acct,acct:joe,AUTH_acct')
        self.assertEqual(resp.status_int, 200)

    def test_service_token_given_and_needed(self):
        conf = {'reseller_prefix': 'AUTH, PRE2',
                'PRE2_require_group': '.service'}
        resp = self._make_request(conf, '/v1/PRE2_acct',
                                  {'x-auth-token': 'AUTH_t',
                                   'x-service-token': 'AUTH_s'})
        self.assertEqual(resp.status_int, 200)

    def test_service_token_omitted(self):
        conf = {'reseller_prefix': 'AUTH, PRE2',
                'PRE2_require_group': '.service'}
        resp = self._make_request(conf, '/v1/PRE2_acct',
                                  {'x-auth-token': 'AUTH_t'})
        self.assertEqual(resp.status_int, 403)

    def test_invalid_tokens(self):
        # Unknown auth token -> 401; unknown service token with a valid
        # auth token -> 403 (authenticated but not authorized).
        conf = {'reseller_prefix': 'AUTH, PRE2',
                'PRE2_require_group': '.service'}
        resp = self._make_request(conf, '/v1/PRE2_acct',
                                  {'x-auth-token': 'AUTH_junk'})
        self.assertEqual(resp.status_int, 401)
        resp = self._make_request(conf, '/v1/PRE2_acct',
                                  {'x-auth-token': 'AUTH_t',
                                   'x-service-token': 'AUTH_junk'})
        self.assertEqual(resp.status_int, 403)
        resp = self._make_request(conf, '/v1/PRE2_acct',
                                  {'x-auth-token': 'AUTH_junk',
                                   'x-service-token': 'AUTH_s'})
        self.assertEqual(resp.status_int, 401)
class TestUtilityMethods(unittest.TestCase):
    """Direct unit tests for helper methods on the tempauth middleware."""

    def test_account_acls_bad_path_raises_exception(self):
        # A path with no version/account component cannot carry account ACLs.
        middleware = auth.filter_factory({})(FakeApp())
        request = Request({'PATH_INFO': '/'})
        self.assertRaises(ValueError, middleware.account_acls, request)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
edgarli/proj8 | refs/heads/master | env/lib/python3.4/site-packages/wheel/test/test_basic.py | 472 | """
Basic wheel tests.
"""
import os
import pkg_resources
import json
import sys
from pkg_resources import resource_filename
import wheel.util
import wheel.tool
from wheel import egg2wheel
from wheel.install import WheelFile
from zipfile import ZipFile
from shutil import rmtree
test_distributions = ("complex-dist", "simple.dist", "headers.dist")
def teardown_module():
    """Delete eggs/wheels created by tests."""
    base = pkg_resources.resource_filename('wheel.test', '')
    for dist in test_distributions:
        for artifact_dir in ('build', 'dist'):
            path = os.path.join(base, dist, artifact_dir)
            try:
                rmtree(path)
            except OSError:
                # Directory was never created -- nothing to clean up.
                pass
def setup_module():
    """Build both wheel and egg artifacts once before any test runs."""
    build_wheel()
    build_egg()
def build_wheel():
    """Build wheels from test distributions."""
    for dist in test_distributions:
        start_dir = os.path.abspath(os.curdir)
        dist_dir = pkg_resources.resource_filename('wheel.test', dist)
        # Run each fixture's setup.py in its own directory, restoring the
        # working directory even when the build fails.
        os.chdir(dist_dir)
        try:
            sys.argv = ['', 'bdist_wheel']
            exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
        finally:
            os.chdir(start_dir)
def build_egg():
    """Build eggs from test distributions."""
    for dist in test_distributions:
        start_dir = os.path.abspath(os.curdir)
        dist_dir = pkg_resources.resource_filename('wheel.test', dist)
        # Run each fixture's setup.py in its own directory, restoring the
        # working directory even when the build fails.
        os.chdir(dist_dir)
        try:
            sys.argv = ['', 'bdist_egg']
            exec(compile(open('setup.py').read(), 'setup.py', 'exec'))
        finally:
            os.chdir(start_dir)
def test_findable():
    """Make sure pkg_resources can find us."""
    # Raises KeyError if the 'wheel' distribution is not on the working set.
    assert pkg_resources.working_set.by_key['wheel'].version
def test_egg_re():
    """Make sure egg_info_re matches every known egg filename.

    Uses a context manager so the fixture file is closed even when an
    assertion fails (the original version leaked the file handle).
    """
    names_path = pkg_resources.resource_filename('wheel', 'eggnames.txt')
    with open(names_path) as egg_names:
        for line in egg_names:
            line = line.strip()
            if not line:
                continue
            assert egg2wheel.egg_info_re.match(line), line
def test_compatibility_tags():
    """Wheel filenames parse into (pyver, abi, plat) compatibility tags."""
    wf = WheelFile("package-1.0.0-cp32.cp33-noabi-noarch.whl")
    expected_tags = [('cp32', 'noabi', 'noarch'), ('cp33', 'noabi', 'noarch')]
    assert list(wf.compatibility_tags) == expected_tags
    assert wf.arity == 2

    # A build tag ('1st') must be captured separately from the other fields.
    build_tagged = WheelFile("package-1.0.0-1st-cp33-noabi-noarch.whl")
    parsed = build_tagged.parsed_filename.groupdict()
    assert parsed['build'] == '1st', parsed
def test_convert_egg():
    """Every egg built by the fixtures converts to a wheel without error."""
    base = pkg_resources.resource_filename('wheel.test', '')
    for dist in test_distributions:
        dist_dir = os.path.join(base, dist, 'dist')
        egg_names = [name for name in os.listdir(dist_dir)
                     if name.endswith('.egg')]
        wheel.tool.convert(egg_names, dist_dir, verbose=False)
def test_unpack():
    """
    Make sure 'wheel unpack' works.

    This also verifies the integrity of our testing wheel files.
    """
    for dist in test_distributions:
        dist_dir = pkg_resources.resource_filename(
            'wheel.test', os.path.join(dist, 'dist'))
        wheel_names = (w for w in os.listdir(dist_dir) if w.endswith('.whl'))
        for name in wheel_names:
            wheel.tool.unpack(os.path.join(dist_dir, name), dist_dir)
def test_no_scripts():
    """Make sure entry point scripts are not generated."""
    dist = "complex-dist"
    basedir = pkg_resources.resource_filename('wheel.test', dist)
    for dirname, subdirs, filenames in os.walk(basedir):
        for filename in filenames:
            if not filename.endswith('.whl'):
                continue
            # Close the archive deterministically (it was left open before).
            with ZipFile(os.path.join(dirname, filename)) as whl:
                for entry in whl.infolist():
                    # idiomatic 'not in' (was 'not x in y')
                    assert '.data/scripts/' not in entry.filename
def test_pydist():
    """Make sure pydist.json exists and validates against our schema."""
    # XXX this test may need manual cleanup of older wheels
    import jsonschema

    def open_json(filename):
        # Read and close promptly (the original leaked the file handle).
        with open(filename, 'rb') as json_file:
            return json.loads(json_file.read().decode('utf-8'))

    pymeta_schema = open_json(resource_filename('wheel.test',
                                                'pydist-schema.json'))
    valid = 0
    for dist in ("simple.dist", "complex-dist"):
        basedir = pkg_resources.resource_filename('wheel.test', dist)
        for (dirname, subdirs, filenames) in os.walk(basedir):
            for filename in filenames:
                if filename.endswith('.whl'):
                    # Close each archive deterministically.
                    with ZipFile(os.path.join(dirname, filename)) as whl:
                        for entry in whl.infolist():
                            if entry.filename.endswith('/metadata.json'):
                                pymeta = json.loads(
                                    whl.read(entry).decode('utf-8'))
                                jsonschema.validate(pymeta, pymeta_schema)
                                valid += 1
    assert valid > 0, "No metadata.json found"
def test_util():
    """Round-trip urlsafe_b64encode/decode and check padding is stripped."""
    for length in range(10):
        payload = b'*' * length
        encoded = wheel.util.urlsafe_b64encode(payload)
        # Wheel's variant never emits '=' padding characters.
        assert not encoded.endswith(b'=')
        assert wheel.util.urlsafe_b64decode(encoded) == payload
def test_pick_best():
    """Test the wheel ranking algorithm.

    Fixes two hygiene problems in the original: the inner loop variable was
    named ``wheel``, shadowing the imported ``wheel`` module, and the
    ``context`` lambda relied on late binding of ``supp``.
    """
    def get_tags(res):
        info = res[-1].parsed_filename.groupdict()
        return info['pyver'], info['abi'], info['plat']

    cand_tags = [('py27', 'noabi', 'noarch'), ('py26', 'noabi', 'noarch'),
                 ('cp27', 'noabi', 'linux_i686'),
                 ('cp26', 'noabi', 'linux_i686'),
                 ('cp27', 'noabi', 'linux_x86_64'),
                 ('cp26', 'noabi', 'linux_x86_64')]
    cand_wheels = [WheelFile('testpkg-1.0-%s-%s-%s.whl' % t)
                   for t in cand_tags]

    supported = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]
    supported2 = [('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch'),
                  ('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch')]
    supported3 = [('cp26', 'noabi', 'linux_i686'), ('py26', 'noabi', 'noarch'),
                  ('cp27', 'noabi', 'linux_i686'), ('py27', 'noabi', 'noarch')]

    for supp in (supported, supported2, supported3):
        # Bind 'supp' eagerly via a default argument so the callable does
        # not late-bind the loop variable.
        context = lambda supp=supp: list(supp)
        for candidate in cand_wheels:
            candidate.context = context
        best = max(cand_wheels)
        assert list(best.tags)[0] == supp[0]

    # assert_equal(
    #     list(map(get_tags, pick_best(cand_wheels, supp, top=False))), supp)
|
bowenliu16/deepchem | refs/heads/master | deepchem/feat/tests/test_fingerprints.py | 4 | """
Test topological fingerprints.
"""
import unittest
from rdkit import Chem
from deepchem.feat import fingerprints as fp
class TestCircularFingerprint(unittest.TestCase):
    """Tests for the CircularFingerprint featurizer."""

    def setUp(self):
        """Parse an aspirin molecule and build the default engine."""
        aspirin_smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
        self.mol = Chem.MolFromSmiles(aspirin_smiles)
        self.engine = fp.CircularFingerprint()

    def test_circular_fingerprints(self):
        """Dense output has one row per molecule and `size` columns."""
        features = self.engine([self.mol])
        assert features.shape == (1, self.engine.size)

    def test_sparse_circular_fingerprints(self):
        """Sparse encoding yields one non-empty dict per molecule."""
        self.engine = fp.CircularFingerprint(sparse=True)
        features = self.engine([self.mol])
        assert features.shape == (1,)
        assert isinstance(features[0], dict)
        assert len(features[0])

    def test_sparse_circular_fingerprints_with_smiles(self):
        """Sparse + smiles encoding stores a count and SMILES per fragment."""
        self.engine = fp.CircularFingerprint(sparse=True, smiles=True)
        features = self.engine([self.mol])
        assert features.shape == (1,)
        assert isinstance(features[0], dict)
        assert len(features[0])
        # check for separate count and SMILES entries for each fragment
        for fragment_id, entry in features[0].items():
            assert 'count' in entry
            assert 'smiles' in entry
|
r00tkid/mr_bot | refs/heads/master | mr_bot/sender.py | 1 | # -*- coding: utf-8 -*-
#!/usr/bin/python
import traceback
from mr_logger.logger import Logger
from telegram.error import BadRequest, Unauthorized
logger = Logger()
class Sender:
    """ this class will send bot messages (photo, voice, video, audio, docs) """
    """ bot is instance if Telegram bot """
    """ return True on success and false on error """
    # NOTE: only the first string above is the class docstring; the other
    # two are no-op string statements kept from the original author.
    # Telegram bot instance, injected via the constructor.
    bot = None

    def __init__(self, bot):
        self.bot = bot

    def edit_message_text(self, message_id, chat_id, text, reply_markup, parse_mode='HTML'):
        # Edit an ordinary chat message in place. Returns True on success;
        # every Telegram API error is logged and swallowed, returning False.
        try:
            self.bot.editMessageText(message_id=message_id, chat_id=chat_id,
                                     text=text, reply_markup=reply_markup,
                                     parse_mode=parse_mode)
            return True
        except BadRequest as bad_request_exception:
            # e.g. message unchanged or too old to edit
            logger.warn(traceback.format_exception_only(type(bad_request_exception),
                                                        bad_request_exception))
        except Unauthorized:
            logger.warn('[Unauthorized]: cant edit message text.\nID=%s' % str(message_id))
        except Exception as e:
            logger.err(e)
        return False

    def edit_inline_message_text(self, inline_message_id, text, reply_markup):
        # Same as edit_message_text but for inline-mode messages, which are
        # addressed by inline_message_id instead of (chat_id, message_id).
        # NOTE(review): unlike edit_message_text this does not pass
        # parse_mode -- presumably intentional; confirm.
        try:
            self.bot.editMessageText(inline_message_id=inline_message_id,
                                     text=text, reply_markup=reply_markup)
            return True
        except BadRequest as bad_request_exception:
            logger.warn(traceback.format_exception_only(type(bad_request_exception),
                                                        bad_request_exception))
        except Unauthorized:
            logger.warn('[Unauthorized]: cant edit message text.\nID=%s' % str(inline_message_id))
        except Exception as e:
            logger.err(e)
        return False

    # todo venue
    def send_message(self, chat_id, parse_mode=None, text=None, photo=None, video=None, document=None,
                     voice=None, audio=None, sticker=None, location=None, contact=None,
                     reply_markup=None, reply_to_message_id=None):
        # Send exactly one payload to chat_id. The first non-None payload
        # wins, checked in this order: text, photo, video, document, voice,
        # audio, sticker, location, contact. Returns True on success,
        # False on any (logged) Telegram API error.
        try:
            if text:
                if not parse_mode:
                    self.bot.sendMessage(chat_id=chat_id,
                                         text=text, reply_markup=reply_markup,
                                         reply_to_message_id=reply_to_message_id)
                else:
                    self.bot.sendMessage(chat_id=chat_id,
                                         text=text, reply_markup=reply_markup,
                                         reply_to_message_id=reply_to_message_id, parse_mode=parse_mode)
            elif photo:
                self.bot.sendPhoto(chat_id=chat_id, photo=photo, reply_markup=reply_markup,
                                   reply_to_message_id=reply_to_message_id)
            elif video:
                self.bot.sendVideo(chat_id=chat_id, video=video, reply_markup=reply_markup,
                                   reply_to_message_id=reply_to_message_id)
            elif document:
                self.bot.sendDocument(chat_id=chat_id, document=document, reply_markup=reply_markup,
                                      reply_to_message_id=reply_to_message_id)
            elif voice:
                self.bot.sendVoice(chat_id=chat_id, voice=voice, reply_markup=reply_markup,
                                   reply_to_message_id=reply_to_message_id)
            elif audio:
                self.bot.sendAudio(chat_id=chat_id, audio=audio, reply_markup=reply_markup,
                                   reply_to_message_id=reply_to_message_id)
            elif sticker:
                self.bot.sendSticker(chat_id=chat_id, sticker=sticker, reply_markup=reply_markup,
                                     reply_to_message_id=reply_to_message_id)
            elif location:
                # location/contact are objects; their fields are unpacked here
                self.bot.sendLocation(chat_id=chat_id, latitude=location.latitude,
                                      longitude=location.longitude, reply_markup=reply_markup,
                                      reply_to_message_id=reply_to_message_id)
            elif contact:
                self.bot.sendContact(chat_id=chat_id, phone_number=contact.phone_number,
                                     first_name=contact.first_name, reply_markup=reply_markup,
                                     reply_to_message_id=reply_to_message_id)
            return True
        except BadRequest as bad_request_exception:
            logger.warn(traceback.format_exception_only(type(bad_request_exception),
                                                        bad_request_exception))
        except Unauthorized:
            logger.warn('cant send message to chat_id %s ( the bot was blocked by the user )'
                        % str(chat_id))
        except Exception as e:
            logger.err(e)
        return False
|
aaronfc/notipy | refs/heads/master | notipy/__main__.py | 1 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import sys
from notipy.cli import Notipy
def main(args=None):
    """CLI entry point: join the arguments into one message and send it.

    :param args: argument list (defaults to ``sys.argv[1:]``)
    :returns: 0 after sending; None (success exit status) when no
              message was given, matching the original behavior
    """
    if args is None:
        args = sys.argv[1:]
    if args:
        # ' '.join on a single-element list is that element, so the
        # original len(args) == 1 special case was redundant.
        Notipy().send(' '.join(args))
        return 0
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
|
bcarroll/authmgr | refs/heads/master | python-3.6.2-Win64/Lib/encodings/iso2022_jp_ext.py | 816 | #
# iso2022_jp_ext.py: Python Unicode Codec for ISO2022_JP_EXT
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_jp_ext')
class Codec(codecs.Codec):
    # Stateless encode/decode delegated to the C codec implementation.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # Feed-as-you-go encoder backed by the multibyte codec machinery.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # Feed-as-you-go decoder backed by the multibyte codec machinery.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream wrapper; the multibyte machinery supplies buffering.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream wrapper; the multibyte machinery supplies buffering.
    codec = codec
def getregentry():
    """Return the CodecInfo used to register this codec with the registry."""
    return codecs.CodecInfo(
        name='iso2022_jp_ext',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
nojhan/weboob-devel | refs/heads/master | tools/boilerplate_data/layout.py | 11 | ${coding}
# Copyright(C) ${r.year} ${r.author}
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
${self.body()}\
|
ol-loginov/intellij-community | refs/heads/master | python/testData/stubs/AugAssignDunderAll.py | 83 | __all__ = ['foo', 'bar']
# NOTE(review): '+=' with a string extends the list one character at a
# time ('f', '0', 'f', '1', ...). This file is stub-test fixture data for
# the IDE's __all__ analysis, so the oddity appears intentional.
for i in range(5):
    __all__ += 'f' + str(i)
|
memtoko/django | refs/heads/master | tests/multiple_database/models.py | 99 | from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Review(models.Model):
    """A review attached to any target model via the contenttypes framework."""
    source = models.CharField(max_length=100)
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey()

    def __str__(self):
        return self.source

    class Meta:
        ordering = ('source',)
class PersonManager(models.Manager):
    """Manager enabling natural-key (de)serialization keyed on name."""
    def get_by_natural_key(self, name):
        return self.get(name=name)
@python_2_unicode_compatible
class Person(models.Model):
    """A person identified by name; looked up via PersonManager natural keys."""
    objects = PersonManager()
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('name',)
# This book manager doesn't do anything interesting; it just
# exists to strip out the 'extra_arg' argument to certain
# calls. This argument is used to establish that the BookManager
# is actually getting used when it should be.
class BookManager(models.Manager):
    """Manager that tolerates (and discards) an 'extra_arg' keyword."""
    def create(self, *args, **kwargs):
        kwargs.pop('extra_arg', None)
        return super(BookManager, self).create(*args, **kwargs)

    def get_or_create(self, *args, **kwargs):
        kwargs.pop('extra_arg', None)
        return super(BookManager, self).get_or_create(*args, **kwargs)
@python_2_unicode_compatible
class Book(models.Model):
    """A published book; routes creation through BookManager."""
    objects = BookManager()
    title = models.CharField(max_length=100)
    published = models.DateField()
    authors = models.ManyToManyField(Person)
    editor = models.ForeignKey(Person, null=True, related_name='edited')
    reviews = GenericRelation(Review)
    pages = models.IntegerField(default=100)

    def __str__(self):
        return self.title

    class Meta:
        ordering = ('title',)
@python_2_unicode_compatible
class Pet(models.Model):
    """A pet belonging to exactly one Person."""
    name = models.CharField(max_length=100)
    owner = models.ForeignKey(Person)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('name',)
class UserProfile(models.Model):
    """One-to-one extension of the auth User with a 'flavor' attribute."""
    user = models.OneToOneField(User, null=True)
    flavor = models.CharField(max_length=100)

    class Meta:
        ordering = ('flavor',)
|
PanDAWMS/panda-harvester | refs/heads/master | pandaharvester/harvestermonitor/dummy_mcore_monitor.py | 2 | import os.path
from concurrent.futures import ProcessPoolExecutor as Pool
from pandaharvester.harvestercore.work_spec import WorkSpec
from pandaharvester.harvestercore.plugin_base import PluginBase
from pandaharvester.harvestercore import core_utils
# logger
baseLogger = core_utils.setup_logger('dummy_mcore_monitor')
# check a worker
def check_a_worker(workspec):
    """Read a worker's status from the status.txt file in its access point.

    :param workspec: the WorkSpec to check
    :returns: (status, diagnostic-message) tuple; defaults to
              WorkSpec.ST_finished when the file is missing or unreadable
    """
    # make logger
    tmpLog = core_utils.make_logger(baseLogger, 'workerID={0}'.format(workspec.workerID),
                                    method_name='check_a_worker')
    dummyFilePath = os.path.join(workspec.get_access_point(), 'status.txt')
    tmpLog.debug('look for {0}'.format(dummyFilePath))
    newStatus = WorkSpec.ST_finished
    try:
        with open(dummyFilePath) as dummyFile:
            newStatus = dummyFile.readline()
            newStatus = newStatus.strip()
    except Exception:
        # Keep the best-effort fallback, but only for real errors: the
        # original bare 'except:' also swallowed KeyboardInterrupt and
        # SystemExit, making the worker process hard to kill.
        pass
    tmpLog.debug('newStatus={0}'.format(newStatus))
    return (newStatus, '')
# dummy monitor with multi-cores
class DummyMcoreMonitor(PluginBase):
    """Dummy monitor that checks workers in parallel via a process pool."""

    # constructor
    def __init__(self, **kwarg):
        PluginBase.__init__(self, **kwarg)

    # check workers
    def check_workers(self, workspec_list):
        """Check every worker in *workspec_list*.

        :returns: (True, iterable of (status, message) tuples, one per worker)
        """
        # make logger
        tmpLog = self.make_logger(baseLogger, method_name='check_workers')
        tmpLog.debug('start nWorkers={0}'.format(len(workspec_list)))
        # Executor.map returns a lazy iterator; exiting the 'with' block
        # waits for all submitted work, so iterating retList afterwards is
        # safe.
        with Pool() as pool:
            retList = pool.map(check_a_worker, workspec_list)
        tmpLog.debug('done')
        return True, retList
|
mitar/django | refs/heads/master | tests/regressiontests/multiple_database/models.py | 43 | from __future__ import absolute_import
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.db import models
class Review(models.Model):
    """A review attached to any target model via the contenttypes framework."""
    source = models.CharField(max_length=100)
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = generic.GenericForeignKey()

    def __unicode__(self):
        return self.source

    class Meta:
        ordering = ('source',)
class PersonManager(models.Manager):
    """Manager enabling natural-key (de)serialization keyed on name."""
    def get_by_natural_key(self, name):
        return self.get(name=name)
class Person(models.Model):
    """A person identified by name; looked up via PersonManager natural keys."""
    objects = PersonManager()
    name = models.CharField(max_length=100)

    def __unicode__(self):
        return self.name

    class Meta:
        ordering = ('name',)
# This book manager doesn't do anything interesting; it just
# exists to strip out the 'extra_arg' argument to certain
# calls. This argument is used to establish that the BookManager
# is actually getting used when it should be.
class BookManager(models.Manager):
    """Manager that tolerates (and discards) an 'extra_arg' keyword."""
    def create(self, *args, **kwargs):
        kwargs.pop('extra_arg', None)
        return super(BookManager, self).create(*args, **kwargs)

    def get_or_create(self, *args, **kwargs):
        kwargs.pop('extra_arg', None)
        return super(BookManager, self).get_or_create(*args, **kwargs)
class Book(models.Model):
    """A book with M2M authors, an optional editor, and generic reviews."""
    objects = BookManager()  # custom manager that strips test-only 'extra_arg'
    title = models.CharField(max_length=100)
    published = models.DateField()
    authors = models.ManyToManyField(Person)
    # nullable editor; reverse accessor is person.edited
    editor = models.ForeignKey(Person, null=True, related_name='edited')
    reviews = generic.GenericRelation(Review)
    pages = models.IntegerField(default=100)
    def __unicode__(self):
        return self.title
    class Meta:
        ordering = ('title',)
class Pet(models.Model):
    """A pet owned by a Person."""
    name = models.CharField(max_length=100)
    owner = models.ForeignKey(Person)
    def __unicode__(self):
        return self.name
    class Meta:
        ordering = ('name',)
class UserProfile(models.Model):
    """One-to-one profile for auth.User; user may be null."""
    user = models.OneToOneField(User, null=True)
    flavor = models.CharField(max_length=100)
    class Meta:
        ordering = ('flavor',)
|
dycodedev/taiga-back | refs/heads/master | tests/integration/test_custom_attributes_issues.py | 20 | # Copyright (C) 2015 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2015 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2015 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db.transaction import atomic
from django.core.urlresolvers import reverse
from taiga.base.utils import json
from .. import factories as f
import pytest
pytestmark = pytest.mark.django_db
#########################################################
# Issue Custom Attributes
#########################################################
def test_issue_custom_attribute_duplicate_name_error_on_create(client):
    """Creating an attribute whose name already exists in the project fails."""
    existing_attr = f.IssueCustomAttributeFactory()
    membership = f.MembershipFactory(user=existing_attr.project.owner,
                                     project=existing_attr.project,
                                     is_owner=True)
    client.login(membership.user)
    payload = json.dumps({"name": existing_attr.name,
                          "project": existing_attr.project.pk})
    response = client.json.post(reverse("issue-custom-attributes-list"), payload)
    assert response.status_code == 400
def test_issue_custom_attribute_duplicate_name_error_on_update(client):
    """Renaming an attribute to a sibling attribute's name fails validation."""
    attr_a = f.IssueCustomAttributeFactory()
    attr_b = f.IssueCustomAttributeFactory(project=attr_a.project)
    membership = f.MembershipFactory(user=attr_a.project.owner,
                                     project=attr_a.project,
                                     is_owner=True)
    client.login(membership.user)
    detail_url = reverse("issue-custom-attributes-detail", kwargs={"pk": attr_b.pk})
    response = client.json.patch(detail_url, json.dumps({"name": attr_a.name}))
    assert response.status_code == 400
def test_issue_custom_attribute_duplicate_name_error_on_move_between_projects(client):
    """Moving an attribute into a project that already has one with the
    same name fails validation."""
    attr_a = f.IssueCustomAttributeFactory()
    attr_b = f.IssueCustomAttributeFactory(name=attr_a.name)
    membership = f.MembershipFactory(user=attr_a.project.owner,
                                     project=attr_a.project,
                                     is_owner=True)
    # the same user must also belong to the second project to edit its attribute
    f.MembershipFactory(user=attr_a.project.owner,
                        project=attr_b.project,
                        is_owner=True)
    client.login(membership.user)
    detail_url = reverse("issue-custom-attributes-detail", kwargs={"pk": attr_b.pk})
    response = client.json.patch(detail_url,
                                 json.dumps({"project": attr_a.project.pk}))
    assert response.status_code == 400
#########################################################
# Issue Custom Attributes Values
#########################################################
def test_issue_custom_attributes_values_when_create_us(client):
    """A freshly created issue starts with an empty custom-attributes mapping."""
    new_issue = f.IssueFactory()
    assert new_issue.custom_attributes_values.attributes_values == {}
def test_issue_custom_attributes_values_update(client):
    # An owner member can PATCH the issue's custom-attribute values; the new
    # mapping is echoed in the response and persisted to the database.
    issue = f.IssueFactory()
    member = f.MembershipFactory(user=issue.project.owner,
                                 project=issue.project,
                                 is_owner=True)
    custom_attr_1 = f.IssueCustomAttributeFactory(project=issue.project)
    ct1_id = "{}".format(custom_attr_1.id)
    custom_attr_2 = f.IssueCustomAttributeFactory(project=issue.project)
    ct2_id = "{}".format(custom_attr_2.id)
    custom_attrs_val = issue.custom_attributes_values
    url = reverse("issue-custom-attributes-values-detail", args=[issue.id])
    # values are keyed by the string id of each custom attribute
    data = {
        "attributes_values": {
            ct1_id: "test_1_updated",
            ct2_id: "test_2_updated"
        },
        "version": custom_attrs_val.version
    }
    assert issue.custom_attributes_values.attributes_values == {}
    client.login(member.user)
    response = client.json.patch(url, json.dumps(data))
    assert response.status_code == 200
    assert response.data["attributes_values"] == data["attributes_values"]
    # reload from the DB to verify persistence
    issue = issue.__class__.objects.get(id=issue.id)
    assert issue.custom_attributes_values.attributes_values == data["attributes_values"]
def test_issue_custom_attributes_values_update_with_error_invalid_key(client):
    """PATCHing attributes_values with an attribute id that does not exist
    in the project must be rejected with 400."""
    issue = f.IssueFactory()
    member = f.MembershipFactory(user=issue.project.owner,
                                 project=issue.project,
                                 is_owner=True)
    custom_attr_1 = f.IssueCustomAttributeFactory(project=issue.project)
    ct1_id = "{}".format(custom_attr_1.id)
    # a second attribute exists in the project but "123456" matches neither;
    # the original bound this to an unused local -- the factory call is kept
    # for its database side effect only
    f.IssueCustomAttributeFactory(project=issue.project)
    custom_attrs_val = issue.custom_attributes_values
    url = reverse("issue-custom-attributes-values-detail", args=[issue.id])
    data = {
        "attributes_values": {
            ct1_id: "test_1_updated",
            "123456": "test_2_updated"
        },
        "version": custom_attrs_val.version
    }
    client.login(member.user)
    response = client.json.patch(url, json.dumps(data))
    assert response.status_code == 400
def test_issue_custom_attributes_values_delete_issue(client):
    """Deleting an issue cascades to its custom-attributes-values row."""
    issue = f.IssueFactory()
    member = f.MembershipFactory(user=issue.project.owner,
                                 project=issue.project,
                                 is_owner=True)
    # the attributes are created only for their database side effect; the
    # original also formatted their ids into unused locals (removed here)
    f.IssueCustomAttributeFactory(project=issue.project)
    f.IssueCustomAttributeFactory(project=issue.project)
    custom_attrs_val = issue.custom_attributes_values
    url = reverse("issues-detail", args=[issue.id])
    client.login(member.user)
    response = client.json.delete(url)
    assert response.status_code == 204
    assert not issue.__class__.objects.filter(id=issue.id).exists()
    assert not custom_attrs_val.__class__.objects.filter(id=custom_attrs_val.id).exists()
#########################################################
# Test tristres triggers :-P
#########################################################
def test_trigger_update_issuecustomvalues_afeter_remove_issuecustomattribute(client):
    # Exercises the database trigger that prunes a deleted attribute's entry
    # from every issue's attributes_values mapping.
    issue = f.IssueFactory()
    member = f.MembershipFactory(user=issue.project.owner,
                                 project=issue.project,
                                 is_owner=True)
    custom_attr_1 = f.IssueCustomAttributeFactory(project=issue.project)
    ct1_id = "{}".format(custom_attr_1.id)
    custom_attr_2 = f.IssueCustomAttributeFactory(project=issue.project)
    ct2_id = "{}".format(custom_attr_2.id)
    custom_attrs_val = issue.custom_attributes_values
    custom_attrs_val.attributes_values = {ct1_id: "test_1", ct2_id: "test_2"}
    custom_attrs_val.save()
    assert ct1_id in custom_attrs_val.attributes_values.keys()
    assert ct2_id in custom_attrs_val.attributes_values.keys()
    url = reverse("issue-custom-attributes-detail", kwargs={"pk": custom_attr_2.pk})
    client.login(member.user)
    response = client.json.delete(url)
    assert response.status_code == 204
    # reload from the DB: the trigger must have removed only ct2's entry
    custom_attrs_val = custom_attrs_val.__class__.objects.get(id=custom_attrs_val.id)
    assert not custom_attr_2.__class__.objects.filter(pk=custom_attr_2.pk).exists()
    assert ct1_id in custom_attrs_val.attributes_values.keys()
    assert ct2_id not in custom_attrs_val.attributes_values.keys()
|
pjryan126/solid-start-careers | refs/heads/master | store/api/zillow/venv/lib/python2.7/site-packages/sqlalchemy/sql/base.py | 42 | # sql/base.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Foundational utilities common to many sql modules.
"""
from .. import util, exc
import itertools
from .visitors import ClauseVisitor
import re
import collections
PARSE_AUTOCOMMIT = util.symbol('PARSE_AUTOCOMMIT')
NO_ARG = util.symbol('NO_ARG')
class Immutable(object):
    """mark a ClauseElement as 'immutable' when expressions are cloned."""

    def _clone(self):
        # immutable elements are shared rather than copied
        return self

    def unique_params(self, *optionaldict, **kwargs):
        raise NotImplementedError("Immutable objects do not support copying")

    def params(self, *optionaldict, **kwargs):
        raise NotImplementedError("Immutable objects do not support copying")
def _from_objects(*elements):
return itertools.chain(*[element._from_objects for element in elements])
@util.decorator
def _generative(fn, *args, **kw):
    """Mark a method as generative."""
    # run the decorated method against a copy of the receiver and return
    # the copy, so chained calls never mutate the original object
    self = args[0]._generate()
    fn(self, *args[1:], **kw)
    return self
class _DialectArgView(collections.MutableMapping):
    """A dictionary view of dialect-level arguments in the form
    <dialectname>_<argument_name>.
    """
    def __init__(self, obj):
        self.obj = obj
    def _key(self, key):
        # split "<dialect>_<argname>" on the first underscore only, so
        # argument names containing underscores survive intact
        try:
            dialect, value_key = key.split("_", 1)
        except ValueError:
            raise KeyError(key)
        else:
            return dialect, value_key
    def __getitem__(self, key):
        dialect, value_key = self._key(key)
        try:
            opt = self.obj.dialect_options[dialect]
        except exc.NoSuchModuleError:
            # unknown dialect name behaves like a missing mapping key
            raise KeyError(key)
        else:
            return opt[value_key]
    def __setitem__(self, key, value):
        try:
            dialect, value_key = self._key(key)
        except KeyError:
            raise exc.ArgumentError(
                "Keys must be of the form <dialectname>_<argname>")
        else:
            self.obj.dialect_options[dialect][value_key] = value
    def __delitem__(self, key):
        dialect, value_key = self._key(key)
        del self.obj.dialect_options[dialect][value_key]
    def __len__(self):
        # only user-specified (non-default) arguments are counted
        return sum(len(args._non_defaults) for args in
                   self.obj.dialect_options.values())
    def __iter__(self):
        return (
            util.safe_kwarg("%s_%s" % (dialect_name, value_name))
            for dialect_name in self.obj.dialect_options
            for value_name in
            self.obj.dialect_options[dialect_name]._non_defaults
        )
class _DialectArgDict(collections.MutableMapping):
    """A dictionary view of dialect-level arguments for a specific
    dialect.
    Maintains a separate collection of user-specified arguments
    and dialect-specified default arguments.
    """
    def __init__(self):
        # user-supplied values take precedence over dialect defaults
        self._non_defaults = {}
        self._defaults = {}
    def __len__(self):
        # count distinct keys across both collections
        return len(set(self._non_defaults).union(self._defaults))
    def __iter__(self):
        return iter(set(self._non_defaults).union(self._defaults))
    def __getitem__(self, key):
        # prefer the user-specified value; fall back to the default
        if key in self._non_defaults:
            return self._non_defaults[key]
        else:
            return self._defaults[key]
    def __setitem__(self, key, value):
        # assignment always records a user-specified (non-default) value
        self._non_defaults[key] = value
    def __delitem__(self, key):
        # only user-specified values may be deleted; defaults persist
        del self._non_defaults[key]
class DialectKWArgs(object):
    """Establish the ability for a class to have dialect-specific arguments
    with defaults and constructor validation.
    The :class:`.DialectKWArgs` interacts with the
    :attr:`.DefaultDialect.construct_arguments` present on a dialect.
    .. seealso::
        :attr:`.DefaultDialect.construct_arguments`
    """
    @classmethod
    def argument_for(cls, dialect_name, argument_name, default):
        """Add a new kind of dialect-specific keyword argument for this class.
        E.g.::
            Index.argument_for("mydialect", "length", None)
            some_index = Index('a', 'b', mydialect_length=5)
        The :meth:`.DialectKWArgs.argument_for` method is a per-argument
        way adding extra arguments to the
        :attr:`.DefaultDialect.construct_arguments` dictionary. This
        dictionary provides a list of argument names accepted by various
        schema-level constructs on behalf of a dialect.
        New dialects should typically specify this dictionary all at once as a
        data member of the dialect class. The use case for ad-hoc addition of
        argument names is typically for end-user code that is also using
        a custom compilation scheme which consumes the additional arguments.
        :param dialect_name: name of a dialect. The dialect must be
        locatable, else a :class:`.NoSuchModuleError` is raised. The
        dialect must also include an existing
        :attr:`.DefaultDialect.construct_arguments` collection, indicating
        that it participates in the keyword-argument validation and default
        system, else :class:`.ArgumentError` is raised. If the dialect does
        not include this collection, then any keyword argument can be
        specified on behalf of this dialect already. All dialects packaged
        within SQLAlchemy include this collection, however for third party
        dialects, support may vary.
        :param argument_name: name of the parameter.
        :param default: default value of the parameter.
        .. versionadded:: 0.9.4
        """
        construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name]
        if construct_arg_dictionary is None:
            # a None registry entry means the dialect loaded but declares no
            # construct_arguments collection, i.e. validation is NOT enabled.
            # (The original message said the dialect "does have" validation
            # "enabled configured", which was inverted and ungrammatical.)
            raise exc.ArgumentError(
                "Dialect '%s' does not have keyword-argument "
                "validation and defaults enabled" %
                dialect_name)
        if cls not in construct_arg_dictionary:
            construct_arg_dictionary[cls] = {}
        construct_arg_dictionary[cls][argument_name] = default
    @util.memoized_property
    def dialect_kwargs(self):
        """A collection of keyword arguments specified as dialect-specific
        options to this construct.
        The arguments are present here in their original ``<dialect>_<kwarg>``
        format. Only arguments that were actually passed are included;
        unlike the :attr:`.DialectKWArgs.dialect_options` collection, which
        contains all options known by this dialect including defaults.
        The collection is also writable; keys are accepted of the
        form ``<dialect>_<kwarg>`` where the value will be assembled
        into the list of options.
        .. versionadded:: 0.9.2
        .. versionchanged:: 0.9.4 The :attr:`.DialectKWArgs.dialect_kwargs`
        collection is now writable.
        .. seealso::
        :attr:`.DialectKWArgs.dialect_options` - nested dictionary form
        """
        return _DialectArgView(self)
    @property
    def kwargs(self):
        """A synonym for :attr:`.DialectKWArgs.dialect_kwargs`."""
        return self.dialect_kwargs
    @util.dependencies("sqlalchemy.dialects")
    def _kw_reg_for_dialect(dialects, dialect_name):
        # registry value factory: None when the dialect declares no
        # construct_arguments, else a copy of that mapping
        dialect_cls = dialects.registry.load(dialect_name)
        if dialect_cls.construct_arguments is None:
            return None
        return dict(dialect_cls.construct_arguments)
    _kw_registry = util.PopulateDict(_kw_reg_for_dialect)
    def _kw_reg_for_dialect_cls(self, dialect_name):
        # build the per-dialect argument dict for this class, layering in
        # defaults from every class in the MRO that declares them
        construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name]
        d = _DialectArgDict()
        if construct_arg_dictionary is None:
            # no validation info: accept anything via the "*" wildcard
            d._defaults.update({"*": None})
        else:
            for cls in reversed(self.__class__.__mro__):
                if cls in construct_arg_dictionary:
                    d._defaults.update(construct_arg_dictionary[cls])
        return d
    @util.memoized_property
    def dialect_options(self):
        """A collection of keyword arguments specified as dialect-specific
        options to this construct.
        This is a two-level nested registry, keyed to ``<dialect_name>``
        and ``<argument_name>``. For example, the ``postgresql_where``
        argument would be locatable as::
            arg = my_object.dialect_options['postgresql']['where']
        .. versionadded:: 0.9.2
        .. seealso::
        :attr:`.DialectKWArgs.dialect_kwargs` - flat dictionary form
        """
        return util.PopulateDict(
            util.portable_instancemethod(self._kw_reg_for_dialect_cls)
        )
    def _validate_dialect_kwargs(self, kwargs):
        # validate remaining kwargs that they all specify DB prefixes
        if not kwargs:
            return
        for k in kwargs:
            m = re.match('^(.+?)_(.+)$', k)
            if not m:
                raise TypeError(
                    "Additional arguments should be "
                    "named <dialectname>_<argument>, got '%s'" % k)
            dialect_name, arg_name = m.group(1, 2)
            try:
                construct_arg_dictionary = self.dialect_options[dialect_name]
            except exc.NoSuchModuleError:
                # unknown dialect: warn but accept the argument as-is
                util.warn(
                    "Can't validate argument %r; can't "
                    "locate any SQLAlchemy dialect named %r" %
                    (k, dialect_name))
                self.dialect_options[dialect_name] = d = _DialectArgDict()
                d._defaults.update({"*": None})
                d._non_defaults[arg_name] = kwargs[k]
            else:
                if "*" not in construct_arg_dictionary and \
                        arg_name not in construct_arg_dictionary:
                    raise exc.ArgumentError(
                        "Argument %r is not accepted by "
                        "dialect %r on behalf of %r" % (
                            k,
                            dialect_name, self.__class__
                        ))
                else:
                    construct_arg_dictionary[arg_name] = kwargs[k]
class Generative(object):
    """Allow a ClauseElement to generate itself via the
    @_generative decorator.
    """

    def _generate(self):
        # shallow copy: a new instance of the same class whose __dict__ is
        # an independent dict sharing the same attribute values
        cls = self.__class__
        duplicate = cls.__new__(cls)
        duplicate.__dict__ = dict(self.__dict__)
        return duplicate
class Executable(Generative):
    """Mark a ClauseElement as supporting execution.
    :class:`.Executable` is a superclass for all "statement" types
    of objects, including :func:`select`, :func:`delete`, :func:`update`,
    :func:`insert`, :func:`text`.
    """
    # class-level defaults; _execution_options is an immutable mapping so
    # generative updates replace it rather than mutating shared state
    supports_execution = True
    _execution_options = util.immutabledict()
    _bind = None
    @_generative
    def execution_options(self, **kw):
        """ Set non-SQL options for the statement which take effect during
        execution.
        Execution options can be set on a per-statement or
        per :class:`.Connection` basis. Additionally, the
        :class:`.Engine` and ORM :class:`~.orm.query.Query` objects provide
        access to execution options which they in turn configure upon
        connections.
        The :meth:`execution_options` method is generative. A new
        instance of this statement is returned that contains the options::
            statement = select([table.c.x, table.c.y])
            statement = statement.execution_options(autocommit=True)
        Note that only a subset of possible execution options can be applied
        to a statement - these include "autocommit" and "stream_results",
        but not "isolation_level" or "compiled_cache".
        See :meth:`.Connection.execution_options` for a full list of
        possible options.
        .. seealso::
        :meth:`.Connection.execution_options()`
        :meth:`.Query.execution_options()`
        """
        if 'isolation_level' in kw:
            raise exc.ArgumentError(
                "'isolation_level' execution option may only be specified "
                "on Connection.execution_options(), or "
                "per-engine using the isolation_level "
                "argument to create_engine()."
            )
        if 'compiled_cache' in kw:
            raise exc.ArgumentError(
                "'compiled_cache' execution option may only be specified "
                "on Connection.execution_options(), not per statement."
            )
        # union() returns a new immutabledict; @_generative has already
        # copied self, so only the generated statement is affected
        self._execution_options = self._execution_options.union(kw)
    def execute(self, *multiparams, **params):
        """Compile and execute this :class:`.Executable`."""
        e = self.bind
        if e is None:
            label = getattr(self, 'description', self.__class__.__name__)
            # NOTE: the original concatenated these fragments without a
            # separating space, producing "...Engine.Use the..."
            msg = ('This %s is not directly bound to a Connection or Engine. '
                   'Use the .execute() method of a Connection or Engine '
                   'to execute this construct.' % label)
            raise exc.UnboundExecutionError(msg)
        return e._execute_clauseelement(self, multiparams, params)
    def scalar(self, *multiparams, **params):
        """Compile and execute this :class:`.Executable`, returning the
        result's scalar representation.
        """
        return self.execute(*multiparams, **params).scalar()
    @property
    def bind(self):
        """Returns the :class:`.Engine` or :class:`.Connection` to
        which this :class:`.Executable` is bound, or None if none found.
        This is a traversal which checks locally, then
        checks among the "from" clauses of associated objects
        until a bound engine or connection is found.
        """
        if self._bind is not None:
            return self._bind
        # fall back to the bind of any associated "from" object
        for f in _from_objects(self):
            if f is self:
                continue
            engine = f.bind
            if engine is not None:
                return engine
        else:
            return None
class SchemaEventTarget(object):
    """Base class for elements that are the targets of :class:`.DDLEvents`
    events.
    This includes :class:`.SchemaItem` as well as :class:`.SchemaType`.
    """
    def _set_parent(self, parent):
        """Associate with this SchemaEvent's parent object."""
        raise NotImplementedError()
    def _set_parent_with_dispatch(self, parent):
        # fire before/after attachment events around the actual association
        self.dispatch.before_parent_attach(self, parent)
        self._set_parent(parent)
        self.dispatch.after_parent_attach(self, parent)
class SchemaVisitor(ClauseVisitor):
    """Define the visiting for ``SchemaItem`` objects."""
    # flag consumed by the traversal machinery to select schema-level visits
    __traverse_options__ = {'schema_visitor': True}
class ColumnCollection(util.OrderedProperties):
    """An ordered dictionary that stores a list of ColumnElement
    instances.
    Overrides the ``__eq__()`` method to produce SQL clauses between
    sets of correlated columns.
    """
    # _all_columns preserves every added column in insertion order, including
    # key collisions that the keyed storage (_data) cannot represent
    __slots__ = '_all_columns'
    def __init__(self, *columns):
        super(ColumnCollection, self).__init__()
        object.__setattr__(self, '_all_columns', [])
        for c in columns:
            self.add(c)
    def __str__(self):
        return repr([str(c) for c in self])
    def replace(self, column):
        """add the given column to this collection, removing unaliased
        versions of this column as well as existing columns with the
        same key.
        e.g.::
            t = Table('sometable', metadata, Column('col1', Integer))
            t.columns.replace(Column('col1', Integer, key='columnone'))
        will remove the original 'col1' from the collection, and add
        the new column under the name 'columnone'.
        Used by schema.Column to override columns during table reflection.
        """
        remove_col = None
        # a column stored under its own name is displaced when the incoming
        # column reuses that name under a different key
        if column.name in self and column.key != column.name:
            other = self[column.name]
            if other.name == other.key:
                remove_col = other
                del self._data[other.key]
        if column.key in self._data:
            remove_col = self._data[column.key]
        self._data[column.key] = column
        if remove_col is not None:
            # substitute in place to preserve the original ordering
            self._all_columns[:] = [column if c is remove_col
                                    else c for c in self._all_columns]
        else:
            self._all_columns.append(column)
    def add(self, column):
        """Add a column to this collection.
        The key attribute of the column will be used as the hash key
        for this dictionary.
        """
        if not column.key:
            raise exc.ArgumentError(
                "Can't add unnamed column to column collection")
        self[column.key] = column
    def __delitem__(self, key):
        raise NotImplementedError()
    def __setattr__(self, key, object):
        raise NotImplementedError()
    def __setitem__(self, key, value):
        if key in self:
            # this warning is primarily to catch select() statements
            # which have conflicting column names in their exported
            # columns collection
            existing = self[key]
            if not existing.shares_lineage(value):
                util.warn('Column %r on table %r being replaced by '
                          '%r, which has the same key. Consider '
                          'use_labels for select() statements.' %
                          (key, getattr(existing, 'table', None), value))
            # pop out memoized proxy_set as this
            # operation may very well be occurring
            # in a _make_proxy operation
            util.memoized_property.reset(value, "proxy_set")
        self._all_columns.append(value)
        self._data[key] = value
    def clear(self):
        raise NotImplementedError()
    def remove(self, column):
        del self._data[column.key]
        self._all_columns[:] = [
            c for c in self._all_columns if c is not column]
    def update(self, iter):
        # ``iter`` yields (label, column) pairs
        cols = list(iter)
        all_col_set = set(self._all_columns)
        self._all_columns.extend(
            c for label, c in cols if c not in all_col_set)
        self._data.update((label, c) for label, c in cols)
    def extend(self, iter):
        # ``iter`` yields bare columns; keys are taken from each column
        cols = list(iter)
        all_col_set = set(self._all_columns)
        self._all_columns.extend(c for c in cols if c not in all_col_set)
        self._data.update((c.key, c) for c in cols)
    # equality produces a SQL expression, so instances are unhashable
    __hash__ = None
    @util.dependencies("sqlalchemy.sql.elements")
    def __eq__(self, elements, other):
        l = []
        for c in getattr(other, "_all_columns", other):
            for local in self._all_columns:
                if c.shares_lineage(local):
                    l.append(c == local)
        return elements.and_(*l)
    def __contains__(self, other):
        if not isinstance(other, util.string_types):
            raise exc.ArgumentError("__contains__ requires a string argument")
        return util.OrderedProperties.__contains__(self, other)
    def __getstate__(self):
        return {'_data': self._data,
                '_all_columns': self._all_columns}
    def __setstate__(self, state):
        object.__setattr__(self, '_data', state['_data'])
        object.__setattr__(self, '_all_columns', state['_all_columns'])
    def contains_column(self, col):
        # identity-based membership, unlike the key-based __contains__
        return col in set(self._all_columns)
    def as_immutable(self):
        return ImmutableColumnCollection(self._data, self._all_columns)
class ImmutableColumnCollection(util.ImmutableProperties, ColumnCollection):
    """Read-only variant of :class:`.ColumnCollection`."""
    def __init__(self, data, all_columns):
        util.ImmutableProperties.__init__(self, data)
        object.__setattr__(self, '_all_columns', all_columns)
    # mutating operations are disabled
    extend = remove = util.ImmutableProperties._immutable
class ColumnSet(util.ordered_column_set):
    """An ordered set of columns with SQL-expression equality."""
    def contains_column(self, col):
        return col in self
    def extend(self, cols):
        for col in cols:
            self.add(col)
    def __add__(self, other):
        return list(self) + list(other)
    @util.dependencies("sqlalchemy.sql.elements")
    def __eq__(self, elements, other):
        # AND together column == column for correlated pairs, mirroring
        # ColumnCollection.__eq__
        l = []
        for c in other:
            for local in self:
                if c.shares_lineage(local):
                    l.append(c == local)
        return elements.and_(*l)
    def __hash__(self):
        return hash(tuple(x for x in self))
def _bind_or_error(schemaitem, msg=None):
bind = schemaitem.bind
if not bind:
name = schemaitem.__class__.__name__
label = getattr(schemaitem, 'fullname',
getattr(schemaitem, 'name', None))
if label:
item = '%s object %r' % (name, label)
else:
item = '%s object' % name
if msg is None:
msg = "%s is not bound to an Engine or Connection. "\
"Execution can not proceed without a database to execute "\
"against." % item
raise exc.UnboundExecutionError(msg)
return bind
|
Ivehui/DQN | refs/heads/master | trainMain.py | 1 | '''
@author Ivehui
@time 2016/06/05
@function: train the agent
'''
import logging
import os, sys
import dqn
import parameters as pms
import gym
import numpy as np
import caffe
from skimage.transform import resize
from transition import Transition as Tran
def transfer(rgbImage, new_dims):
    """Convert an RGB frame to grayscale and resize it to ``new_dims``.

    :param rgbImage: H x W x C array; only the first three channels are used.
    :param new_dims: target (height, width) of the returned frame.
    :returns: float32 array of shape ``(new_dims[0], new_dims[1])``.
    """
    # ITU-R BT.601 luma weights. The original used 0.229/0.144 -- digit
    # transpositions of the standard 0.299/0.114 (they didn't sum to 1).
    im = np.dot(rgbImage[..., :3], [0.299, 0.587, 0.114])
    im_min, im_max = im.min(), im.max()
    if im_max > im_min:
        # skimage is fast but only understands {1,3} channel images
        # in [0, 1]: normalize, resize, then restore the original range.
        im_std = (im - im_min) / (im_max - im_min)
        resized_std = resize(im_std, new_dims, order=1)
        resized_im = resized_std * (im_max - im_min) + im_min
        return resized_im.astype(np.float32)
    # The image is a constant -- avoid divide by 0. Return a constant frame
    # of the requested 2-D shape; the original allocated a bogus 3-D array
    # of shape (H', W', im.shape[-1]), which does not match the 2-D frames
    # produced by the branch above.
    ret = np.empty((int(new_dims[0]), int(new_dims[1])), dtype=np.float32)
    ret.fill(im_min)
    return ret
if __name__ == '__main__':
    # # if isDisplsy == 0: no image plot
    # isDisplay = 1
    # You can optionally set up the logger. Also fine to set the level
    # to logging.DEBUG or logging.WARN if you want to change the
    # amount of output.
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    env = gym.make(pms.gameName)
    # You provide the directory to write to (can be an existing
    # directory, including one with existing data -- all monitor files
    # will be namespaced). You can also dump to a tempdir if you'd
    # like: tempfile.mkdtemp().
    outdir = '/tmp/DQN-' + pms.gameName
    env.monitor.start(outdir, force=True, seed=0)
    # This declaration must go *after* the monitor call, since the
    # monitor's seeding creates a new action_space instance with the
    # appropriate pseudorandom number generator.
    agent = dqn.DqnAgent(env.action_space)
    tran = Tran(max_size=pms.bufferSize)
    caffe.set_mode_gpu()
    # target (height, width) for the preprocessed grayscale frames
    imageDim = np.array((pms.frameHeight,
                         pms.frameWidth))
    # frame stacks: one grayscale frame per channel
    curFrame = np.zeros((pms.frameChannel,
                         pms.frameHeight,
                         pms.frameWidth))
    nextFrame = np.zeros((pms.frameChannel,
                          pms.frameHeight,
                          pms.frameWidth))
    testStep = 0     # total environment steps taken; drives epsilon decay
    update_step = 0  # training iterations since the last target-network sync
    for i in range(pms.episodeCount):
        rgbImage = env.reset()
        # env.render()
        done = False
        # initialize the frame stack by repeating the first observation
        for j in range(pms.frameChannel):
            curFrame[j, ...] = transfer(rgbImage, imageDim)
        rewardSum = 0
        while(done == False):
            # linearly annealed epsilon-greedy exploration
            eGreedy = max(pms.eGreedyFinal,
                          1 - testStep * (1 - pms.eGreedyFinal) / pms.finalNum)
            actionNum = agent.act(curFrame, eGreedy)
            reward = 0
            # repeat the chosen action for frameChannel steps, accumulating
            # reward and refilling the frame stack.
            # NOTE(review): if the episode ends mid-stack, the remaining
            # nextFrame slots keep stale frames from the previous step --
            # confirm this is intended.
            for j in range(pms.frameChannel):
                if(done == False):
                    rgbImage, rewardTemp, done, _ = env.step(actionNum)
                    nextFrame[j, ...] = transfer(rgbImage, imageDim)
                    reward += rewardTemp
            # env.render()
            # reward /= pms.frameChannel
            tran.saveTran(curFrame, actionNum, reward, done)
            curFrame = nextFrame.copy()
            testStep += 1
            rewardSum += reward
            # training
            overallSize = tran.getBufferSize()
            if overallSize > pms.startSize:
                for j in range(pms.trainNum):
                    # sample a minibatch of transition indices (excluding the
                    # newest transition, whose successor is not stored yet)
                    selected = np.random.choice(overallSize - 1, pms.batchSize, replace=False)
                    if tran.getIsFull():
                        selected = selected - overallSize + tran.getSize()
                    # calculate the q_target
                    agent.train(tran, selected)
                    # periodically sync the target network with the online one
                    if update_step > pms.updateStep:
                        update_step = 0
                        agent.updateTarget()
                    else:
                        update_step += 1
        print('No.' + str(testStep) + ' episode:' + str(i) + ' reward:' + str(rewardSum))
    # Dump result info to disk
    env.monitor.close()
    # Upload to the scoreboard. We could also do this from another
    # process if we wanted.
    logger.info("Successfully ran RandomAgent. Now trying to upload results to the scoreboard. If it breaks, you can always just try re-uploading the same results.")
    gym.upload(outdir)
|
viacoin/viacoin | refs/heads/master | test/functional/rpc_fundrawtransaction.py | 1 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
    """Return the first entry in *listunspent* whose 'amount' equals *amount*."""
    match = next((utx for utx in listunspent if utx['amount'] == amount), None)
    if match is None:
        raise AssertionError('Could not find unspent with amount={}'.format(amount))
    return match
class RawTransactionsTest(BitcoinTestFramework):
    def set_test_params(self):
        # four nodes, starting from a fresh (empty) regtest chain
        self.num_nodes = 4
        self.setup_clean_chain = True
    def setup_network(self, split=False):
        # Start the nodes and wire them with node0 as a hub:
        # 0<->1, 1<->2, 0<->2, 0<->3. ``split`` is accepted for interface
        # compatibility but unused here.
        self.setup_nodes()
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 0, 2)
        connect_nodes_bi(self.nodes, 0, 3)
    def run_test(self):
        """Exercise the fundrawtransaction RPC end to end.

        Covers: basic funding with one/two inputs and outputs, pre-selected
        VINs (both over- and under-funded), option validation (changeAddress,
        changePosition, change_type, feeRate, includeWatching,
        subtractFeeFromOutputs), fee parity with sendtoaddress/sendmany for
        P2PKH and multisig destinations, locked-wallet behavior, many-input
        funding, OP_RETURN-only transactions, and watch-only funding.
        """
        min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
        # This test is not meant to test fee estimation and we'd like
        # to be sure all txs are sent at a consistent desired feerate
        for node in self.nodes:
            node.settxfee(min_relay_tx_fee)
        # if the fee's positive delta is higher than this value tests will fail,
        # neg. delta always fail the tests.
        # The size of the signature of every input may be at most 2 bytes larger
        # than a minimum sized signature.
        # = 2 bytes * minRelayTxFeePerByte
        feeTolerance = 2 * min_relay_tx_fee/1000
        self.nodes[2].generate(1)
        self.sync_all()
        self.nodes[0].generate(121)
        self.sync_all()
        # ensure that setting changePosition in fundraw with an exact match is handled properly
        rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
        rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
        assert_equal(rawmatch["changepos"], -1)
        watchonly_address = self.nodes[0].getnewaddress()
        watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
        watchonly_amount = Decimal(200)
        self.nodes[3].importpubkey(watchonly_pubkey, "", True)
        watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
        self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
        self.nodes[0].generate(1)
        self.sync_all()
        ###############
        # simple test #
        ###############
        inputs  = [ ]
        outputs = { self.nodes[0].getnewaddress() : 1.0 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
        ##############################
        # simple test with two coins #
        ##############################
        inputs  = [ ]
        outputs = { self.nodes[0].getnewaddress() : 2.2 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
        ##############################
        # simple test with two coins #
        ##############################
        inputs  = [ ]
        outputs = { self.nodes[0].getnewaddress() : 2.6 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert(len(dec_tx['vin']) > 0)
        assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
        ################################
        # simple test with two outputs #
        ################################
        inputs  = [ ]
        outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
        assert(len(dec_tx['vin']) > 0)
        assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
        #########################################################################
        # test a fundrawtransaction with a VIN greater than the required amount #
        #########################################################################
        utx = get_unspent(self.nodes[2].listunspent(), 5)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
        outputs = { self.nodes[0].getnewaddress() : 1.0 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
        assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
        #####################################################################
        # test a fundrawtransaction with which will not get a change output #
        #####################################################################
        utx = get_unspent(self.nodes[2].listunspent(), 5)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
        outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
        assert_equal(rawtxfund['changepos'], -1)
        assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
        ####################################################
        # test a fundrawtransaction with an invalid option #
        ####################################################
        utx = get_unspent(self.nodes[2].listunspent(), 5)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
        ############################################################
        # test a fundrawtransaction with an invalid change address #
        ############################################################
        utx = get_unspent(self.nodes[2].listunspent(), 5)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        assert_raises_rpc_error(-5, "changeAddress must be a valid viacoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
        ############################################################
        # test a fundrawtransaction with a provided change address #
        ############################################################
        utx = get_unspent(self.nodes[2].listunspent(), 5)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        change = self.nodes[2].getnewaddress()
        assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        out = dec_tx['vout'][0]
        assert_equal(change, out['scriptPubKey']['addresses'][0])
        #########################################################
        # test a fundrawtransaction with a provided change type #
        #########################################################
        utx = get_unspent(self.nodes[2].listunspent(), 5)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
        assert_raises_rpc_error(-5, "Unknown change type", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
        rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
        tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
        assert_equal('witness_v0_keyhash', tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
        #########################################################################
        # test a fundrawtransaction with a VIN smaller than the required amount #
        #########################################################################
        utx = get_unspent(self.nodes[2].listunspent(), 1)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
        outputs = { self.nodes[0].getnewaddress() : 1.0 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        # 4-byte version + 1-byte vin count + 36-byte prevout then script_len
        rawtx = rawtx[:82] + "0100" + rawtx[84:]
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        matchingOuts = 0
        for i, out in enumerate(dec_tx['vout']):
            totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
                matchingOuts+=1
            else:
                assert_equal(i, rawtxfund['changepos'])
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
        assert_equal(matchingOuts, 1)
        assert_equal(len(dec_tx['vout']), 2)
        ###########################################
        # test a fundrawtransaction with two VINs #
        ###########################################
        utx = get_unspent(self.nodes[2].listunspent(), 1)
        utx2 = get_unspent(self.nodes[2].listunspent(), 5)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : 6.0 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        matchingOuts = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
                matchingOuts+=1
        assert_equal(matchingOuts, 1)
        assert_equal(len(dec_tx['vout']), 2)
        matchingIns = 0
        for vinOut in dec_tx['vin']:
            for vinIn in inputs:
                if vinIn['txid'] == vinOut['txid']:
                    matchingIns+=1
        assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
        #########################################################
        # test a fundrawtransaction with two VINs and two vOUTs #
        #########################################################
        utx = get_unspent(self.nodes[2].listunspent(), 1)
        utx2 = get_unspent(self.nodes[2].listunspent(), 5)
        inputs  = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        matchingOuts = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
                matchingOuts+=1
        assert_equal(matchingOuts, 2)
        assert_equal(len(dec_tx['vout']), 3)
        ##############################################
        # test a fundrawtransaction with invalid vin #
        ##############################################
        inputs  = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
        outputs = { self.nodes[0].getnewaddress() : 1.0}
        rawtx   = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
        ############################################################
        #compare fee of a standard pubkeyhash transaction
        inputs = []
        outputs = {self.nodes[1].getnewaddress():1.1}
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawtx)
        #create same transaction over sendtoaddress
        txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance)
        ############################################################
        ############################################################
        #compare fee of a standard pubkeyhash transaction with multiple outputs
        inputs = []
        outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawtx)
        #create same transaction over sendtoaddress
        txId = self.nodes[0].sendmany("", outputs)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance)
        ############################################################
        ############################################################
        #compare fee of a 2of2 multisig p2sh transaction
        # create 2of2 addr
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[1].getnewaddress()
        addr1Obj = self.nodes[1].validateaddress(addr1)
        addr2Obj = self.nodes[1].validateaddress(addr2)
        mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
        inputs = []
        outputs = {mSigObj:1.1}
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawtx)
        #create same transaction over sendtoaddress
        txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance)
        ############################################################
        ############################################################
        #compare fee of a standard pubkeyhash transaction
        # create 4of5 addr
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[1].getnewaddress()
        addr3 = self.nodes[1].getnewaddress()
        addr4 = self.nodes[1].getnewaddress()
        addr5 = self.nodes[1].getnewaddress()
        addr1Obj = self.nodes[1].validateaddress(addr1)
        addr2Obj = self.nodes[1].validateaddress(addr2)
        addr3Obj = self.nodes[1].validateaddress(addr3)
        addr4Obj = self.nodes[1].validateaddress(addr4)
        addr5Obj = self.nodes[1].validateaddress(addr5)
        mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])['address']
        inputs = []
        outputs = {mSigObj:1.1}
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawtx)
        #create same transaction over sendtoaddress
        txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance)
        ############################################################
        ############################################################
        # spend a 2of2 multisig transaction over fundraw
        # create 2of2 addr
        addr1 = self.nodes[2].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()
        addr1Obj = self.nodes[2].validateaddress(addr1)
        addr2Obj = self.nodes[2].validateaddress(addr2)
        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
        # send 1.2 BTC to msig addr
        txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()
        oldBalance = self.nodes[1].getbalance()
        inputs = []
        outputs = {self.nodes[1].getnewaddress():1.1}
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[2].fundrawtransaction(rawtx)
        signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
        txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()
        # make sure funds are received at node1
        assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
        ############################################################
        # locked wallet test
        self.stop_node(0)
        self.nodes[1].node_encrypt_wallet("test")
        self.stop_node(2)
        self.stop_node(3)
        self.start_nodes()
        # This test is not meant to test fee estimation and we'd like
        # to be sure all txs are sent at a consistent desired feerate
        for node in self.nodes:
            node.settxfee(min_relay_tx_fee)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        connect_nodes_bi(self.nodes,0,3)
        self.sync_all()
        # drain the keypool
        self.nodes[1].getnewaddress()
        self.nodes[1].getrawchangeaddress()
        inputs = []
        outputs = {self.nodes[0].getnewaddress():1.1}
        rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
        # fund a transaction that requires a new key for the change output
        # creating the key must be impossible because the wallet is locked
        assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
        #refill the keypool
        self.nodes[1].walletpassphrase("test", 100)
        self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
        self.nodes[1].walletlock()
        assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
        oldBalance = self.nodes[0].getbalance()
        inputs = []
        outputs = {self.nodes[0].getnewaddress():1.1}
        rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[1].fundrawtransaction(rawtx)
        #now we need to unlock
        self.nodes[1].walletpassphrase("test", 600)
        signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
        txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
        self.nodes[1].generate(1)
        self.sync_all()
        # make sure funds are received at node1
        assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
        ###############################################
        # multiple (~19) inputs tx test | Compare fee #
        ###############################################
        #empty node1, send some small coins from node0 to node1
        self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        for i in range(0,20):
            self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
        self.nodes[0].generate(1)
        self.sync_all()
        #fund a tx with ~20 small inputs
        inputs = []
        outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
        rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[1].fundrawtransaction(rawtx)
        #create same transaction over sendtoaddress
        txId = self.nodes[1].sendmany("", outputs)
        signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance*19)  #~19 inputs
        #############################################
        # multiple (~19) inputs tx test | sign/send #
        #############################################
        #again, empty node1, send some small coins from node0 to node1
        self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        for i in range(0,20):
            self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
        self.nodes[0].generate(1)
        self.sync_all()
        #fund a tx with ~20 small inputs
        oldBalance = self.nodes[0].getbalance()
        inputs = []
        outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
        rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[1].fundrawtransaction(rawtx)
        fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
        txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
        #####################################################
        # test fundrawtransaction with OP_RETURN and no vin #
        #####################################################
        rawtx   = "0100000000010000000000000000066a047465737400000000"
        dec_tx  = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(len(dec_tx['vin']), 0)
        assert_equal(len(dec_tx['vout']), 1)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        dec_tx  = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
        assert_equal(len(dec_tx['vout']), 2) # one change output added
        ##################################################
        # test a fundrawtransaction using only watchonly #
        ##################################################
        inputs = []
        outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
        result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
        res_dec = self.nodes[0].decoderawtransaction(result["hex"])
        assert_equal(len(res_dec["vin"]), 1)
        assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
        assert("fee" in result.keys())
        assert_greater_than(result["changepos"], -1)
        ###############################################################
        # test fundrawtransaction using the entirety of watched funds #
        ###############################################################
        inputs = []
        outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
        # Backward compatibility test (2nd param is includeWatching)
        result = self.nodes[3].fundrawtransaction(rawtx, True)
        res_dec = self.nodes[0].decoderawtransaction(result["hex"])
        assert_equal(len(res_dec["vin"]), 2)
        assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
        assert_greater_than(result["fee"], 0)
        assert_greater_than(result["changepos"], -1)
        assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
        signedtx = self.nodes[3].signrawtransaction(result["hex"])
        assert(not signedtx["complete"])
        signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
        assert(signedtx["complete"])
        self.nodes[0].sendrawtransaction(signedtx["hex"])
        self.nodes[0].generate(1)
        self.sync_all()
        #######################
        # Test feeRate option #
        #######################
        # Make sure there is exactly one input so coin selection can't skew the result
        assert_equal(len(self.nodes[3].listunspent(1)), 1)
        inputs = []
        outputs = {self.nodes[3].getnewaddress() : 1}
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
        result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
        result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
        result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
        result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
        assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
        assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
        ################################
        # Test no address reuse occurs #
        ################################
        result3 = self.nodes[3].fundrawtransaction(rawtx)
        res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
        changeaddress = ""
        for out in res_dec['vout']:
            if out['value'] > 1.0:
                changeaddress += out['scriptPubKey']['addresses'][0]
        assert(changeaddress != "")
        nextaddr = self.nodes[3].getnewaddress()
        # Now the change address key should be removed from the keypool
        assert(changeaddress != nextaddr)
        ######################################
        # Test subtractFeeFromOutputs option #
        ######################################
        # Make sure there is exactly one input so coin selection can't skew the result
        assert_equal(len(self.nodes[3].listunspent(1)), 1)
        inputs = []
        outputs = {self.nodes[2].getnewaddress(): 1}
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
        result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
                  self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
                  self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
                  self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
                  self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
        dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
        output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
        change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
        assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
        assert_equal(result[3]['fee'], result[4]['fee'])
        assert_equal(change[0], change[1])
        assert_equal(output[0], output[1])
        assert_equal(output[0], output[2] + result[2]['fee'])
        assert_equal(change[0] + result[0]['fee'], change[2])
        assert_equal(output[3], output[4] + result[4]['fee'])
        assert_equal(change[3] + result[3]['fee'], change[4])
        inputs = []
        outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
        result = [self.nodes[3].fundrawtransaction(rawtx),
                  # split the fee between outputs 0, 2, and 3, but not output 1
                  self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
        dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
                  self.nodes[3].decoderawtransaction(result[1]['hex'])]
        # Nested list of non-change output amounts for each transaction
        output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
                  for d, r in zip(dec_tx, result)]
        # List of differences in output amounts between normal and subtractFee transactions
        share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
        # output 1 is the same in both transactions
        assert_equal(share[1], 0)
        # the other 3 outputs are smaller as a result of subtractFeeFromOutputs
        assert_greater_than(share[0], 0)
        assert_greater_than(share[2], 0)
        assert_greater_than(share[3], 0)
        # outputs 2 and 3 take the same share of the fee
        assert_equal(share[2], share[3])
        # output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
        assert_greater_than_or_equal(share[0], share[2])
        assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
        # the fee is the same in both transactions
        assert_equal(result[0]['fee'], result[1]['fee'])
        # the total subtracted from the outputs is equal to the fee
        assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
    # Run the full fundrawtransaction test suite when invoked as a script.
    RawTransactionsTest().main()
|
wpensar/cnab240 | refs/heads/master | tests/test_registro.py | 1 | # -*- coding: utf-8 -*-
try:
import unittest2 as unittest
except ImportError:
import unittest
from decimal import Decimal
from cnab240 import errors
from cnab240.bancos import itau
from tests.data import get_itau_data_from_file
class TestRegistro(unittest.TestCase):
    """Field-level read/write validation for CNAB240 record objects (Itau layout)."""
    def setUp(self):
        # Shared fixtures: a parsed file header plus P/Q detail segments and
        # their expected 240-character serialized forms.
        itau_data = get_itau_data_from_file()
        self.header_arquivo = itau_data['header_arquivo']
        self.seg_p = itau_data['seg_p1']
        self.seg_p_str = itau_data['seg_p1_str']
        self.seg_q = itau_data['seg_q1']
        self.seg_q_str = itau_data['seg_q1_str']
    def test_leitura_campo_num_decimal(self):
        """Reading a decimal numeric field yields a Decimal value."""
        self.assertEqual(self.seg_p.valor_titulo, Decimal('100.00'))
    def test_escrita_campo_num_decimal(self):
        """Writing a decimal numeric field validates type, scale and width."""
        # only Decimal instances are accepted
        with self.assertRaises(errors.TipoError):
            self.seg_p.valor_titulo = 10.0
        with self.assertRaises(errors.TipoError):
            self.seg_p.valor_titulo = ''
        # the number of decimal places must match the field definition
        with self.assertRaises(errors.NumDecimaisError):
            self.seg_p.valor_titulo = Decimal('100.2')
        with self.assertRaises(errors.NumDecimaisError):
            self.seg_p.valor_titulo = Decimal('1001')
        with self.assertRaises(errors.NumDecimaisError):
            self.seg_p.valor_titulo = Decimal('1.000')
        # the total digit count must not exceed the field width
        with self.assertRaises(errors.NumDigitosExcedidoError):
            self.seg_p.valor_titulo = Decimal('10000000008100.21')
        # a well-formed Decimal is stored and read back unchanged
        self.seg_p.valor_titulo = Decimal('2.13')
        self.assertEqual(self.seg_p.valor_titulo, Decimal('2.13'))
    def test_leitura_campo_num_int(self):
        """Reading an integer numeric field yields an int."""
        self.assertEqual(self.header_arquivo.controle_banco, 341)
    def test_escrita_campo_num_int(self):
        """Writing an integer numeric field validates type and digit count."""
        # only integers are accepted
        with self.assertRaises(errors.TipoError):
            self.header_arquivo.controle_banco = 10.0
        with self.assertRaises(errors.TipoError):
            self.header_arquivo.controle_banco = ''
        # too many digits for the field width is rejected (both directions)
        with self.assertRaises(errors.NumDigitosExcedidoError):
            self.header_arquivo.controle_banco = 12345678234567890234567890
        with self.assertRaises(errors.NumDigitosExcedidoError):
            self.header_arquivo.controle_banco = 1234
        # a valid value round-trips
        self.header_arquivo.controle_banco = 5
        self.assertEqual(self.header_arquivo.controle_banco, 5)
    def test_leitura_campo_alfa(self):
        """Reading an alphanumeric field yields the stored text."""
        self.assertEqual(self.header_arquivo.cedente_nome,
                         'TRACY TECNOLOGIA LTDA ME')
    def test_escrita_campo_alfa(self):
        """Writing an alphanumeric field validates type and length."""
        # only text (str) objects are accepted; bytes are rejected
        with self.assertRaises(errors.TipoError):
            self.header_arquivo.cedente_nome = 'tracy'.encode()
        # strings longer than the field width are rejected
        with self.assertRaises(errors.NumDigitosExcedidoError):
            self.header_arquivo.cedente_convenio = '123456789012345678901'
        # the assigned value is stored on the object
        self.header_arquivo.cedente_nome = 'tracy'
        self.assertEqual(self.header_arquivo.cedente_nome, 'tracy')
    def test_fromdict(self):
        """A record serialized with todict() can rebuild an equivalent record."""
        header_dict = self.header_arquivo.todict()
        header_arquivo = itau.registros.HeaderArquivo(**header_dict)
        self.assertEqual(header_arquivo.cedente_nome, 'TRACY TECNOLOGIA LTDA ME')
        self.assertEqual(header_arquivo.nome_do_banco, 'BANCO ITAU SA')
    def test_necessario(self):
        """necessario() is True only once a non-control field is populated."""
        # NOTE(review): this first assertion checks the segment's truthiness,
        # not seg_p.necessario() — possibly intended to call necessario(); confirm.
        self.assertTrue(self.seg_p)
        seg_p2 = itau.registros.SegmentoP()
        self.assertFalse(seg_p2.necessario())
        seg_p2.controle_banco = 33
        self.assertFalse(seg_p2.necessario())
        seg_p2.vencimento_titulo = 10102012
        self.assertTrue(seg_p2.necessario())
    def test_unicode(self):
        """str() of a segment produces exactly the expected 240-char line."""
        def unicode_test(seg_instance, seg_str):
            seg_gen_str = str(seg_instance)
            self.assertEqual(len(seg_gen_str), 240)
            self.assertEqual(len(seg_str), 240)
            self.assertEqual(seg_gen_str, seg_str)
        unicode_test(self.seg_p, self.seg_p_str)
        unicode_test(self.seg_q, self.seg_q_str)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
|
kisna72/django | refs/heads/master | tests/gis_tests/inspectapp/models.py | 302 | from ..models import models
class AllOGRFields(models.Model):
    """Fixture model exercising every field type the OGR inspection maps.

    Field order and types are part of the test fixture contract — do not
    reorder or retype without updating the inspectdb expectations.
    """
    # NOTE(review): named f_decimal but declared FloatField — presumably
    # because OGR's decimal type is inspected as a FloatField; confirm
    # against the inspectdb tests before changing.
    f_decimal = models.FloatField()
    f_float = models.FloatField()
    f_int = models.IntegerField()
    f_char = models.CharField(max_length=10)
    f_date = models.DateField()
    f_datetime = models.DateTimeField()
    f_time = models.TimeField()
    geom = models.PolygonField()
    point = models.PointField()
    objects = models.GeoManager()
    class Meta:
        # Only create this table on spatially-enabled database backends.
        required_db_features = ['gis_enabled']
class Fields3D(models.Model):
    """Fixture model with 3D (Z-dimension) geometry fields."""
    point = models.PointField(dim=3)
    line = models.LineStringField(dim=3)
    poly = models.PolygonField(dim=3)
    objects = models.GeoManager()
    class Meta:
        # Only create this table on spatially-enabled database backends.
        required_db_features = ['gis_enabled']
|
throwable-one/lettuce | refs/heads/master | tests/integration/django/celeries/leaves/features/foobar-steps.py | 19 | from lettuce import step
@step(r'Given I say foo bar')
def given_i_say_foo_bar(step):
    """No-op step: always matches 'Given I say foo bar'."""
    pass
@step(r'Then it works')
def then_it_works(step):
    # Intentionally empty: always succeeds, for features expected to pass.
    pass
@step(r'Then it fails')
def then_it_fails(step):
    # Unconditionally fails, so features can exercise failure reporting.
    assert False
|
SteerSuite/steersuite-rutgers | refs/heads/master | steerstats/tests/SteerStats_Tester.py | 8 | from optparse import OptionParser
import inspect
import sys
sys.path.append("../")
import SteerStats as SteerStats
# Build a SteerStats instance without a config, enumerate its bound methods,
# and print each method name; when the name matches `objective`, print the
# full (name, bound-method) pair.  Finally resolve the objective by name via
# getBoundObjective.  (Python 2 script: uses print statements.)
ss = SteerStats.SteerStats(None)
methods = inspect.getmembers(ss, predicate=inspect.ismethod)
# print methods
objective = 'agentFlowMetricGlobal'
for method in methods:
    print method[0]
    if method[0] == objective:
        print method

print ss.getBoundObjective(objective)
|
erinn/ansible | refs/heads/devel | lib/ansible/plugins/action/template.py | 3 | # (c) 2015, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import os
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum_s
class ActionModule(ActionBase):
    """Action plugin for the ``template`` module: renders a Jinja2 template
    on the controller and copies the result to the managed host."""

    TRANSFERS_FILES = True

    def get_checksum(self, tmp, dest, try_directory=False, source=None):
        """Return the checksum of the remote file at ``dest``.

        Remote checksum codes '0', '2', '3', '4' are error conditions.
        If the code is '3' (dest is a directory) and ``try_directory`` is
        set, retry once with the template's basename appended to ``dest``.
        On an unrecoverable error, a ``dict(failed=True, ...)`` is returned
        instead of a checksum string; callers must check the return type.
        """
        remote_checksum = self._remote_checksum(tmp, dest)
        if remote_checksum in ('0', '2', '3', '4'):
            # Note: 1 means the file is not present which is fine; template
            # will create it. 3 means directory was specified instead of file
            if try_directory and remote_checksum == '3' and source:
                base = os.path.basename(source)
                dest = os.path.join(dest, base)
                remote_checksum = self.get_checksum(tmp, dest, try_directory=False)
                if remote_checksum not in ('0', '2', '3', '4'):
                    return remote_checksum

            result = dict(failed=True, msg="failed to checksum remote file."
                          " Checksum error code: %s" % remote_checksum)
            return result

        return remote_checksum

    def run(self, tmp=None, task_vars=None):
        ''' handler for template operations '''

        # BUGFIX: avoid a shared mutable default argument (was task_vars=dict()).
        if task_vars is None:
            task_vars = dict()

        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        faf = task_vars.get('first_available_file', None)

        if (source is None and faf is not None) or dest is None:
            return dict(failed=True, msg="src and dest are required")

        if tmp is None:
            tmp = self._make_tmp_path()

        if faf:
            #FIXME: issue deprecation warning for first_available_file, use with_first_found or lookup('first_found',...) instead
            found = False
            for fn in faf:
                fnt = self._templar.template(fn)
                # BUGFIX: was self._task._role_._role_path (attribute typo);
                # the attribute is _role, as used in the else-branch below.
                fnd = self._loader.path_dwim(self._task._role._role_path, 'templates', fnt)

                if not os.path.exists(fnd):
                    of = task_vars.get('_original_file', None)
                    if of is not None:
                        fnd = self._loader.path_dwim(self._task._role._role_path, 'templates', of)

                if os.path.exists(fnd):
                    source = fnd
                    found = True
                    break

            if not found:
                return dict(failed=True, msg="could not find src in first_available_file list")
        else:
            if self._task._role is not None:
                source = self._loader.path_dwim_relative(self._task._role._role_path, 'templates', source)
            else:
                source = self._loader.path_dwim(source)

        # Expand any user home dir specification
        dest = self._remote_expand_user(dest, tmp)

        directory_prepended = False
        if dest.endswith(os.sep):
            # Trailing separator means dest is a directory: append basename.
            directory_prepended = True
            base = os.path.basename(source)
            dest = os.path.join(dest, base)

        # template the source data locally & get ready to transfer
        try:
            with open(source, 'r') as f:
                template_data = f.read()
            resultant = self._templar.template(template_data, preserve_trailing_newlines=True)
        except Exception as e:
            return dict(failed=True, msg=type(e).__name__ + ": " + str(e))

        local_checksum = checksum_s(resultant)
        remote_checksum = self.get_checksum(tmp, dest, not directory_prepended, source=source)
        if isinstance(remote_checksum, dict):
            # Error from remote_checksum is a dict.  Valid return is a str
            return remote_checksum

        if local_checksum != remote_checksum:
            # if showing diffs, we need to get the remote value
            dest_contents = ''

            # FIXME: still need to implement diff mechanism
            #if self.runner.diff:
            #    # using persist_files to keep the temp directory around to avoid needing to grab another
            #    dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, task_vars=task_vars, persist_files=True)
            #    if 'content' in dest_result.result:
            #        dest_contents = dest_result.result['content']
            #        if dest_result.result['encoding'] == 'base64':
            #            dest_contents = base64.b64decode(dest_contents)
            #        else:
            #            raise Exception("unknown encoding, failed: %s" % dest_result.result)

            xfered = self._transfer_data(self._connection._shell.join_path(tmp, 'source'), resultant)

            # fix file permissions when the copy is done as a different user
            if self._connection_info.become and self._connection_info.become_user != 'root':
                self._remote_chmod('a+r', xfered, tmp)

            # run the copy module on the rendered result
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    src=xfered,
                    dest=dest,
                    original_basename=os.path.basename(source),
                    follow=True,
                ),
            )
            result = self._execute_module(module_name='copy', module_args=new_module_args, task_vars=task_vars)
            if result.get('changed', False):
                result['diff'] = dict(before=dest_contents, after=resultant)
            return result

        else:
            # when running the file module based on the template data, we do
            # not want the source filename (the name of the template) to be used,
            # since this would mess up links, so we clear the src param and tell
            # the module to follow links. When doing that, we have to set
            # original_basename to the template just in case the dest is
            # a directory.
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    src=None,
                    original_basename=os.path.basename(source),
                    follow=True,
                ),
            )
            return self._execute_module(module_name='file', module_args=new_module_args, task_vars=task_vars)
|
nghia-huynh/gem5-stable | refs/heads/master | src/arch/x86/isa/insts/general_purpose/input_output/general_io.py | 89 | # Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop IN_R_I {
.adjust_imm trimImm(8)
limm t1, imm, dataSize=asz
mfence
ld reg, intseg, [1, t1, t0], "IntAddrPrefixIO << 3", addressSize=8, \
nonSpec=True
mfence
};
def macroop IN_R_R {
zexti t2, regm, 15, dataSize=8
mfence
ld reg, intseg, [1, t2, t0], "IntAddrPrefixIO << 3", addressSize=8, \
nonSpec=True
mfence
};
def macroop OUT_I_R {
.adjust_imm trimImm(8)
limm t1, imm, dataSize=8
mfence
st reg, intseg, [1, t1, t0], "IntAddrPrefixIO << 3", addressSize=8, \
nonSpec=True
mfence
};
def macroop OUT_R_R {
zexti t2, reg, 15, dataSize=8
mfence
st regm, intseg, [1, t2, t0], "IntAddrPrefixIO << 3", addressSize=8, \
nonSpec=True
mfence
};
'''
|
swent10/support-tools | refs/heads/master | wiki_to_md/impl/constants.py | 151 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants used during conversion."""
import re
# These are the various different matching possibilities Google Code
# recognizes. As matches are made, the respective handler class method is
# is called, which can do what it wishes with the match.
# The pragmas:
PRAGMA_NAMES = ["summary", "labels", "sidebar"]
PRAGMA_RE = re.compile(r"^#(" + "|".join(PRAGMA_NAMES) + r")(.*)$")
# Whitespace:
WHITESPACE_RE = re.compile(r"\s+")
INDENT_RE = re.compile(r"\A\s*")
# Code blocks:
START_CODEBLOCK_RE = re.compile(r"^{{{$")
END_CODEBLOCK_RE = re.compile(r"^}}}$")
# Line rules. These rules consume an entire line:
LINE_FORMAT_RULES = [
r"""(?P<HRule>
^
----+
$
)""",
r"""(?P<Heading>
^
=+\s* # Matches the leading delimiter
.* # Matches the heading title text
\s*=+\s* # Matches the trailing delimiter
$
)""",
]
LINE_FORMAT_RE = re.compile("(?x)" + "|".join(LINE_FORMAT_RULES), re.UNICODE)
# General formatting rules:
SIMPLE_FORMAT_RULE = r"""
(?P<{0}>
(?:
(?<=\W|_) # Match only if preceded by an authorized delimiter
{1} # The opening format character
) |
(?:
{1} # Or match the closing format character...
(?=\W|_) # But only if followed by an authorized delimiter
) |
(?:
^ # Or match the format character at the start of a line...
{1}
) |
(?:
{1} # Or at the end of a line.
$
)
)
"""
URL_SCHEMA_RULE = r"(https?|ftp|nntp|news|mailto|telnet|file|irc)"
OPTIONAL_DESC_RULE = r"(?:\s+[^]]+)?"
VALID_PAGENAME = r"(([A-Za-z0-9][A-Za-z0-9_]*)?[A-Za-z0-9])"
# Link anchors use the Fragment ID pattern from RFC 1630.
# Dropping the quotes for security considerations.
XALPHA_RULE = r"[A-Za-z0-9%$-_@.&!*\(\),]"
# Only WikiWords matching this pattern are detected and autolinked in the text.
WIKIWORD_AUTOLINK_RULE = (
r"(?:[A-Z][a-z0-9]+_*)+(?:[A-Z][a-z0-9]+)(?:[#]{0}*?)?".format(XALPHA_RULE))
WIKIWORD_RULE = r"(?:{0}?(?:[#]{1}*?)?)".format(VALID_PAGENAME, XALPHA_RULE)
# "Plugins" are anything that looks like an XML/HTML tag.
PLUGIN_NAME = r"[a-zA-Z0-9_\-]+" # Matches a plugin name.
PLUGIN_ID = r"({0}:)?{0}".format(PLUGIN_NAME) # Matches a namespace and name.
PLUGIN_PARAM = r"""({0})\s*=\s*("[^"]*"|'[^']*'|\S+)""".format(PLUGIN_NAME)
PLUGIN = r"<{0}(?:\s+{1})*\s*/?>".format(PLUGIN_ID, PLUGIN_PARAM)
PLUGIN_END = r"</{0}>".format(PLUGIN_ID)
PLUGIN_ID_RE = re.compile(PLUGIN_ID, re.UNICODE)
PLUGIN_PARAM_RE = re.compile(PLUGIN_PARAM, re.UNICODE)
PLUGIN_RE = re.compile(PLUGIN, re.UNICODE)
PLUGIN_END_RE = re.compile(PLUGIN_END, re.UNICODE)
TEXT_FORMAT_RULES = [
SIMPLE_FORMAT_RULE.format("Bold", r"\*"),
SIMPLE_FORMAT_RULE.format("Italic", "_"),
SIMPLE_FORMAT_RULE.format("Strikethrough", "~~"),
r"\^(?P<Superscript>.+?)\^",
r",,(?P<Subscript>.+?),,",
r"`(?P<InlineCode>.+?)`",
r"\{\{\{(?P<InlineCode2>.+?)\}\}\}",
r"""# Matches an entire table cell
(?P<TableCell>
(?:\|\|)+ # Any number of start markers, to support rowspan
.*? # Text of the table cell
(?=\|\|) # Assertion that we have a table cell end
)""",
r"(?P<TableRowEnd>\|\|\s*$)",
r"""# Matches a freestanding URL in the source text.
(?P<Url>
\b(?:{0}://|(mailto:)) # Matches supported URL schemas
[^\s'\"<]+ # Match at least one character that is
# authorized within a URL.
[^\s'\"<.,}})\]]+ # After that, match all the way up to the first
# character that looks like a terminator.
)""".format(URL_SCHEMA_RULE),
r"""# Matches bracketed URLs: [http://foo.bar An optional description]
(?P<UrlBracket>
\[
(?:{0}://|(mailto:)) # Matches supported URL schemas
[^]\s]+ # Matches up to the closing bracket or whitespace
{1} # Matches the optional URL description
\]
)""".format(URL_SCHEMA_RULE, OPTIONAL_DESC_RULE),
r"""# Matches a WikiWord embedded in the text.
(?:
(?<![A-Za-z0-9\[]) # Matches the WikiWord only if it's not preceded
# by an alphanumeric character or a bracket.
(?P<WikiWord>
!? # The WikiWord is preceded by an optional exclamation
# mark, which makes it not a link. However, we still
# need to match it as being a link, so that we can strip
# the exclamation mark from the resulting plaintext WikiWord.
{0} # The WikiWord itself
)
(?![A-Za-z0-9]) # Matches the WikiWord only if it's not followed
# by alphanumeric characters.
)""".format(WIKIWORD_AUTOLINK_RULE),
r"""# Matches a forced/named WikiLink: [WikiWord an optional description]
(?P<WikiWordBracket>
\[
{0} # Matches the WikiWord
{1} # Matches the optional WikiLink description
\]
)""".format(WIKIWORD_RULE, OPTIONAL_DESC_RULE),
r"""# Matches an issue reference.
(?P<IssueLink>
(
\b([Ii][Ss][Ss][Uu][Ee]|[Bb][Uu][Gg])\s*\#?
)
\d+\b
)
""",
r"""# Matches a revision reference.
(?P<RevisionLink>
(
\b[Rr]([Ee][Vv][Ii][Ss][Ii][Oo][Nn]\s*\#?)?
)
\d+\b
)
""",
r"(?P<Plugin>{0})".format(PLUGIN),
r"(?P<PluginEnd>{0})".format(PLUGIN_END),
r"""# Matches a variable being used, defined in a plugin or globally.
%%(?P<Variable>[\w|_|\-]+)%%"""
]
TEXT_FORMAT_RE = re.compile("(?x)" + "|".join(TEXT_FORMAT_RULES), re.UNICODE)
# For verification of YouTube video IDs.
YOUTUBE_VIDEO_ID_RE = re.compile("^[a-zA-Z0-9_-]+$")

# List types: maps a wiki list-marker character to the kind of list it opens.
LIST_TYPES = {
    "1": "numeric",
    "#": "numeric",
    "*": "bullet",
    " ": "blockquote",
}
|
2ndQuadrant/ansible | refs/heads/master | lib/ansible/modules/cloud/google/gcp_storage_object.py | 12 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_storage_object
description:
- Upload or download a file from a GCS bucket.
short_description: Creates a GCP Object
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
action:
description:
- Upload or download from the bucket.
required: false
choices:
- download
- upload
overwrite:
description:
- "'Overwrite the file on the bucket/local machine. If overwrite is false and
a difference exists between GCS + local, module will fail with error' ."
required: false
type: bool
src:
description:
- Source location of file (may be local machine or cloud depending on action).
required: false
dest:
description:
- Destination location of file (may be local machine or cloud depending on action).
required: false
bucket:
description:
- The name of the bucket.
required: false
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a object
gcp_storage_object:
name: ansible-storage-module
action: download
bucket: ansible-bucket
src: modules.zip
dest: "~/modules.zip"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
action:
description:
- Upload or download from the bucket.
returned: success
type: str
overwrite:
description:
- "'Overwrite the file on the bucket/local machine. If overwrite is false and a
difference exists between GCS + local, module will fail with error' ."
returned: success
type: bool
src:
description:
- Source location of file (may be local machine or cloud depending on action).
returned: success
type: str
dest:
description:
- Destination location of file (may be local machine or cloud depending on action).
returned: success
type: str
bucket:
description:
- The name of the bucket.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import os
import mimetypes
import hashlib
import base64
################################################################################
# Main
################################################################################
def main():
    """Entry point: parse module args, compare local and remote state, then
    upload or download the object as requested."""
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            action=dict(type='str', choices=['download', 'upload']),
            overwrite=dict(type='bool'),
            src=dict(type='path'),
            dest=dict(type='path'),
            bucket=dict(type='str'),
        )
    )

    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/devstorage.full_control']

    remote_object = fetch_resource(module, self_link(module))
    local_file_exists = os.path.isfile(local_file_path(module))

    # Check if files exist.
    if module.params['action'] == 'download' and not remote_object:
        module.fail_json(msg="File does not exist in bucket")

    if module.params['action'] == 'upload' and not local_file_exists:
        module.fail_json(msg="File does not exist on disk")

    # Check if we'll be overwriting files.
    if not module.params['overwrite']:
        # BUGFIX: remote_object is None when uploading an object that does
        # not yet exist in the bucket; guard before mutating it (previously
        # this raised TypeError on None).
        if remote_object:
            remote_object['changed'] = False
        if module.params['action'] == 'download' and local_file_exists:
            # If files differ, throw an error
            if get_md5_local(local_file_path(module)) != remote_object['md5Hash']:
                module.fail_json(msg="Local file is different than remote file")
            # If files are the same, module is done running.
            else:
                module.exit_json(**remote_object)

        elif module.params['action'] == 'upload' and remote_object:
            # If files differ, throw an error
            if get_md5_local(local_file_path(module)) != remote_object['md5Hash']:
                module.fail_json(msg="Local file is different than remote file")
            # If files are the same, module is done running.
            else:
                module.exit_json(**remote_object)

    # Upload/download the files (each helper builds its own session).
    if module.params['action'] == 'download':
        results = download_file(module)
    else:
        results = upload_file(module)

    module.exit_json(**results)
def download_file(module):
    """Fetch the object's content from GCS and write it to params['dest'].

    Returns the refreshed object metadata dict.
    """
    auth = GcpSession(module, 'storage')
    data = auth.get(media_link(module))
    # BUGFIX: open in binary mode -- the payload is written as bytes, and
    # writing bytes to a text-mode file raises TypeError on Python 3.
    # NOTE(review): round-tripping through data.text + utf8 re-encode only
    # works for text objects; data.content would be safer for binary blobs.
    with open(module.params['dest'], 'wb') as f:
        f.write(data.text.encode('utf8'))
    return fetch_resource(module, self_link(module))
def upload_file(module):
    """POST the local file at params['src'] to the bucket's media-upload
    endpoint and return the resulting object metadata (with changed=True)."""
    auth = GcpSession(module, 'storage')
    # NOTE(review): text-mode 'r' presumably breaks for binary files on
    # Python 3 -- confirm whether 'rb' is intended here.
    with open(module.params['src'], 'r') as f:
        results = return_if_object(module, auth.post_contents(upload_link(module), f, object_headers(module)))
    results['changed'] = True
    return results
def get_md5_local(path):
    """Return the base64-encoded MD5 digest of the file at ``path`` as str.

    GCS reports ``md5Hash`` as a base64 *text* string; this function is
    compared directly against that value in main(), so it must return str.
    (Previously it returned bytes, which never compares equal to str on
    Python 3 -- every overwrite check would report "files differ".)
    The file is read in 4 KiB chunks to keep memory bounded.
    """
    md5 = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            md5.update(chunk)
    return base64.b64encode(md5.digest()).decode('ascii')
def get_md5_remote(module):
    """Return the remote object's base64 md5Hash (or None if absent)."""
    resource = fetch_resource(module, self_link(module))
    return resource.get('md5Hash')
def fetch_resource(module, link, allow_not_found=True):
    """GET ``link`` and return the parsed JSON dict, or None on 404/204."""
    auth = GcpSession(module, 'storage')
    return return_if_object(module, auth.get(link), allow_not_found)
def self_link(module):
    """Return the JSON-API URL identifying the remote object.

    Downloads name the remote object via the 'src' parameter; uploads
    via 'dest'.
    """
    if module.params['action'] == 'download':
        template = "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}"
    else:
        template = "https://www.googleapis.com/storage/v1/b/{bucket}/o/{dest}"
    return template.format(**module.params)
def local_file_path(module):
    """Return the local-machine path involved in the transfer:
    'dest' for downloads, 'src' for uploads."""
    key = 'dest' if module.params['action'] == 'download' else 'src'
    return module.params[key]
def media_link(module):
    """Return the URL that serves the object's raw content (?alt=media)."""
    if module.params['action'] == 'download':
        template = "https://www.googleapis.com/storage/v1/b/{bucket}/o/{src}?alt=media"
    else:
        template = "https://www.googleapis.com/storage/v1/b/{bucket}/o/{dest}?alt=media"
    return template.format(**module.params)
def upload_link(module):
    """Return the media-upload endpoint URL for the destination object."""
    template = "https://www.googleapis.com/upload/storage/v1/b/{bucket}/o?uploadType=media&name={dest}"
    return template.format(**module.params)
def return_if_object(module, response, allow_not_found=False):
    """Parse an HTTP response into a dict, failing the module on errors.

    Returns None for an allowed 404 or an empty 204 body; otherwise raises
    the module's failure path on non-2xx status, unparseable JSON, or an
    API-level error payload under result['error']['errors'].
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)

    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
# Headers sent with the media-upload request, describing the local source
# file (remote name, guessed content type, and size).
def object_headers(module):
    """Build the HTTP headers describing the local file being uploaded."""
    src = module.params['src']
    return {
        "name": module.params['dest'],
        # Content type is guessed from the source file's extension.
        "Content-Type": mimetypes.guess_type(src)[0],
        "Content-Length": str(os.path.getsize(src)),
    }
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
|
ghtmtt/QGIS | refs/heads/master | python/plugins/db_manager/sqledit.py | 34 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ScriptEdit.py
---------------------
Date : February 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'February 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtGui import QColor, QFont, QKeySequence
from qgis.PyQt.QtWidgets import QShortcut
from qgis.PyQt.Qsci import QsciScintilla, QsciLexerSQL
from qgis.core import QgsSettings
class SqlEdit(QsciScintilla):
    """QScintilla-based SQL editor widget used by DB Manager: SQL lexer,
    line numbers, folding, current-line highlight and Ctrl+Space
    autocompletion."""

    # NOTE(review): these two constants are not referenced in this class --
    # presumably leftovers from the ScriptEdit widget this file derives from.
    LEXER_PYTHON = 0
    LEXER_R = 1

    def __init__(self, parent=None):
        QsciScintilla.__init__(self, parent)
        self.mylexer = None
        self.api = None

        self.setCommonOptions()
        self.initShortcuts()

    def setCommonOptions(self):
        """Apply the editor-wide appearance and behaviour settings."""
        # Enable non-ASCII characters
        self.setUtf8(True)

        # Default font
        font = QFont()
        font.setFamily('Courier')
        font.setFixedPitch(True)
        font.setPointSize(10)
        self.setFont(font)
        self.setMarginsFont(font)

        self.setBraceMatching(QsciScintilla.SloppyBraceMatch)

        self.setWrapMode(QsciScintilla.WrapWord)
        self.setWrapVisualFlags(QsciScintilla.WrapFlagByText,
                                QsciScintilla.WrapFlagNone, 4)

        self.setSelectionForegroundColor(QColor('#2e3436'))
        self.setSelectionBackgroundColor(QColor('#babdb6'))

        # Show line numbers ('000' reserves width for three digits)
        self.setMarginWidth(1, '000')
        self.setMarginLineNumbers(1, True)
        self.setMarginsForegroundColor(QColor('#2e3436'))
        self.setMarginsBackgroundColor(QColor('#babdb6'))

        # Highlight current line
        self.setCaretLineVisible(True)
        self.setCaretLineBackgroundColor(QColor('#d3d7cf'))

        # Folding
        self.setFolding(QsciScintilla.BoxedTreeFoldStyle)
        self.setFoldMarginColors(QColor('#d3d7cf'), QColor('#d3d7cf'))

        # Mark column 80 with vertical line
        self.setEdgeMode(QsciScintilla.EdgeLine)
        self.setEdgeColumn(80)
        self.setEdgeColor(QColor('#eeeeec'))

        # Indentation: 4 spaces, no tabs
        self.setAutoIndent(True)
        self.setIndentationsUseTabs(False)
        self.setIndentationWidth(4)
        self.setTabIndents(True)
        self.setBackspaceUnindents(True)
        self.setTabWidth(4)

        # Autocompletion: from installed APIs, triggered after 2 chars
        self.setAutoCompletionThreshold(2)
        self.setAutoCompletionSource(QsciScintilla.AcsAPIs)
        self.setAutoCompletionCaseSensitivity(False)

        # Load font from Python console settings
        settings = QgsSettings()
        fontName = settings.value('pythonConsole/fontfamilytext', 'Monospace')
        fontSize = int(settings.value('pythonConsole/fontsize', 10))

        self.defaultFont = QFont(fontName)
        self.defaultFont.setFixedPitch(True)
        self.defaultFont.setPointSize(fontSize)
        self.defaultFont.setStyleHint(QFont.TypeWriter)
        self.defaultFont.setBold(False)

        self.boldFont = QFont(self.defaultFont)
        self.boldFont.setBold(True)

        self.italicFont = QFont(self.defaultFont)
        self.italicFont.setItalic(True)

        self.setFont(self.defaultFont)
        self.setMarginsFont(self.defaultFont)

        self.initLexer()

    def initShortcuts(self):
        """Disable conflicting default key bindings and install Ctrl+Space
        for autocompletion."""
        (ctrl, shift) = (self.SCMOD_CTRL << 16, self.SCMOD_SHIFT << 16)

        # Disable some shortcuts
        self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('D') + ctrl)
        self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl)
        self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl +
                           shift)
        self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('T') + ctrl)

        # self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord("Z") + ctrl)
        # self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord("Y") + ctrl)

        # Use Ctrl+Space for autocompletion
        self.shortcutAutocomplete = QShortcut(QKeySequence(Qt.CTRL +
                                                           Qt.Key_Space), self)
        self.shortcutAutocomplete.setContext(Qt.WidgetShortcut)
        self.shortcutAutocomplete.activated.connect(self.autoComplete)

    def autoComplete(self):
        """Trigger autocompletion from all available sources."""
        self.autoCompleteFromAll()

    def initLexer(self):
        """Create and configure the SQL lexer: per-style colours and fonts.

        NOTE(review): the integer style indices below presumably follow
        QsciLexerSQL's style enumeration (1=Comment, 2=Number, 5=Keyword,
        12=CommentDocKeyword, ...) -- confirm against the QScintilla docs.
        """
        self.mylexer = QsciLexerSQL()

        colorDefault = QColor('#2e3436')
        colorComment = QColor('#c00')
        colorCommentBlock = QColor('#3465a4')
        colorNumber = QColor('#4e9a06')
        colorType = QColor('#4e9a06')
        colorKeyword = QColor('#204a87')
        colorString = QColor('#ce5c00')

        self.mylexer.setDefaultFont(self.defaultFont)
        self.mylexer.setDefaultColor(colorDefault)

        self.mylexer.setColor(colorComment, 1)
        self.mylexer.setColor(colorNumber, 2)
        self.mylexer.setColor(colorString, 3)
        self.mylexer.setColor(colorString, 4)
        self.mylexer.setColor(colorKeyword, 5)
        self.mylexer.setColor(colorString, 6)
        self.mylexer.setColor(colorString, 7)
        self.mylexer.setColor(colorType, 8)
        self.mylexer.setColor(colorCommentBlock, 12)
        self.mylexer.setColor(colorString, 15)

        self.mylexer.setFont(self.italicFont, 1)
        self.mylexer.setFont(self.boldFont, 5)
        self.mylexer.setFont(self.boldFont, 8)
        self.mylexer.setFont(self.italicFont, 12)

        self.setLexer(self.mylexer)

    def lexer(self):
        """Return the lexer instance configured by initLexer()."""
        return self.mylexer

    def setMarginVisible(self, visible):
        # No-op placeholder: margins are configured in setCommonOptions();
        # the method is kept so callers expecting this API don't break.
        pass
|
emd/mitpci | refs/heads/master | mitpci/interferometer/__init__.py | 1 | from demodulated import Lissajous, Phase
from toroidal_correlation import ToroidalCorrelation
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.