| repo_name (string, 5-92 chars) | path (string, 4-221 chars) | copies (string, 19 classes) | size (string, 4-6 chars) | content (string, 766-896k chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
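The rows that follow use this schema, with the `content` cell holding the raw source file between the leading and trailing metadata fields. As a rough, purely illustrative sketch (nothing here ships with the dataset; `rows`, `keep_row`, and every threshold are invented for the example), a record loaded as a plain Python dict keyed by the column names above could be filtered on the boolean and numeric columns like this:

```python
# Hypothetical helper for consuming the dump; `rows` is assumed to be a list of
# dicts keyed by the column names in the header, and the license values and
# thresholds are made up for the example.
def keep_row(row):
    """Keep human-written, non-test files under a permissive license."""
    return (
        not row["autogenerated"]
        and not row["config_test"]
        and row["license"] in {"mit", "bsd-3-clause"}
        and 0.25 <= row["alpha_frac"] <= 0.96  # documented range of the column
    )

# Usage: kept = [r for r in rows if keep_row(r)]
```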
| mjsauvinen/P4UL | pyRaster/tif2NumpyTile.py | 1 | 1956 |
#!/usr/bin/env python3
import sys
import argparse
import numpy as np
from mapTools import *
from utilities import filesFromList, writeLog
from plotTools import addImagePlot
import matplotlib.pyplot as plt
'''
Author: Mikko Auvinen
mikko.auvinen@helsinki.fi
University of Helsinki &
Finnish Meteorological Institute
'''
#==========================================================#
parser = argparse.ArgumentParser(prog='tif2NumpyTile.py')
parser.add_argument("-f", "--filename",type=str, help="Input tif-image file name.")
parser.add_argument("-fo", "--fileout",type=str, help="Output npz file name.")
parser.add_argument("-r", "--reso",type=float, help="Resolution of the tif-image.")
parser.add_argument("-xo", "--xorig",type=float, nargs=2,default=[0.,0.],\
help="Coords [N,E] of the tif-images top-left corner. Default=[0,0]")
parser.add_argument("-p", "--printOn", help="Print the numpy array data.",\
action="store_true", default=False)
parser.add_argument("-pp", "--printOnly", help="Only print the numpy array data. Don't save.",\
action="store_true", default=False)
parser.add_argument("-s", "--scale",type=float, default=1.,\
help="Scale factor for the output. Default=1.")
args = parser.parse_args()
writeLog( parser, args, args.printOnly )
#==========================================================#
# Renaming, nothing more.
filename = args.filename
fileout = args.fileout
reso = args.reso
ROrig = args.xorig
printOn = args.printOn
printOnly = args.printOnly
sc = args.scale
R = openTifAsNumpy(filename)
dPx = np.array([sc*reso, sc*reso])
Rdict = {'R' : R, 'GlobOrig' : ROrig, 'gridRot' : 0., 'dPx' : dPx}
if( not printOnly ):
print(' Writing file {} ... '.format(fileout) )
saveTileAsNumpyZ( fileout, Rdict)
print(' ... done! ')
if( printOn or printOnly ):
pfig = plt.figure(num=1, figsize=(10.,10.))
pfig = addImagePlot( pfig, R, fileout, gridOn=True )
plt.show()
| mit | 4,073,113,538,880,150,500 | 34.563636 | 95 | 0.641616 | false | 3.165049 | false | false | false |
| Kronopt/pipUpdateAll | pipUpdateAll.py | 1 | 3682 |
#!python2
# coding: utf-8
"""
PIP UPDATE ALL
Updates outdated python modules using pip
Checks outdated modules using "pip list --outdated --format columns", parses that column to only show relevant
information (name, current version, new version) and then updates all detected modules using "pip install -U" followed
by each module's name
DEPENDENCIES:
- Python 2.7
- pip
HOW TO RUN:
- Directly, by double clicking the script.
"""
import subprocess
import sys
from time import sleep
__author__ = 'Pedro HC David, https://github.com/Kronopt'
__credits__ = ['Pedro HC David']
__version__ = '1.0'
__date__ = '02:40h, 16/12/2016'
__status__ = 'Finished'
def pip_list_columns_parser(pip_list_columns_format_output):
"""
Parses the output of "pip list --outdated --format columns" into a dictionary
PARAMETERS:
pip_list_columns_format_output : str
output of "pip list --outdated --format columns"
RETURNS: {module_name : (current_version, new_version)}
Module_name associated with its current_version and new_version
"""
# Column format:
#
# Package Version Latest Type
# ------------- --------- --------- ----
# module_1_name version_1 version_2 type
# module_2_name version_1 version_2 type
final_dictionary = {}
# removes "Package", "Version", etc and "----"
modules_and_versions = pip_list_columns_format_output.split()[8:]
number_of_modules = len(modules_and_versions)/4
# parses list
for module_number in xrange(number_of_modules):
list_position = module_number*4
final_dictionary[modules_and_versions[list_position]] = (modules_and_versions[list_position+1],
modules_and_versions[list_position+2])
return final_dictionary
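# Illustrative example, not part of the original script: with the column layout
# documented above, the parser drops the 8 header/separator tokens and then reads
# the remaining tokens in groups of four (name, current, latest, type), e.g.
#   pip_list_columns_parser("Package  Version  Latest  Type\n"
#                           "-------  -------  ------  ----\n"
#                           "requests 2.9.1    2.18.4  wheel")
#   returns {"requests": ("2.9.1", "2.18.4")}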
if __name__ == '__main__':
# location of python executable, avoids dependency on windows PATH
python_executable = sys.executable
# checking if pip is installed
try:
pip_version_output = subprocess.check_output([python_executable, "-m", "pip", "--version"])
pip_version = pip_version_output.split()[1]
except subprocess.CalledProcessError:
print "Python cannot locate pip..."
sys.exit()
print "Modules to be updated using pip version", pip_version + ":"
# Get modules out of date
modules_to_update_columns = subprocess.check_output(
[python_executable, "-m", "pip", "list", "--outdated", "--format", "columns"])
# dictionary in the format {module_name : (current_version, new_version)}
modules_to_update = pip_list_columns_parser(modules_to_update_columns)
if len(modules_to_update) > 0:
module_names = []
# shows modules out of date and each respective current versions and new versions
for module_name, (current_version, new_version) in sorted(modules_to_update.iteritems()):
print module_name + ":", current_version, "->", new_version
module_names.append(module_name)
print
no_correct_answer_given_yet = True
while no_correct_answer_given_yet:
answer = raw_input("Do you wish to continue (y/n)? ")
if answer == "y":
# call "pip install -U" with every outdated module name as parameters
subprocess.call([python_executable, "-m", "pip", "install", "--upgrade"] + module_names)
no_correct_answer_given_yet = False
elif answer == "n":
print "Update canceled"
no_correct_answer_given_yet = False
else:
print "All modules are up to date"
sleep(2)
| mit | 5,579,989,667,137,161,000 | 33.735849 | 118 | 0.625204 | false | 3.942184 | false | false | false |
| leebird/legonlp | utils/runner.py | 1 | 1644 |
import sys
import os
import codecs
class Runner(object):
runnerName = None
def __init__(self):
'''
read input files and process
run directly on input files
run directly on input dir
process output
'''
pass
def run(self, args):
'''
inputs: a list of (dir, suffix) pairs
outputs: a list of (dir, suffix) pairs
Note that dir should be an absolute path
'''
raise NotImplementedError
def read_file(self, filepath):
if not os.path.isfile(filepath):
print >> sys.stderr, 'file not found: ' + filepath
return None
f = codecs.open(filepath, 'r', 'utf-8')
text = f.read().strip()
f.close()
return text
def write_file(self, content, filepath):
f = codecs.open(filepath, 'w', 'utf-8')
f.write(content)
f.close()
def get_files(self, dirname, sux, docList):
'''
get a list of path for the docList
'''
return [os.path.join(dirname, doc + sux) for doc in docList]
def get_io_files(self, dirsux, docList):
"""
get a zipped list of paths for all the dirs and the docList
:param dirsux: a list of (dir, suffix) pairs
:type dirsux: list
:param docList: a list of doc name
:type docList: list
:return: a zipped list of dir+file+suffix tuples
:rtype: list
"""
res = []
for ds in dirsux:
dirname, sux = ds[:2]
res.append(self.get_files(dirname, sux, docList))
return zip(*res)
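# Illustrative example, not part of the original module: for
#   dirsux  = [('/data/txt', '.txt'), ('/data/ann', '.ann')]
#   docList = ['doc1', 'doc2']
# get_io_files(dirsux, docList) zips the per-directory path lists together:
#   [('/data/txt/doc1.txt', '/data/ann/doc1.ann'),
#    ('/data/txt/doc2.txt', '/data/ann/doc2.ann')]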
| gpl-2.0 | 7,072,275,004,045,526,000 | 23.909091 | 68 | 0.544404 | false | 3.98063 | false | false | false |
| crossbario/crossbar-fabric-cli | cbsh/idl/loader.py | 1 | 16965 |
#####################################################################################
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Unless a separate license agreement exists between you and Crossbar.io GmbH (e.g.
# you have purchased a commercial license), the license terms below apply.
#
# Should you enter into a separate license agreement after having received a copy of
# this software, then the terms of such license agreement replace the terms below at
# the time at which such license agreement becomes effective.
#
# In case a separate license agreement ends, and such agreement ends without being
# replaced by another separate license agreement, the license terms below apply
# from the time at which said agreement ends.
#
# LICENSE TERMS
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License, version 3, as published by the
# Free Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <https://www.gnu.org/licenses/gpl-3.0.en.html>.
#
#####################################################################################
import os
import json
import argparse
import hashlib
import pprint
from typing import Dict, Any # noqa
import six
import click
from cbsh.util import hl
from cbsh.reflection import Schema
import txaio
txaio.use_asyncio()
def extract_attributes(item, allowed_attributes=None):
num_attrs = item.AttributesLength()
attrs = [item.Attributes(i) for i in range(num_attrs)]
attrs_dict = {
x.Key().decode('utf8'): x.Value().decode('utf8')
if x.Value().decode('utf8') not in ['0'] else None
for x in attrs
}
if allowed_attributes:
for attr in attrs_dict:
if attr not in allowed_attributes:
raise Exception(
'invalid XBR attribute "{}" - must be one of {}'.format(
attr, allowed_attributes))
return attrs_dict
def extract_docs(item):
num_docs = item.DocumentationLength()
item_docs = [
item.Documentation(i).decode('utf8').strip() for i in range(num_docs)
]
return item_docs
INTERFACE_ATTRS = ['type', 'uuid']
INTERFACE_MEMBER_ATTRS = ['type', 'stream']
INTERFACE_MEMBER_TYPES = ['procedure', 'topic']
INTERFACE_MEMBER_STREAM_VALUES = [None, 'in', 'out', 'inout']
EXTRACT_ATTRS_RAW = False
_BASETYPE_ID2NAME = {
None: 'Unknown',
0: 'none',
1: 'utype',
2: 'bool',
3: 'int8',
4: 'uint8',
5: 'int16',
6: 'uint16',
7: 'int32',
8: 'uint32',
9: 'int64',
10: 'uint64',
11: 'float',
12: 'double',
13: 'string',
14: 'vector',
15: 'object',
16: 'union',
}
def read_reflection_schema(buf, log=None):
"""
Read a binary FlatBuffers buffer that is typed according to the FlatBuffers
reflection schema.
The function returns extracted information in a plain, JSON serializable dict.
"""
if not log:
log = txaio.make_logger()
_schema = Schema.GetRootAsSchema(buf, 0)
_root = _schema.RootTable()
if _root:
root_name = _root.Name().decode('utf8').strip()
else:
root_name = None
_file_ident = _schema.FileIdent().decode('utf8').strip()
if _file_ident == '':
_file_ident = None
_file_ext = _schema.FileExt().decode('utf8').strip()
if _file_ext == '':
_file_ext = None
m = hashlib.sha256()
m.update(buf)
schema_meta = {
'bfbs_size': len(buf),
'bfbs_sha256': m.hexdigest(),
'file_ident': _file_ident,
'file_ext': _file_ext,
'root': root_name,
}
schema = None # type: dict
schema = {
'meta': schema_meta,
'tables': [],
'enums': [],
'services': [],
}
schema_by_uri = None # type: dict
schema_by_uri = {
'meta': schema_meta,
'types': {},
}
enums = []
objects = []
services = []
fqn2type = dict() # type: Dict[str, Any]
enum_cnt = 0
object_cnt = 0
service_cnt = 0
typerefs_cnt = 0
typerefs_error_cnt = 0
for i in range(_schema.EnumsLength()):
item = _schema.Enums(i)
name = item.Name().decode('utf8')
if name in fqn2type:
raise Exception('duplicate name "{}"'.format(name))
enum_cnt += 1
for i in range(_schema.ObjectsLength()):
item = _schema.Objects(i)
name = item.Name().decode('utf8')
if name in fqn2type:
raise Exception('duplicate name "{}"'.format(name))
object_cnt += 1
for i in range(_schema.ServicesLength()):
item = _schema.Services(i)
name = item.Name().decode('utf8')
if name in fqn2type:
raise Exception('duplicate name "{}"'.format(name))
service_cnt += 1
log.info('Processing schema with {} enums, {} objects and {} services ...'.
format(enum_cnt, object_cnt, service_cnt))
# enums
#
num_enums = _schema.EnumsLength()
for i in range(num_enums):
# extract enum base information
#
_enum = _schema.Enums(i)
enum_name = _enum.Name().decode('utf8')
log.debug('processing enum {} ("{}")'.format(i, enum_name))
enum = {
# '_index': i,
'type': 'enum',
'name': enum_name,
'docs': extract_docs(_enum),
}
if EXTRACT_ATTRS_RAW:
enum['attr'] = extract_attributes(_enum)
# extract enum values
#
enum_values_dict = dict() # type: Dict[str, Any]
for j in range(_enum.ValuesLength()):
_enum_value = _enum.Values(j)
enum_value_name = _enum_value.Name().decode('utf8')
enum_value = {
'docs': extract_docs(_enum_value),
# enum values cannot have attributes
}
if enum_value_name in enum_values_dict:
raise Exception(
'duplicate enum value "{}"'.format(enum_value_name))
enum_values_dict[enum_value_name] = enum_value
enum['values'] = enum_values_dict
if enum_name in schema_by_uri['types']:
raise Exception(
'unexpected duplicate definition for qualified name "{}"'.
format(enum_name))
enums.append(enum)
schema_by_uri['types'][enum_name] = enum
# objects (tables/structs)
#
for i in range(_schema.ObjectsLength()):
_obj = _schema.Objects(i)
obj_name = _obj.Name().decode('utf8')
object_type = 'struct' if _obj.IsStruct() else 'table'
obj = {
# '_index': i,
'type': object_type,
'name': obj_name,
'docs': extract_docs(_obj),
}
if EXTRACT_ATTRS_RAW:
obj['attr'] = extract_attributes(_obj)
# extract fields
num_fields = _obj.FieldsLength()
fields = []
fields_by_name = {}
for j in range(num_fields):
_field = _obj.Fields(j)
field_name = _field.Name().decode('utf8')
log.debug('processing field {} ("{}")'.format(i, field_name))
_field_type = _field.Type()
_field_index = int(_field_type.Index())
_field_base_type = _BASETYPE_ID2NAME.get(_field_type.BaseType(),
None)
_field_element = _BASETYPE_ID2NAME.get(_field_type.Element(), None)
if _field_element == 'none':
_field_element = None
# FIXME
# if _field_element == 'object':
# el = _schema.Objects(_field_type.Element())
# if isinstance(el, reflection.Type) and hasattr(el, 'IsStruct'):
# _field_element = 'struct' if el.Element().IsStruct(
# ) else 'table'
field = {
# '_index': j,
'name': field_name,
'id': int(_field.Id()),
'offset': int(_field.Offset()),
'base_type': _field_base_type,
}
if _field_element:
# vector
field['element_type'] = _field_element
if _field_index != -1:
# field['field_index'] = _field_index
if _field_base_type in [
'object', 'struct'
] or _field_element in ['object', 'struct']:
# obj/struct
if _field_index < _schema.ObjectsLength():
l_obj = _schema.Objects(_field_index)
l_obj_ref = _obj.Name().decode('utf8')
field['ref_category'] = 'struct' if l_obj.IsStruct(
) else 'table'
field['ref_type'] = l_obj_ref
typerefs_cnt += 1
else:
log.info(
'WARNING - referenced table/struct for index {} ("{}.{}") not found'.
format(_field_index, obj_name, field_name))
field['ref_category'] = 'object'
field['ref_type'] = None
typerefs_error_cnt += 1
elif _field_base_type in [
'utype', 'bool', 'int8', 'uint8', 'int16', 'uint16',
'int32', 'uint32', 'int64', 'uint64', 'float',
'double', 'string'
]:
# enum
field['ref_category'] = 'enum'
if _field_index < _schema.EnumsLength():
_enum_ref = _schema.Enums(_field_index).Name().decode(
'utf8')
field['ref_type'] = _enum_ref
typerefs_cnt += 1
else:
log.info('WARNING - referenced enum not found')
field['ref_type'] = None
typerefs_error_cnt += 1
else:
raise Exception('unhandled field type: {} {} {} {}'.format(
field_name, _field_base_type, _field_element,
_field_index))
field_docs = extract_docs(_field)
if field_docs:
field['docs'] = field_docs
if EXTRACT_ATTRS_RAW:
_field_attrs = extract_attributes(_field)
if _field_attrs:
field['attr'] = _field_attrs
fields.append(field)
fields_by_name[field_name] = field
obj['fields'] = fields_by_name
if obj['name'] in schema_by_uri['types']:
raise Exception(
'unexpected duplicate definition for qualified name "{}"'.
format(field['name']))
# always append the object here, so we can dereference indexes
# correctly
objects.append(obj)
# skip our "void marker"
if False and obj_name in ['Void']:
pass
else:
schema_by_uri['types'][obj['name']] = obj
# iterate over services
#
num_services = _schema.ServicesLength()
for i in range(num_services):
_service = _schema.Services(i)
service_name = _service.Name().decode('utf8')
service_attrs_dict = extract_attributes(_service, INTERFACE_ATTRS)
service_type = service_attrs_dict.get('type', None)
if service_type != 'interface':
raise Exception(
'invalid value "{}" for attribute "type" in XBR interface'.
format(service_type))
service = {
# '_index': i,
'type': service_type,
'name': service_name,
'docs': extract_docs(_service),
}
if EXTRACT_ATTRS_RAW:
service['attrs'] = service_attrs_dict
else:
service['uuid'] = service_attrs_dict.get('uuid', None)
num_calls = _service.CallsLength()
calls = []
calls_by_name = {}
for j in range(num_calls):
_call = _service.Calls(j)
_call_name = _call.Name().decode('utf8')
call_attrs_dict = extract_attributes(_call)
call_type = call_attrs_dict.get('type', None)
if call_type not in INTERFACE_MEMBER_TYPES:
raise Exception(
'invalid XBR interface member type "{}" - must be one of {}'.
format(call_type, INTERFACE_MEMBER_TYPES))
call_stream = call_attrs_dict.get('stream', None)
if call_stream in ['none', 'None', 'null', 'Null']:
call_stream = None
if call_stream not in INTERFACE_MEMBER_STREAM_VALUES:
raise Exception(
'invalid XBR interface member stream modifier "{}" - must be one of {}'.
format(call_stream, INTERFACE_MEMBER_STREAM_VALUES))
def _decode_type(x):
res = x.Name().decode('utf8')
if res in ['Void', 'wamp.Void']:
res = None
return res
call = {
'type': call_type,
'name': _call_name,
'in': _decode_type(_call.Request()),
'out': _decode_type(_call.Response()),
'stream': call_stream,
# 'id': int(_call.Id()),
# 'offset': int(_call.Offset()),
}
# call['attrs'] = call_attrs_dict
call['docs'] = extract_docs(_call)
calls.append(call)
calls_by_name[_call_name] = call
# service['calls'] = sorted(calls, key=lambda field: field['id'])
service['slots'] = calls_by_name
services.append(service)
if service_name in schema_by_uri['types']:
raise Exception(
'unexpected duplicate definition for qualified name "{}"'.
format(service_name))
else:
schema_by_uri['types'][service_name] = service
if typerefs_error_cnt:
raise Exception(
'{} unresolved type references encountered in schema'.format(
typerefs_error_cnt))
schema['enums'] = sorted(enums, key=lambda enum: enum['name'])
schema['tables'] = sorted(objects, key=lambda obj: obj['name'])
schema['services'] = sorted(services, key=lambda service: service['name'])
return schema_by_uri
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'infile', help='FlatBuffers binary schema input file (.bfbs)')
parser.add_argument(
'-o', '--outfile', help='FlatBuffers JSON schema output (.json)')
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='Enable verbose processing output.')
parser.add_argument(
'-d', '--debug', action='store_true', help='Enable debug output.')
options = parser.parse_args()
log = txaio.make_logger()
txaio.start_logging(level='debug' if options.debug else 'info')
infile_path = os.path.abspath(options.infile)
with open(infile_path, 'rb') as f:
buf = f.read()
log.info('Loading FlatBuffers binary schema ({} bytes) ...'.format(
len(buf)))
try:
schema = read_reflection_schema(buf, log=log)
except Exception as e:
log.error(e)
if True:
schema['meta']['file_name'] = os.path.basename(options.infile)
schema['meta']['file_path'] = infile_path
with open(options.outfile, 'wb') as f:
outdata = json.dumps(
schema,
ensure_ascii=False,
sort_keys=False,
indent=4,
separators=(', ', ': ')).encode('utf8')
f.write(outdata)
cnt_bytes = len(outdata)
cnt_defs = len(schema['types'].keys())
log.info(
'FlatBuffers JSON schema data written ({} bytes, {} defs).'.format(
cnt_bytes, cnt_defs))
if options.verbose:
log.info('Schema metadata:')
schema_meta_str = pprint.pformat(schema['meta'])
# log.info(schema_meta_str)
# log.info('{}'.format(schema_meta_str))
print(schema_meta_str)
for o in schema['types'].values():
if o['type'] == 'interface':
log.info('interface: {}'.format(hl(o['name'], bold=True)))
for s in o['slots'].values():
log.info('{:>12}: {}'.format(s['type'], hl(s['name'])))
| mit | 3,829,342,878,639,213,000 | 31.009434 | 97 | 0.519776 | false | 4.0625 | false | false | false |
| omargammoh/rpislave | website/processing.py | 1 | 6269 |
from bson import json_util
import multiprocessing
from website.models import Conf
from time import time, sleep
import inspect
import subprocess
import json
try:
import signal
except:
print "signal cannot be imported"
def execute(cmd, daemon=False):
if daemon:
_ = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return None
else:
return subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read()
def fix_corrupt_db():
conf = get_conf()
write_json_file(conf, "/home/pi/data/conf")
execute('sudo rm /home/pi/rpislave/db.sqlite3')
execute('sudo reboot')
return None
def read_json_file(fp):
try:
f = file(fp, "r")
s = f.read()
f.close()
js = json.loads(s)
except:
js = None
return js
def write_json_file(js, fp):
f = file(fp, "w")
f.write(json.dumps(js))
f.close()
class Timeout:
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
raise BaseException(self.error_message)
def __enter__(self):
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.setitimer(signal.ITIMER_REAL, self.seconds)
def __exit__(self, type, value, traceback):
signal.alarm(0)
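# Illustrative usage, not part of the original module: Timeout is a context
# manager built on SIGALRM, so it only works on Unix and in the main thread.
#   with Timeout(seconds=5, error_message='db query timed out'):
#       do_something_slow()  # raises BaseException('db query timed out') after 5s
# do_something_slow is a placeholder for any blocking call.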
def filter_kwargs(func, kwargs_input):
"""
creates the kwargs of func from kwargs_input
func: function to inspect
"""
argnames,_,_,defaults = inspect.getargspec(func)
if defaults is None: defaults=[]
required_args = set(argnames[:len(argnames)-len(defaults)])
optional_args = set(argnames[len(argnames)-len(defaults):])
kwargs_needed = {k:v for (k,v) in kwargs_input.iteritems() if k in required_args.union(optional_args) }
return kwargs_needed
def get_pid(command):
"""
gets the pid of the process using the command column in the ps aux table
"""
s = subprocess.Popen("ps aux", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read()
lines = [line.split(None, 10) for line in s.split("\n") if line.lstrip() != ""]
matches = [line for line in lines if line[-1] == command]
if len(matches)==0:
print "no maches found"
return None
elif len(matches)>1:
print "multiple matches found"
return None
else:
pid = matches[0][1]
return pid
def get_conf():
for ob in Conf.objects.all():
try:
js = json_util.loads(ob.data)
if not ("label" in js):
raise BaseException('no label in conf')
return js
except:
print "!!was not able to parse and get label of a configuration row, skipping"
pass
return None
def fix_malformed_db():
try:
#get conf
print 'fix_malformed_db >> getting conf'
conf_x = get_conf()
#save it on a text file
print 'fix_malformed_db >> saving conf as text'
f = file('/home/pi/rpislave/conf.json', 'w')
f.write(json_util.dumps(conf_x))
f.close()
#remove db
import os
print 'fix_malformed_db >> deleting db'
os.remove('/home/pi/rpislave/db.sqlite3')
#keep a note as a file
print 'fix_malformed_db >> saving log as text'
from datetime import datetime
now = datetime.utcnow()
f = file('/home/pi/data/dbdelete-' + now.strftime('%Y%m%d%H%M%S'),'w')
f.write('we have taken a copy of conf, saved it on disk, deleted the database and restarted. %s' %str(now))
f.close()
#restart
print 'fix_malformed_db >> rebooting'
os.system('sudo reboot')
except:
print "error while trying to fix malformed db"
class MP():
def __init__(self, name, target, request, cmd=None):
self.t1 = time()
self.name = name
self.target = target
self.request = request
self.cmd = cmd if cmd else request.GET.get("cmd", None)
self.dic = {}
def start(self):
app_conf = get_conf()['apps'][self.name]
p = multiprocessing.Process(name=self.name, target=self.target, kwargs=filter_kwargs(func=self.target, kwargs_input=app_conf))
p.start()
def ison(self):
ac = [m for m in multiprocessing.active_children() if m.name == self.name ]
if len(ac) == 0:
return False
else:
#return ac[0].is_alive() #this line does not work when switching to uwsgi and gives the error: can only test a child process, this is due to the fact that uwsgi has many workers
return True
def stop(self):
ac = [m for m in multiprocessing.active_children() if self.name == m.name][0]
if ac:
if ac.pid:
kill_command = "sudo kill -INT %s" % ac.pid
print "stopping process in the good way: %s" % kill_command
s = subprocess.Popen(kill_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read()
else:
print "stopping process in in the hard way"
ac.terminate()
sleep(0.5)
return True
else:
return False
def process_command(self):
lis = []
print "%s" %(self.name)
ison_at_start = self.ison()
if self.cmd is None:
            lis.append('no cmd was provided')
elif self.cmd == 'start':
if ison_at_start:
lis.append('process was already running')
else:
self.start()
lis.append('process has been started')
elif self.cmd == 'stop':
if self.stop():
lis.append('terminated process')
else:
lis.append('process was not running')
elif self.cmd == 'status':
self.dic["%s" %self.name] = get_conf()['apps'][self.name]
else:
lis.append("we didnt understand your cmd")
#respond with some info
self.dic['log'] = lis
self.dic['ison'] = self.ison()
self.dic['took'] = "%s seconds" %(time()-self.t1)
| gpl-2.0 | 3,806,615,012,987,262,500 | 31.148718 | 189 | 0.584304 | false | 3.751646 | false | false | false |
| ilastikdev/ilastik | ilastik/applets/thresholdTwoLevels/_OpObjectsSegment.py | 1 | 10889 |
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
##############################################################################
# basic python modules
import functools
import logging
logger = logging.getLogger(__name__)
from threading import Lock as ThreadLock
# required numerical modules
import numpy as np
import vigra
import opengm
# basic lazyflow types
from lazyflow.operator import Operator
from lazyflow.slot import InputSlot, OutputSlot
from lazyflow.rtype import SubRegion
from lazyflow.stype import Opaque
from lazyflow.request import Request, RequestPool
# required lazyflow operators
from lazyflow.operators.opLabelVolume import OpLabelVolume
from lazyflow.operators.valueProviders import OpArrayCache
from lazyflow.operators.opCompressedCache import OpCompressedCache
from lazyflow.operators.opReorderAxes import OpReorderAxes
from _OpGraphCut import segmentGC, OpGraphCut
## segment predictions with pre-thresholding
#
# This operator segments an image into foreground and background and makes use
# of a preceding thresholding step. After thresholding, connected components
# are computed and are then considered to be "cores" of objects to be segmented.
# The Graph Cut optimization (see _OpGraphCut.OpGraphCut) is then applied to
# the bounding boxes of the object "cores", enlarged by a user-specified margin.
# The pre-thresholding operation allows to apply Graph Cut segmentation on
# large data volumes, in case the segmented foreground consists of sparse objects
# of limited size and the probability map of the unaries is of high recall, but
# possibly low precision. One particular application for this setup is
# segmentation of synapses in anisotropic 3D Electron Microscopy image stacks.
#
#
# The slot CachedOutput guarantees consistent results, the slot Output computes
# the roi on demand.
#
# The operator inherits from OpGraphCut because they share some details:
# * output meta
# * dirtiness propagation
# * input slots
#
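# Editorial summary of the ROI construction used in _execute_graphcut below
# (not part of the original file): each labeled "core" is segmented inside its
# bounding box grown by Margin and clipped to the volume, per axis roughly
#   lo = max(min_coord - margin, 0)
#   hi = min(max_coord + margin + 1, shape)  # +1 because the maxima are inclusive
# and segmentGC is run only on that sub-volume.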
class OpObjectsSegment(OpGraphCut):
name = "OpObjectsSegment"
# thresholded predictions, or otherwise obtained ROI indicators
# (a value of 0 is assumed to be background and ignored)
LabelImage = InputSlot()
# margin around each object (always xyz!)
Margin = InputSlot(value=np.asarray((20, 20, 20)))
# bounding boxes of the labeled objects
# this slot returns an array of dicts with shape (t, c)
BoundingBoxes = OutputSlot(stype=Opaque)
### slots from OpGraphCut ###
## prediction maps
#Prediction = InputSlot()
## graph cut parameter
#Beta = InputSlot(value=.2)
## labeled segmentation image
#Output = OutputSlot()
#CachedOutput = OutputSlot()
def __init__(self, *args, **kwargs):
super(OpObjectsSegment, self).__init__(*args, **kwargs)
def setupOutputs(self):
super(OpObjectsSegment, self).setupOutputs()
# sanity checks
shape = self.LabelImage.meta.shape
assert len(shape) == 5,\
"Prediction maps must be a full 5d volume (txyzc)"
tags = self.LabelImage.meta.getAxisKeys()
tags = "".join(tags)
assert tags == 'txyzc',\
"Label image has wrong axes order"\
"(expected: txyzc, got: {})".format(tags)
# bounding boxes are just one element arrays of type object, but we
# want to request boxes from a specific region, therefore BoundingBoxes
# needs a shape
shape = self.Prediction.meta.shape
self.BoundingBoxes.meta.shape = shape
self.BoundingBoxes.meta.dtype = np.object
self.BoundingBoxes.meta.axistags = vigra.defaultAxistags('txyzc')
def execute(self, slot, subindex, roi, result):
# check the axes - cannot do this in setupOutputs because we could be
# in some invalid intermediate state where the dimensions do not agree
shape = self.LabelImage.meta.shape
agree = [i == j for i, j in zip(self.Prediction.meta.shape, shape)]
assert all(agree),\
"shape mismatch: {} vs. {}".format(self.Prediction.meta.shape,
shape)
if slot == self.BoundingBoxes:
return self._execute_bbox(roi, result)
elif slot == self.Output:
self._execute_graphcut(roi, result)
else:
raise NotImplementedError(
"execute() is not implemented for slot {}".format(str(slot)))
def _execute_bbox(self, roi, result):
cc = self.LabelImage.get(roi).wait()
cc = vigra.taggedView(cc, axistags=self.LabelImage.meta.axistags)
cc = cc.withAxes(*'xyz')
logger.debug("computing bboxes...")
feats = vigra.analysis.extractRegionFeatures(
cc.astype(np.float32),
cc.astype(np.uint32),
features=["Count", "Coord<Minimum>", "Coord<Maximum>"])
feats_dict = {}
feats_dict["Coord<Minimum>"] = feats["Coord<Minimum>"]
feats_dict["Coord<Maximum>"] = feats["Coord<Maximum>"]
feats_dict["Count"] = feats["Count"]
return feats_dict
def _execute_graphcut(self, roi, result):
for i in (0, 4):
assert roi.stop[i] - roi.start[i] == 1,\
"Invalid roi for graph-cut: {}".format(str(roi))
t = roi.start[0]
c = roi.start[4]
margin = self.Margin.value
beta = self.Beta.value
MAXBOXSIZE = 10000000 # FIXME justification??
## request the bounding box coordinates ##
# the trailing index brackets give us the dictionary (instead of an
# array of size 1)
feats = self.BoundingBoxes.get(roi).wait()
mins = feats["Coord<Minimum>"]
maxs = feats["Coord<Maximum>"]
nobj = mins.shape[0]
# these are indices, so they should have an index datatype
mins = mins.astype(np.uint32)
maxs = maxs.astype(np.uint32)
## request the prediction image ##
pred = self.Prediction.get(roi).wait()
pred = vigra.taggedView(pred, axistags=self.Prediction.meta.axistags)
pred = pred.withAxes(*'xyz')
## request the connected components image ##
cc = self.LabelImage.get(roi).wait()
cc = vigra.taggedView(cc, axistags=self.LabelImage.meta.axistags)
cc = cc.withAxes(*'xyz')
        # provide xyz view for the output (just need 8bit for segmentation)
resultXYZ = vigra.taggedView(np.zeros(cc.shape, dtype=np.uint8),
axistags='xyz')
def processSingleObject(i):
logger.debug("processing object {}".format(i))
# maxs are inclusive, so we need to add 1
xmin = max(mins[i][0]-margin[0], 0)
ymin = max(mins[i][1]-margin[1], 0)
zmin = max(mins[i][2]-margin[2], 0)
xmax = min(maxs[i][0]+margin[0]+1, cc.shape[0])
ymax = min(maxs[i][1]+margin[1]+1, cc.shape[1])
zmax = min(maxs[i][2]+margin[2]+1, cc.shape[2])
ccbox = cc[xmin:xmax, ymin:ymax, zmin:zmax]
resbox = resultXYZ[xmin:xmax, ymin:ymax, zmin:zmax]
nVoxels = ccbox.size
if nVoxels > MAXBOXSIZE:
#problem too large to run graph cut, assign to seed
logger.warn("Object {} too large for graph cut.".format(i))
resbox[ccbox == i] = 1
return
probbox = pred[xmin:xmax, ymin:ymax, zmin:zmax]
gcsegm = segmentGC(probbox, beta)
gcsegm = vigra.taggedView(gcsegm, axistags='xyz')
ccsegm = vigra.analysis.labelVolumeWithBackground(
gcsegm.astype(np.uint8))
# Extended bboxes of different objects might overlap.
# To avoid conflicting segmentations, we find all connected
# components in the results and only take the one, which
# overlaps with the object "core" or "seed", defined by the
# pre-thresholding
seed = ccbox == i
filtered = seed*ccsegm
passed = np.unique(filtered)
assert len(passed.shape) == 1
if passed.size > 2:
logger.warn("ambiguous label assignment for region {}".format(
(xmin, xmax, ymin, ymax, zmin, zmax)))
resbox[ccbox == i] = 1
elif passed.size <= 1:
logger.warn(
"box {} segmented out with beta {}".format(i, beta))
else:
# assign to the overlap region
label = passed[1] # 0 is background
resbox[ccsegm == label] = 1
pool = RequestPool()
#FIXME make sure that the parallel computations fit into memory
for i in range(1, nobj):
req = Request(functools.partial(processSingleObject, i))
pool.add(req)
logger.info("Processing {} objects ...".format(nobj-1))
pool.wait()
pool.clean()
logger.info("object loop done")
# prepare result
resView = vigra.taggedView(result, axistags=self.Output.meta.axistags)
resView = resView.withAxes(*'xyz')
# some labels could have been removed => relabel
vigra.analysis.labelVolumeWithBackground(resultXYZ, out=resView)
def propagateDirty(self, slot, subindex, roi):
super(OpObjectsSegment, self).propagateDirty(slot, subindex, roi)
if slot == self.LabelImage:
# time-channel slices are pairwise independent
# determine t, c from input volume
t_ind = 0
c_ind = 4
t = (roi.start[t_ind], roi.stop[t_ind])
c = (roi.start[c_ind], roi.stop[c_ind])
# set output dirty
start = t[0:1] + (0,)*3 + c[0:1]
stop = t[1:2] + self.Output.meta.shape[1:4] + c[1:2]
roi = SubRegion(self.Output, start=start, stop=stop)
self.Output.setDirty(roi)
elif slot == self.Margin:
# margin affects the whole volume
self.Output.setDirty(slice(None))
| gpl-3.0 | -4,906,129,953,672,029,000 | 38.740876 | 81 | 0.619616 | false | 3.953885 | false | false | false |
| asweigart/pygcurse | examples/shadowtest.py | 1 | 1621 |
# Simplified BSD License, Copyright 2011 Al Sweigart
import sys
import os
sys.path.append(os.path.abspath('..'))
import pygcurse, pygame
from pygame.locals import *
win = pygcurse.PygcurseWindow(40, 25)
win.autoblit = False
xoffset = 1
yoffset = 1
mousex = mousey = 0
while True:
for event in pygame.event.get(): # the event loop
if event.type == QUIT or event.type == KEYDOWN and event.key == K_ESCAPE:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_UP:
yoffset -= 1
elif event.key == K_DOWN:
yoffset += 1
elif event.key == K_LEFT:
xoffset -= 1
elif event.key == K_RIGHT:
xoffset += 1
elif event.key == K_p:
win.fullscreen = not win.fullscreen
elif event.key == K_d:
win._debugchars()
elif event.type == MOUSEMOTION:
mousex, mousey = win.getcoordinatesatpixel(event.pos, onscreen=False)
win.setscreencolors('white', 'blue', clear=True)
win.fill(bgcolor='red', region=(15, 10, 5, 5))
win.addshadow(51, (15, 10, 5, 5), xoffset=xoffset, yoffset=yoffset)
#win.drawline((6,6), (mousex, mousey), bgcolor='red')
win.drawline((6,6), (mousex, mousey), char='+', fgcolor='yellow', bgcolor='green')
win.cursor = 0, win.height-3
win.write('Use mouse to move line, arrow keys to move shadow, p to switch to fullscreen.')
win.cursor = 0, win.height-1
win.putchars('xoffset=%s, yoffset=%s ' % (xoffset, yoffset))
win.blittowindow()
| bsd-3-clause | -364,671,775,039,068,860 | 33.489362 | 94 | 0.584207 | false | 3.349174 | false | false | false |
| koreiklein/fantasia | ui/render/text/colors.py | 1 | 1104 |
# Copyright (C) 2013 Korei Klein <korei.klein1@gmail.com>
genericColor = None
variableColor = None
symbolColor = None
andColor = None
orColor = None
callColor = None
quantifierDividerColor = None
notColor = None
alwaysBackgroundColor = None
maybeBackgroundColor = None
relationColor = None
iffColor = None
applyColor = None
hiddenColor = None
symbolVariablePairBorderColor = None
injectionSymbolBackgroundColor = None
injectionVariableBackgroundColor = None
projectionSymbolBackgroundColor = None
projectionVariableBackgroundColor = None
callSymbolBackgroundColor = None
callVariableBackgroundColor = None
_colorPairs = [ (None
,None)
, (None
,None)
, (None
,None)
]
def productPairsColor(i):
return _colorPairs[i % len(_colorPairs)]
symbolBackgroundColor = None
symbolForegroundColor = None
def exponentialColor(isAlways):
if isAlways:
return alwaysBackgroundColor
else:
return maybeBackgroundColor
projectDotColor = None
injectDotColor = None
trueColor = None
falseColor = None
| gpl-2.0 | 6,253,670,935,183,310,000 | 16.806452 | 57 | 0.724638 | false | 3.820069 | false | false | false |
| hiviah/perspectives-observatory | utilities/cert_client.py | 1 | 3009 |
# This file is part of the Perspectives Notary Server
#
# Copyright (C) 2011 Dan Wendlandt
# Copyright (C) 2011 Ondrej Mikle, CZ.NIC Labs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Testing script for get_certs feature. Little copy-pasta from simple_client.
import sys
import traceback
import base64
import urllib
import struct
from M2Crypto import BIO, RSA, EVP
from xml.dom.minidom import parseString
def fetch_certs_xml(notary_server, notary_port, service_id):
host, port = service_id.split(":")
url = "http://%s:%s/get_certs?host=%s&port=%s" % (notary_server, notary_port, host,port)
url_file = urllib.urlopen(url)
xml_text = url_file.read()
code = url_file.getcode()
return (code,xml_text)
def verify_certs_signature(service_id, xml_text, notary_pub_key_text):
doc = parseString(xml_text)
root = doc.documentElement
sig_to_verify = base64.standard_b64decode(root.getAttribute("sig"))
to_verify = service_id
cert_elements = root.getElementsByTagName("certificate")
for cert_elem in cert_elements:
cert = base64.standard_b64decode(cert_elem.getAttribute("body"))
to_verify += cert
start_ts = int(cert_elem.getAttribute("start"))
end_ts = int(cert_elem.getAttribute("end"))
to_verify += struct.pack("!2I", start_ts, end_ts)
bio = BIO.MemoryBuffer(notary_pub_key_text)
rsa_pub = RSA.load_pub_key_bio(bio)
pubkey = EVP.PKey()
pubkey.assign_rsa(rsa_pub)
pubkey.reset_context(md='sha256')
pubkey.verify_init()
pubkey.verify_update(to_verify)
return pubkey.verify_final(sig_to_verify)
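# Illustrative note, not part of the original script: the signature covers the
# byte string
#   service_id + cert_1 + pack("!2I", start_1, end_1) + cert_2 + pack(...) + ...
# i.e. each base64-decoded certificate body followed by its validity timestamps
# as two big-endian unsigned 32-bit integers, verified with the notary's RSA key
# over SHA-256.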
if len(sys.argv) not in [4,5]:
print "usage: %s <service-id> <notary-server> <notary-port> [notary-pubkey]" % sys.argv[0]
exit(1)
notary_pub_key = None
if len(sys.argv) == 5:
notary_pub_key_file = sys.argv[4]
notary_pub_key = open(notary_pub_key_file,'r').read()
try:
code, xml_text = fetch_certs_xml(sys.argv[2],int(sys.argv[3]), sys.argv[1])
if code == 404:
print "Notary has no results"
elif code != 200:
print "Notary server returned error code: %s" % code
except Exception, e:
print "Exception contacting notary server:"
traceback.print_exc(e)
exit(1)
print 50 * "-"
print "XML Response:"
print xml_text
print 50 * "-"
if notary_pub_key:
if not verify_certs_signature(sys.argv[1].lower(), xml_text, notary_pub_key):
print "Signature verify failed. Results are not valid"
exit(1)
else:
print "Warning: no public key specified, not verifying notary signature"
| gpl-3.0 | 7,373,507,836,183,155,000 | 30.020619 | 91 | 0.713526 | false | 2.979208 | false | false | false |
| frostyfrog/mark2 | mk2/events/server.py | 1 | 2945 |
import re
from . import Event, get_timestamp
# input/output
output_exp = re.compile(
r'(\d{4}-\d{2}-\d{2} |)(\d{2}:\d{2}:\d{2}) \[([A-Z]+)\] (?:%s)?(.*)' % '|'.join((re.escape(x) for x in (
'[Minecraft] ',
'[Minecraft-Server] '
))))
class ServerInput(Event):
"""Send data to the server's stdin. In plugins, a shortcut
is available: self.send("say hello")"""
line = Event.Arg(required=True)
class ServerOutput(Event):
"""Issued when the server gives us a line on stdout. Note
that to handle this, you must specify both the 'level'
(e.g. INFO or SEVERE) and a regex pattern to match"""
line = Event.Arg(required=True)
time = Event.Arg()
level = Event.Arg()
data = Event.Arg()
def setup(self):
m = output_exp.match(self.line)
if m:
g = m.groups()
self.time = g[0]+g[1]
self.level= g[2]
self.data = g[3]
else:
self.level= "???"
self.data = self.line.strip()
self.time = get_timestamp(self.time)
def prefilter(self, pattern, level=None):
if level and level != self.level:
return False
m = re.match(pattern, self.data)
if not m:
return False
self.match = m
return True
# start
class ServerStart(Event):
"""Issue this event to start the server"""
pass
class ServerStarting(Event):
"""Issued by the ServerStart handler to alert listening plugins
that the server process has started"""
pid = Event.Arg()
class ServerStarted(Event):
"""Issued when we see the "Done! (1.23s)" line from the server
This event has a helper method in plugins - just overwrite
the server_started method.
"""
time = Event.Arg()
#stop
class ServerStop(Event):
"""Issue this event to stop the server."""
reason = Event.Arg(required=True)
respawn = Event.Arg(required=True)
kill = Event.Arg(default=False)
announce = Event.Arg(default=True)
dispatch_once = True
class ServerStopping(Event):
"""Issued by the ServerStop handler to alert listening plugins
that the server is going for a shutdown
This event has a helper method in plugins - just overwrite
the server_started method."""
reason = Event.Arg(required=True)
respawn = Event.Arg(required=True)
kill = Event.Arg(default=False)
class ServerStopped(Event):
"""When the server process finally dies, this event is raised"""
pass
class ServerEvent(Event):
"""Tell plugins about something happening to the server"""
cause = Event.Arg(required=True)
friendly = Event.Arg()
data = Event.Arg(required=True)
priority = Event.Arg(default=0)
def setup(self):
if not self.friendly:
self.friendly = self.cause
| mit | 5,955,427,389,456,440,000 | 23.541667 | 108 | 0.591171 | false | 3.742058 | false | false | false |
| amitjamadagni/sympy | sympy/functions/special/error_functions.py | 2 | 32620 |
""" This module contains various functions that are special cases
of incomplete gamma functions. It should probably be renamed. """
from sympy.core import Add, S, C, sympify, cacheit, pi, I
from sympy.core.function import Function, ArgumentIndexError
from sympy.functions.elementary.miscellaneous import sqrt, root
from sympy.functions.elementary.complexes import polar_lift
from sympy.functions.special.hyper import hyper, meijerg
# TODO series expansions
# TODO see the "Note:" in Ei
###############################################################################
################################ ERROR FUNCTION ###############################
###############################################################################
class erf(Function):
"""
The Gauss error function.
This function is defined as:
:math:`\\mathrm{erf}(x)=\\frac{2}{\\sqrt{\\pi}} \\int_0^x e^{-t^2} \\, \\mathrm{d}t`
Or, in ASCII::

    erf(x) = 2/sqrt(pi) * Integral(exp(-t**2), (t, 0, x))
Examples
========
>>> from sympy import I, oo, erf
>>> from sympy.abc import z
Several special values are known:
>>> erf(0)
0
>>> erf(oo)
1
>>> erf(-oo)
-1
>>> erf(I*oo)
oo*I
>>> erf(-I*oo)
-oo*I
In general one can pull out factors of -1 and I from the argument:
>>> erf(-z)
-erf(z)
The error function obeys the mirror symmetry:
>>> from sympy import conjugate
>>> conjugate(erf(z))
erf(conjugate(z))
Differentiation with respect to z is supported:
>>> from sympy import diff
>>> diff(erf(z), z)
2*exp(-z**2)/sqrt(pi)
We can numerically evaluate the error function to arbitrary precision
on the whole complex plane:
>>> erf(4).evalf(30)
0.999999984582742099719981147840
>>> erf(-4*I).evalf(30)
-1296959.73071763923152794095062*I
References
==========
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] http://dlmf.nist.gov/7
.. [3] http://mathworld.wolfram.com/Erf.html
.. [4] http://functions.wolfram.com/GammaBetaErf/Erf
"""
nargs = 1
unbranched = True
def fdiff(self, argindex=1):
if argindex == 1:
return 2*C.exp(-self.args[0]**2)/sqrt(S.Pi)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.One
elif arg is S.NegativeInfinity:
return S.NegativeOne
elif arg is S.Zero:
return S.Zero
t = arg.extract_multiplicatively(S.ImaginaryUnit)
if t == S.Infinity or t == S.NegativeInfinity:
return arg
if arg.could_extract_minus_sign():
return -cls(-arg)
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
k = C.floor((n - 1)/S(2))
if len(previous_terms) > 2:
return -previous_terms[-2] * x**2 * (n - 2)/(n*k)
else:
return 2*(-1)**k * x**n/(n*C.factorial(k)*sqrt(S.Pi))
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _eval_is_real(self):
return self.args[0].is_real
def _eval_rewrite_as_uppergamma(self, z):
return sqrt(z**2)/z*(S.One - C.uppergamma(S.Half, z**2)/sqrt(S.Pi))
def _eval_rewrite_as_tractable(self, z):
return S.One - _erfs(z)*C.exp(-z**2)
def _eval_as_leading_term(self, x):
arg = self.args[0].as_leading_term(x)
if x in arg.free_symbols and C.Order(1, x).contains(arg):
return 2*x/sqrt(pi)
else:
return self.func(arg)
###############################################################################
#################### EXPONENTIAL INTEGRALS ####################################
###############################################################################
class Ei(Function):
r"""
The classical exponential integral.
For the use in SymPy, this function is defined as
.. math:: \operatorname{Ei}(x) = \sum_{n=1}^\infty \frac{x^n}{n\, n!}
+ \log(x) + \gamma,
where :math:`\gamma` is the Euler-Mascheroni constant.
If :math:`x` is a polar number, this defines an analytic function on the
riemann surface of the logarithm. Otherwise this defines an analytic
function in the cut plane :math:`\mathbb{C} \setminus (-\infty, 0]`.
**Background**
The name 'exponential integral' comes from the following statement:
.. math:: \operatorname{Ei}(x) = \int_{-\infty}^x \frac{e^t}{t} \mathrm{d}t
If the integral is interpreted as a Cauchy principal value, this statement
holds for :math:`x > 0` and :math:`\operatorname{Ei}(x)` as defined above.
Note that we carefully avoided defining :math:`\operatorname{Ei}(x)` for
negative real x. This is because above integral formula does not hold for
any polar lift of such :math:`x`, indeed all branches of
:math:`\operatorname{Ei}(x)` above the negative reals are imaginary.
However, the following statement holds for all :math:`x \in \mathbb{R}^*`:
.. math:: \int_{-\infty}^x \frac{e^t}{t} \mathrm{d}t =
\frac{\operatorname{Ei}\left(|x|e^{i \arg(x)}\right) +
\operatorname{Ei}\left(|x|e^{- i \arg(x)}\right)}{2},
where the integral is again understood to be a principal value if
:math:`x > 0`, and :math:`|x|e^{i \arg(x)}`,
:math:`|x|e^{- i \arg(x)}` denote two conjugate polar lifts of :math:`x`.
See Also
========
expint, sympy.functions.special.gamma_functions.uppergamma
References
==========
- Abramowitz & Stegun, section 5: http://www.math.sfu.ca/~cbm/aands/page_228.htm
- http://en.wikipedia.org/wiki/Exponential_integral
Examples
========
>>> from sympy import Ei, polar_lift, exp_polar, I, pi
>>> from sympy.abc import x
The exponential integral in SymPy is strictly undefined for negative values
of the argument. For convenience, exponential integrals with negative
arguments are immediately converted into an expression that agrees with
the classical integral definition:
>>> Ei(-1)
-I*pi + Ei(exp_polar(I*pi))
This yields a real value:
>>> Ei(-1).n(chop=True)
-0.219383934395520
On the other hand the analytic continuation is not real:
>>> Ei(polar_lift(-1)).n(chop=True)
-0.21938393439552 + 3.14159265358979*I
The exponential integral has a logarithmic branch point at the origin:
>>> Ei(x*exp_polar(2*I*pi))
Ei(x) + 2*I*pi
Differentiation is supported:
>>> Ei(x).diff(x)
exp(x)/x
The exponential integral is related to many other special functions.
For example:
>>> from sympy import uppergamma, expint, Shi
>>> Ei(x).rewrite(expint)
-expint(1, x*exp_polar(I*pi)) - I*pi
>>> Ei(x).rewrite(Shi)
Chi(x) + Shi(x)
"""
nargs = 1
@classmethod
def eval(cls, z):
if not z.is_polar and z.is_negative:
# Note: is this a good idea?
return Ei(polar_lift(z)) - pi*I
nz, n = z.extract_branch_factor()
if n:
return Ei(nz) + 2*I*pi*n
def fdiff(self, argindex=1):
from sympy import unpolarify
arg = unpolarify(self.args[0])
if argindex == 1:
return C.exp(arg)/arg
else:
raise ArgumentIndexError(self, argindex)
def _eval_evalf(self, prec):
if (self.args[0]/polar_lift(-1)).is_positive:
return Function._eval_evalf(self, prec) + (I*pi)._eval_evalf(prec)
return Function._eval_evalf(self, prec)
def _eval_rewrite_as_uppergamma(self, z):
from sympy import uppergamma
# XXX this does not currently work usefully because uppergamma
# immediately turns into expint
return -uppergamma(0, polar_lift(-1)*z) - I*pi
def _eval_rewrite_as_expint(self, z):
return -expint(1, polar_lift(-1)*z) - I*pi
def _eval_rewrite_as_Si(self, z):
return Shi(z) + Chi(z)
_eval_rewrite_as_Ci = _eval_rewrite_as_Si
_eval_rewrite_as_Chi = _eval_rewrite_as_Si
_eval_rewrite_as_Shi = _eval_rewrite_as_Si
class expint(Function):
r"""
Generalized exponential integral.
This function is defined as
.. math:: \operatorname{E}_\nu(z) = z^{\nu - 1} \Gamma(1 - \nu, z),
where `\Gamma(1 - \nu, z)` is the upper incomplete gamma function
(``uppergamma``).
Hence for :math:`z` with positive real part we have
.. math:: \operatorname{E}_\nu(z)
= \int_1^\infty \frac{e^{-zt}}{z^\nu} \mathrm{d}t,
which explains the name.
The representation as an incomplete gamma function provides an analytic
continuation for :math:`\operatorname{E}_\nu(z)`. If :math:`\nu` is a
non-positive integer the exponential integral is thus an unbranched
function of :math:`z`, otherwise there is a branch point at the origin.
Refer to the incomplete gamma function documentation for details of the
branching behavior.
See Also
========
E1: The classical case, returns expint(1, z).
Ei: Another related function called exponential integral.
sympy.functions.special.gamma_functions.uppergamma
References
==========
- http://dlmf.nist.gov/8.19
- http://functions.wolfram.com/GammaBetaErf/ExpIntegralE/
- http://en.wikipedia.org/wiki/Exponential_integral
Examples
========
>>> from sympy import expint, S
>>> from sympy.abc import nu, z
Differentiation is supported. Differentiation with respect to z explains
further the name: for integral orders, the exponential integral is an
iterated integral of the exponential function.
>>> expint(nu, z).diff(z)
-expint(nu - 1, z)
Differentiation with respect to nu has no classical expression:
>>> expint(nu, z).diff(nu)
-z**(nu - 1)*meijerg(((), (1, 1)), ((0, 0, -nu + 1), ()), z)
At non-postive integer orders, the exponential integral reduces to the
exponential function:
>>> expint(0, z)
exp(-z)/z
>>> expint(-1, z)
exp(-z)/z + exp(-z)/z**2
At half-integers it reduces to error functions:
>>> expint(S(1)/2, z)
-sqrt(pi)*erf(sqrt(z))/sqrt(z) + sqrt(pi)/sqrt(z)
At positive integer orders it can be rewritten in terms of exponentials
and expint(1, z). Use expand_func() to do this:
>>> from sympy import expand_func
>>> expand_func(expint(5, z))
z**4*expint(1, z)/24 + (-z**3 + z**2 - 2*z + 6)*exp(-z)/24
The generalised exponential integral is essentially equivalent to the
incomplete gamma function:
>>> from sympy import uppergamma
>>> expint(nu, z).rewrite(uppergamma)
z**(nu - 1)*uppergamma(-nu + 1, z)
As such it is branched at the origin:
>>> from sympy import exp_polar, pi, I
>>> expint(4, z*exp_polar(2*pi*I))
I*pi*z**3/3 + expint(4, z)
>>> expint(nu, z*exp_polar(2*pi*I))
z**(nu - 1)*(exp(2*I*pi*nu) - 1)*gamma(-nu + 1) + expint(nu, z)
"""
nargs = 2
@classmethod
def eval(cls, nu, z):
from sympy import (unpolarify, expand_mul, uppergamma, exp, gamma,
factorial)
nu2 = unpolarify(nu)
if nu != nu2:
return expint(nu2, z)
if nu.is_Integer and nu <= 0 or (not nu.is_Integer and (2*nu).is_Integer):
return unpolarify(expand_mul(z**(nu - 1)*uppergamma(1 - nu, z)))
# Extract branching information. This can be deduced from what is
# explained in lowergamma.eval().
z, n = z.extract_branch_factor()
if n == 0:
return
if nu.is_integer:
if (nu > 0) is not True:
return
return expint(nu, z) \
- 2*pi*I*n*(-1)**(nu - 1)/factorial(nu - 1)*unpolarify(z)**(nu - 1)
else:
return (exp(2*I*pi*nu*n) - 1)*z**(nu - 1)*gamma(1 - nu) + expint(nu, z)
def fdiff(self, argindex):
from sympy import meijerg
nu, z = self.args
if argindex == 1:
return -z**(nu - 1)*meijerg([], [1, 1], [0, 0, 1 - nu], [], z)
elif argindex == 2:
return -expint(nu - 1, z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_uppergamma(self, nu, z):
from sympy import uppergamma
return z**(nu - 1)*uppergamma(1 - nu, z)
def _eval_rewrite_as_Ei(self, nu, z):
from sympy import exp_polar, unpolarify, exp, factorial
if nu == 1:
return -Ei(z*exp_polar(-I*pi)) - I*pi
elif nu.is_Integer and nu > 1:
# DLMF, 8.19.7
x = -unpolarify(z)
return x**(nu - 1)/factorial(nu - 1)*E1(z).rewrite(Ei) + \
exp(x)/factorial(nu - 1) * \
Add(*[factorial(nu - k - 2)*x**k for k in range(nu - 1)])
else:
return self
def _eval_expand_func(self, **hints):
return self.rewrite(Ei).rewrite(expint, **hints)
def _eval_rewrite_as_Si(self, nu, z):
if nu != 1:
return self
return Shi(z) - Chi(z)
_eval_rewrite_as_Ci = _eval_rewrite_as_Si
_eval_rewrite_as_Chi = _eval_rewrite_as_Si
_eval_rewrite_as_Shi = _eval_rewrite_as_Si
def E1(z):
"""
Classical case of the generalized exponential integral.
This is equivalent to ``expint(1, z)``.
"""
return expint(1, z)
###############################################################################
#################### TRIGONOMETRIC INTEGRALS ##################################
###############################################################################
class TrigonometricIntegral(Function):
""" Base class for trigonometric integrals. """
nargs = 1
@classmethod
def eval(cls, z):
if z == 0:
return cls._atzero
elif z is S.Infinity:
return cls._atinf
elif z is S.NegativeInfinity:
return cls._atneginf
nz = z.extract_multiplicatively(polar_lift(I))
if nz is None and cls._trigfunc(0) == 0:
nz = z.extract_multiplicatively(I)
if nz is not None:
return cls._Ifactor(nz, 1)
nz = z.extract_multiplicatively(polar_lift(-I))
if nz is not None:
return cls._Ifactor(nz, -1)
nz = z.extract_multiplicatively(polar_lift(-1))
if nz is None and cls._trigfunc(0) == 0:
nz = z.extract_multiplicatively(-1)
if nz is not None:
return cls._minusfactor(nz)
nz, n = z.extract_branch_factor()
if n == 0 and nz == z:
return
return 2*pi*I*n*cls._trigfunc(0) + cls(nz)
def fdiff(self, argindex=1):
from sympy import unpolarify
arg = unpolarify(self.args[0])
if argindex == 1:
return self._trigfunc(arg)/arg
def _eval_rewrite_as_Ei(self, z):
return self._eval_rewrite_as_expint(z).rewrite(Ei)
def _eval_rewrite_as_uppergamma(self, z):
from sympy import uppergamma
return self._eval_rewrite_as_expint(z).rewrite(uppergamma)
def _eval_nseries(self, x, n, logx):
# NOTE this is fairly inefficient
from sympy import log, EulerGamma, Pow
n += 1
if self.args[0].subs(x, 0) != 0:
return super(TrigonometricIntegral, self)._eval_nseries(x, n, logx)
baseseries = self._trigfunc(x)._eval_nseries(x, n, logx)
if self._trigfunc(0) != 0:
baseseries -= 1
baseseries = baseseries.replace(Pow, lambda t, n: t**n/n)
if self._trigfunc(0) != 0:
baseseries += EulerGamma + log(x)
return baseseries.subs(x, self.args[0])._eval_nseries(x, n, logx)
class Si(TrigonometricIntegral):
r"""
Sine integral.
This function is defined by
.. math:: \operatorname{Si}(z) = \int_0^z \frac{\sin{t}}{t} \mathrm{d}t.
It is an entire function.
See Also
========
Ci: Cosine integral.
Shi: Sinh integral.
Chi: Cosh integral.
expint: The generalised exponential integral.
References
==========
- http://en.wikipedia.org/wiki/Trigonometric_integral
Examples
========
>>> from sympy import Si
>>> from sympy.abc import z
The sine integral is an antiderivative of sin(z)/z:
>>> Si(z).diff(z)
sin(z)/z
It is unbranched:
>>> from sympy import exp_polar, I, pi
>>> Si(z*exp_polar(2*I*pi))
Si(z)
Sine integral behaves much like ordinary sine under multiplication by I:
>>> Si(I*z)
I*Shi(z)
>>> Si(-z)
-Si(z)
It can also be expressed in terms of exponential integrals, but beware
that the latter is branched:
>>> from sympy import expint
>>> Si(z).rewrite(expint)
-I*(-expint(1, z*exp_polar(-I*pi/2))/2 +
expint(1, z*exp_polar(I*pi/2))/2) + pi/2
"""
_trigfunc = C.sin
_atzero = S(0)
_atinf = pi*S.Half
_atneginf = -pi*S.Half
@classmethod
def _minusfactor(cls, z):
return -Si(z)
@classmethod
def _Ifactor(cls, z, sign):
return I*Shi(z)*sign
def _eval_rewrite_as_expint(self, z):
# XXX should we polarify z?
return pi/2 + (E1(polar_lift(I)*z) - E1(polar_lift(-I)*z))/2/I
class Ci(TrigonometricIntegral):
r"""
Cosine integral.
This function is defined for positive :math:`x` by
.. math:: \operatorname{Ci}(x) = \gamma + \log{x}
+ \int_0^x \frac{\cos{t} - 1}{t} \mathrm{d}t
= -\int_x^\infty \frac{\cos{t}}{t} \mathrm{d}t,
where :math:`\gamma` is the Euler-Mascheroni constant.
We have
.. math:: \operatorname{Ci}(z) =
-\frac{\operatorname{E}_1\left(e^{i\pi/2} z\right)
+ \operatorname{E}_1\left(e^{-i \pi/2} z\right)}{2}
which holds for all polar :math:`z` and thus provides an analytic
continuation to the Riemann surface of the logarithm.
The formula also holds as stated
for :math:`z \in \mathbb{C}` with :math:`Re(z) > 0`.
By lifting to the principal branch we obtain an analytic function on the
cut complex plane.
See Also
========
Si: Sine integral.
Shi: Sinh integral.
Chi: Cosh integral.
expint: The generalised exponential integral.
References
==========
- http://en.wikipedia.org/wiki/Trigonometric_integral
Examples
========
>>> from sympy import Ci
>>> from sympy.abc import z
The cosine integral is a primitive of cos(z)/z:
>>> Ci(z).diff(z)
cos(z)/z
It has a logarithmic branch point at the origin:
>>> from sympy import exp_polar, I, pi
>>> Ci(z*exp_polar(2*I*pi))
Ci(z) + 2*I*pi
Cosine integral behaves somewhat like ordinary cos under multiplication by I:
>>> from sympy import polar_lift
>>> Ci(polar_lift(I)*z)
Chi(z) + I*pi/2
>>> Ci(polar_lift(-1)*z)
Ci(z) + I*pi
It can also be expressed in terms of exponential integrals:
>>> from sympy import expint
>>> Ci(z).rewrite(expint)
-expint(1, z*exp_polar(-I*pi/2))/2 - expint(1, z*exp_polar(I*pi/2))/2
"""
_trigfunc = C.cos
_atzero = S.ComplexInfinity
_atinf = S.Zero
_atneginf = I*pi
@classmethod
def _minusfactor(cls, z):
return Ci(z) + I*pi
@classmethod
def _Ifactor(cls, z, sign):
return Chi(z) + I*pi/2*sign
def _eval_rewrite_as_expint(self, z):
return -(E1(polar_lift(I)*z) + E1(polar_lift(-I)*z))/2
class Shi(TrigonometricIntegral):
r"""
Sinh integral.
This function is defined by
.. math:: \operatorname{Shi}(z) = \int_0^z \frac{\sinh{t}}{t} \mathrm{d}t.
It is an entire function.
See Also
========
Si: Sine integral.
Ci: Cosine integral.
Chi: Cosh integral.
expint: The generalised exponential integral.
References
==========
- http://en.wikipedia.org/wiki/Trigonometric_integral
Examples
========
>>> from sympy import Shi
>>> from sympy.abc import z
The Sinh integral is a primitive of sinh(z)/z:
>>> Shi(z).diff(z)
sinh(z)/z
It is unbranched:
>>> from sympy import exp_polar, I, pi
>>> Shi(z*exp_polar(2*I*pi))
Shi(z)
Sinh integral behaves much like ordinary sinh under multiplication by I:
>>> Shi(I*z)
I*Si(z)
>>> Shi(-z)
-Shi(z)
It can also be expressed in terms of exponential integrals, but beware
that the latter is branched:
>>> from sympy import expint
>>> Shi(z).rewrite(expint)
expint(1, z)/2 - expint(1, z*exp_polar(I*pi))/2 - I*pi/2
"""
_trigfunc = C.sinh
_atzero = S(0)
_atinf = S.Infinity
_atneginf = S.NegativeInfinity
@classmethod
def _minusfactor(cls, z):
return -Shi(z)
@classmethod
def _Ifactor(cls, z, sign):
return I*Si(z)*sign
def _eval_rewrite_as_expint(self, z):
from sympy import exp_polar
# XXX should we polarify z?
return (E1(z) - E1(exp_polar(I*pi)*z))/2 - I*pi/2
class Chi(TrigonometricIntegral):
r"""
Cosh integral.
This function is defined for positive :math:`x` by
.. math:: \operatorname{Chi}(x) = \gamma + \log{x}
+ \int_0^x \frac{\cosh{t} - 1}{t} \mathrm{d}t,
where :math:`\gamma` is the Euler-Mascheroni constant.
We have
.. math:: \operatorname{Chi}(z) = \operatorname{Ci}\left(e^{i \pi/2}z\right)
- i\frac{\pi}{2},
which holds for all polar :math:`z` and thus provides an analytic
continuation to the Riemann surface of the logarithm.
By lifting to the principal branch we obtain an analytic function on the
cut complex plane.
See Also
========
Si: Sine integral.
Ci: Cosine integral.
Shi: Sinh integral.
expint: The generalised exponential integral.
References
==========
- http://en.wikipedia.org/wiki/Trigonometric_integral
Examples
========
>>> from sympy import Chi
>>> from sympy.abc import z
The cosh integral is a primitive of cosh(z)/z:
>>> Chi(z).diff(z)
cosh(z)/z
It has a logarithmic branch point at the origin:
>>> from sympy import exp_polar, I, pi
>>> Chi(z*exp_polar(2*I*pi))
Chi(z) + 2*I*pi
Cosh integral behaves somewhat like ordinary cosh under multiplication by I:
>>> from sympy import polar_lift
>>> Chi(polar_lift(I)*z)
Ci(z) + I*pi/2
>>> Chi(polar_lift(-1)*z)
Chi(z) + I*pi
It can also be expressed in terms of exponential integrals:
>>> from sympy import expint
>>> Chi(z).rewrite(expint)
-expint(1, z)/2 - expint(1, z*exp_polar(I*pi))/2 - I*pi/2
"""
_trigfunc = C.cosh
_atzero = S.ComplexInfinity
_atinf = S.Infinity
_atneginf = S.Infinity
@classmethod
def _minusfactor(cls, z):
return Chi(z) + I*pi
@classmethod
def _Ifactor(cls, z, sign):
return Ci(z) + I*pi/2*sign
def _eval_rewrite_as_expint(self, z):
from sympy import exp_polar
return -I*pi/2 - (E1(z) + E1(exp_polar(I*pi)*z))/2
###############################################################################
#################### FRESNEL INTEGRALS ########################################
###############################################################################
class FresnelIntegral(Function):
""" Base class for the Fresnel integrals."""
nargs = 1
unbranched = True
@classmethod
def eval(cls, z):
# Value at zero
if z is S.Zero:
return S(0)
# Try to pull out factors of -1 and I
prefact = S.One
newarg = z
changed = False
nz = newarg.extract_multiplicatively(-1)
if nz is not None:
prefact = -prefact
newarg = nz
changed = True
nz = newarg.extract_multiplicatively(I)
if nz is not None:
prefact = cls._sign*I*prefact
newarg = nz
changed = True
if changed:
return prefact*cls(newarg)
        # Values at positive infinity and I*infinity
        # (signs and factors of I, if any, were extracted above)
if z is S.Infinity:
return S.Half
elif z is I*S.Infinity:
return cls._sign*I*S.Half
def fdiff(self, argindex=1):
if argindex == 1:
return self._trigfunc(S.Half*pi*self.args[0]**2)
else:
raise ArgumentIndexError(self, argindex)
def _eval_is_real(self):
return self.args[0].is_real
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _as_real_imag(self, deep=True, **hints):
if self.args[0].is_real:
if deep:
hints['complex'] = False
return (self.expand(deep, **hints), S.Zero)
else:
return (self, S.Zero)
if deep:
re, im = self.args[0].expand(deep, **hints).as_real_imag()
else:
re, im = self.args[0].as_real_imag()
return (re, im)
def as_real_imag(self, deep=True, **hints):
# Fresnel S
# http://functions.wolfram.com/06.32.19.0003.01
# http://functions.wolfram.com/06.32.19.0006.01
# Fresnel C
# http://functions.wolfram.com/06.33.19.0003.01
# http://functions.wolfram.com/06.33.19.0006.01
x, y = self._as_real_imag(deep=deep, **hints)
sq = -y**2/x**2
re = S.Half*(self.func(x + x*sqrt(sq)) + self.func(x - x*sqrt(sq)))
im = x/(2*y) * sqrt(sq) * (self.func(x - x*sqrt(sq)) -
self.func(x + x*sqrt(sq)))
return (re, im)
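# Minimal usage sketch (illustration only): splitting a Fresnel integral of a
# complex argument with the as_real_imag() defined above. Symbol names are
# arbitrary; both concrete subclasses below behave the same way.
def _demo_fresnel_real_imag():
    from sympy import symbols
    a, b = symbols('a b', real=True)
    return fresnels(a + I*b).as_real_imag()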
class fresnels(FresnelIntegral):
r"""
Fresnel integral S.
This function is defined by
.. math:: \operatorname{S}(z) = \int_0^z \sin{\frac{\pi}{2} t^2} \mathrm{d}t.
It is an entire function.
Examples
========
>>> from sympy import I, oo, fresnels
>>> from sympy.abc import z
Several special values are known:
>>> fresnels(0)
0
>>> fresnels(oo)
1/2
>>> fresnels(-oo)
-1/2
>>> fresnels(I*oo)
-I/2
>>> fresnels(-I*oo)
I/2
In general one can pull out factors of -1 and I from the argument:
>>> fresnels(-z)
-fresnels(z)
>>> fresnels(I*z)
-I*fresnels(z)
The Fresnel S integral obeys the mirror symmetry:
>>> from sympy import conjugate
>>> conjugate(fresnels(z))
fresnels(conjugate(z))
Differentiation with respect to z is supported:
>>> from sympy import diff
>>> diff(fresnels(z), z)
sin(pi*z**2/2)
Defining the Fresnel functions via an integral
>>> from sympy import integrate, pi, sin, gamma, expand_func
>>> integrate(sin(pi*z**2/2), z)
3*fresnels(z)*gamma(3/4)/(4*gamma(7/4))
>>> expand_func(integrate(sin(pi*z**2/2), z))
fresnels(z)
We can numerically evaluate the Fresnel integral to arbitrary precision
on the whole complex plane:
>>> fresnels(2).evalf(30)
0.343415678363698242195300815958
>>> fresnels(-2*I).evalf(30)
0.343415678363698242195300815958*I
See Also
========
fresnelc
References
==========
.. [1] http://en.wikipedia.org/wiki/Fresnel_integral
.. [2] http://dlmf.nist.gov/7
.. [3] http://mathworld.wolfram.com/FresnelIntegrals.html
.. [4] http://functions.wolfram.com/GammaBetaErf/FresnelS
"""
_trigfunc = C.sin
_sign = -S.One
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 1:
p = previous_terms[-1]
return (-pi**2*x**4*(4*n - 1)/(8*n*(2*n + 1)*(4*n + 3))) * p
else:
return x**3 * (-x**4)**n * (S(2)**(-2*n - 1)*pi**(2*n + 1)) / ((4*n + 3)*C.factorial(2*n + 1))
def _eval_rewrite_as_erf(self, z):
return (S.One + I)/4 * (erf((S.One + I)/2*sqrt(pi)*z) - I*erf((S.One - I)/2*sqrt(pi)*z))
def _eval_rewrite_as_hyper(self, z):
return pi*z**3/6 * hyper([S(3)/4], [S(3)/2, S(7)/4], -pi**2*z**4/16)
def _eval_rewrite_as_meijerg(self, z):
return (pi*z**(S(9)/4) / (sqrt(2)*(z**2)**(S(3)/4)*(-z)**(S(3)/4))
* meijerg([], [1], [S(3)/4], [S(1)/4, 0], -pi**2*z**4/16))
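# Quick illustrative check (not part of the module): the first Maclaurin terms
# produced by taylor_term above reproduce the classical series
# S(z) = pi*z**3/6 - pi**3*z**7/336 + ...
def _demo_fresnels_taylor_terms(nterms=3):
    from sympy import Symbol
    x = Symbol('x')
    return [fresnels.taylor_term(k, x) for k in range(nterms)]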
class fresnelc(FresnelIntegral):
r"""
Fresnel integral C.
This function is defined by
.. math:: \operatorname{C}(z) = \int_0^z \cos{\frac{\pi}{2} t^2} \mathrm{d}t.
It is an entire function.
Examples
========
>>> from sympy import I, oo, fresnelc
>>> from sympy.abc import z
Several special values are known:
>>> fresnelc(0)
0
>>> fresnelc(oo)
1/2
>>> fresnelc(-oo)
-1/2
>>> fresnelc(I*oo)
I/2
>>> fresnelc(-I*oo)
-I/2
In general one can pull out factors of -1 and I from the argument:
>>> fresnelc(-z)
-fresnelc(z)
>>> fresnelc(I*z)
I*fresnelc(z)
The Fresnel C integral obeys the mirror symmetry:
>>> from sympy import conjugate
>>> conjugate(fresnelc(z))
fresnelc(conjugate(z))
Differentiation with respect to z is supported:
>>> from sympy import diff
>>> diff(fresnelc(z), z)
cos(pi*z**2/2)
Defining the Fresnel functions via an integral
>>> from sympy import integrate, pi, cos, gamma, expand_func
>>> integrate(cos(pi*z**2/2), z)
fresnelc(z)*gamma(1/4)/(4*gamma(5/4))
>>> expand_func(integrate(cos(pi*z**2/2), z))
fresnelc(z)
We can numerically evaluate the Fresnel integral to arbitrary precision
on the whole complex plane:
>>> fresnelc(2).evalf(30)
0.488253406075340754500223503357
>>> fresnelc(-2*I).evalf(30)
-0.488253406075340754500223503357*I
See Also
========
fresnels
References
==========
.. [1] http://en.wikipedia.org/wiki/Fresnel_integral
.. [2] http://dlmf.nist.gov/7
.. [3] http://mathworld.wolfram.com/FresnelIntegrals.html
.. [4] http://functions.wolfram.com/GammaBetaErf/FresnelC
"""
_trigfunc = C.cos
_sign = S.One
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0:
return S.Zero
else:
x = sympify(x)
if len(previous_terms) > 1:
p = previous_terms[-1]
return (-pi**2*x**4*(4*n - 3)/(8*n*(2*n - 1)*(4*n + 1))) * p
else:
return x * (-x**4)**n * (S(2)**(-2*n)*pi**(2*n)) / ((4*n + 1)*C.factorial(2*n))
def _eval_rewrite_as_erf(self, z):
return (S.One - I)/4 * (erf((S.One + I)/2*sqrt(pi)*z) + I*erf((S.One - I)/2*sqrt(pi)*z))
def _eval_rewrite_as_hyper(self, z):
return z * hyper([S.One/4], [S.One/2, S(5)/4], -pi**2*z**4/16)
def _eval_rewrite_as_meijerg(self, z):
return (pi*z**(S(3)/4) / (sqrt(2)*root(z**2, 4)*root(-z, 4))
* meijerg([], [1], [S(1)/4], [S(3)/4, 0], -pi**2*z**4/16))
###############################################################################
#################### HELPER FUNCTIONS #########################################
###############################################################################
class _erfs(Function):
"""
Helper function to make the :math:`erf(z)` function
tractable for the Gruntz algorithm.
"""
nargs = 1
def _eval_aseries(self, n, args0, x, logx):
if args0[0] != S.Infinity:
return super(_erfs, self)._eval_aseries(n, args0, x, logx)
z = self.args[0]
        l = [1/sqrt(S.Pi) * C.factorial(2*k)*(-S(4))**(-k)/C.factorial(k) *
             (1/z)**(2*k + 1) for k in xrange(0, n)]
o = C.Order(1/z**(2*n + 1), x)
# It is very inefficient to first add the order and then do the nseries
return (Add(*l))._eval_nseries(x, n, logx) + o
def fdiff(self, argindex=1):
if argindex == 1:
z = self.args[0]
return -2/sqrt(S.Pi) + 2*z*_erfs(z)
else:
raise ArgumentIndexError(self, argindex)
def _eval_rewrite_as_intractable(self, z):
return (S.One - erf(z))*C.exp(z**2)
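# Illustrative note (not part of the module): _erfs is the "tractable" stand-in
# for erf used by the Gruntz limit algorithm; conceptually
# erf(z) == 1 - exp(-z**2)*_erfs(z). The sketch below only calls the rewrite
# hook defined above.
def _demo_erfs_identity():
    from sympy import Symbol
    z = Symbol('z')
    # (1 - erf(z))*exp(z**2), i.e. the "intractable" form of _erfs(z)
    return _erfs(z)._eval_rewrite_as_intractable(z)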
|
bsd-3-clause
| 4,165,216,953,519,735,300
| 26.644068
| 110
| 0.549203
| false
| 3.245771
| false
| false
| false
|
pshchelo/heat
|
heat/tests/openstack/test_volume.py
|
1
|
42764
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import json
from cinderclient import exceptions as cinder_exp
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import cinder
from heat.engine.clients.os import glance
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.objects import resource_data as resource_data_object
from heat.tests.nova import fakes as fakes_nova
from heat.tests import test_volume_utils as vt_base
from heat.tests import utils
cinder_volume_template = '''
heat_template_version: 2013-05-23
description: Cinder volumes and attachments.
resources:
volume:
type: OS::Cinder::Volume
properties:
availability_zone: nova
size: 1
name: test_name
description: test_description
metadata:
key: value
volume2:
type: OS::Cinder::Volume
properties:
availability_zone: nova
size: 2
volume3:
type: OS::Cinder::Volume
properties:
availability_zone: nova
size: 1
name: test_name
scheduler_hints: {"hint1": "good_advice"}
attachment:
type: OS::Cinder::VolumeAttachment
properties:
instance_uuid: WikiDatabase
volume_id: { get_resource: volume }
mountpoint: /dev/vdc
'''
single_cinder_volume_template = '''
heat_template_version: 2013-05-23
description: Cinder volume
resources:
volume:
type: OS::Cinder::Volume
properties:
size: 1
name: test_name
description: test_description
'''
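# Minimal sketch (illustration only): the HOT templates above are plain YAML
# strings; the tests below parse them with template_format.parse and tweak the
# resulting dict before building a stack.
def _demo_parse_volume_template():
    t = template_format.parse(cinder_volume_template)
    # e.g. size 1, name 'test_name', metadata {'key': 'value'}
    return t['resources']['volume']['properties']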
class CinderVolumeTest(vt_base.BaseVolumeTest):
def setUp(self):
super(CinderVolumeTest, self).setUp()
self.t = template_format.parse(cinder_volume_template)
self.use_cinder = True
def _mock_create_volume(self, fv, stack_name, size=1,
final_status='available'):
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=size, availability_zone='nova',
description='test_description',
name='test_name',
metadata={'key': 'value'}).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume(final_status, id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
return fv_ready
def test_cinder_volume_size_constraint(self):
self.t['resources']['volume']['properties']['size'] = 0
stack = utils.parse_stack(self.t)
error = self.assertRaises(exception.StackValidationFailed,
self.create_volume,
self.t, stack, 'volume')
self.assertEqual(
"Property error : resources.volume.properties.size: "
"0 is out of range (min: 1, max: None)", six.text_type(error))
def test_cinder_create(self):
fv = vt_base.FakeVolume('creating')
stack_name = 'test_cvolume_stack'
self.stub_SnapshotConstraint_validate()
self.stub_VolumeConstraint_validate()
self.stub_VolumeTypeConstraint_validate()
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description='test_description',
name='test_name',
metadata={'key': 'value'},
volume_type='lvm').AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
self.t['resources']['volume']['properties'].update({
'volume_type': 'lvm',
})
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.m.VerifyAll()
def test_cinder_create_from_image(self):
fv = vt_base.FakeVolume('downloading')
stack_name = 'test_cvolume_create_from_img_stack'
image_id = '46988116-6703-4623-9dbc-2bc6d284021b'
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.m.StubOutWithMock(glance.GlanceClientPlugin, 'get_image_id')
glance.GlanceClientPlugin.get_image_id(
image_id).MultipleTimes().AndReturn(image_id)
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description='ImageVolumeDescription',
name='ImageVolume',
imageRef=image_id).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
self.t['resources']['volume']['properties'] = {
'size': '1',
'name': 'ImageVolume',
'description': 'ImageVolumeDescription',
'availability_zone': 'nova',
'image': image_id,
}
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.m.VerifyAll()
def test_cinder_create_with_read_only(self):
fv = vt_base.FakeVolume('with_read_only_access_mode')
stack_name = 'test_create_with_read_only'
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description='ImageVolumeDescription',
name='ImageVolume').AndReturn(fv)
update_readonly_mock = self.patchobject(self.cinder_fc.volumes,
'update_readonly_flag')
update_readonly_mock(fv.id, False).return_value(None)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
self.t['resources']['volume']['properties'] = {
'size': '1',
'name': 'ImageVolume',
'description': 'ImageVolumeDescription',
'availability_zone': 'nova',
'read_only': False,
}
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.m.VerifyAll()
def test_cinder_default(self):
fv = vt_base.FakeVolume('creating')
stack_name = 'test_cvolume_default_stack'
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
vol_name = utils.PhysName(stack_name, 'volume')
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description=None,
name=vol_name).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
self.t['resources']['volume']['properties'] = {
'size': '1',
'availability_zone': 'nova',
}
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.m.VerifyAll()
def test_cinder_fn_getatt(self):
stack_name = 'test_cvolume_fngetatt_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
fv = vt_base.FakeVolume(
'available', availability_zone='zone1',
size=1, snapshot_id='snap-123', name='name',
description='desc', volume_type='lvm',
metadata={'key': 'value'}, source_volid=None,
bootable=False, created_at='2013-02-25T02:40:21.000000',
encrypted=False, attachments=[])
self.cinder_fc.volumes.get('vol-123').MultipleTimes().AndReturn(fv)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
self.assertEqual(u'zone1', rsrc.FnGetAtt('availability_zone'))
self.assertEqual(u'1', rsrc.FnGetAtt('size'))
self.assertEqual(u'snap-123', rsrc.FnGetAtt('snapshot_id'))
self.assertEqual(u'name', rsrc.FnGetAtt('display_name'))
self.assertEqual(u'desc', rsrc.FnGetAtt('display_description'))
self.assertEqual(u'lvm', rsrc.FnGetAtt('volume_type'))
self.assertEqual(json.dumps({'key': 'value'}),
rsrc.FnGetAtt('metadata'))
self.assertEqual({'key': 'value'},
rsrc.FnGetAtt('metadata_values'))
self.assertEqual(u'None', rsrc.FnGetAtt('source_volid'))
self.assertEqual(u'available', rsrc.FnGetAtt('status'))
self.assertEqual(u'2013-02-25T02:40:21.000000',
rsrc.FnGetAtt('created_at'))
self.assertEqual(u'False', rsrc.FnGetAtt('bootable'))
self.assertEqual(u'False', rsrc.FnGetAtt('encrypted'))
self.assertEqual(u'[]', rsrc.FnGetAtt('attachments'))
error = self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'unknown')
self.assertEqual(
'The Referenced Attribute (volume unknown) is incorrect.',
six.text_type(error))
self.m.VerifyAll()
def test_cinder_attachment(self):
stack_name = 'test_cvolume_attach_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'))
self.stub_VolumeConstraint_validate()
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
rsrc = self.create_attachment(self.t, stack, 'attachment')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_cinder_volume_shrink_fails(self):
stack_name = 'test_cvolume_shrink_fail_stack'
# create script
self._mock_create_volume(vt_base.FakeVolume('creating'),
stack_name, size=2)
# update script
fv = vt_base.FakeVolume('available', size=2)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.m.ReplayAll()
self.t['resources']['volume']['properties']['size'] = 2
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 1
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertEqual('NotSupported: Shrinking volume is not supported.',
six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_detached(self):
stack_name = 'test_cvolume_extend_det_stack'
# create script
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
# update script
fv = vt_base.FakeVolume('available',
size=1, attachments=[])
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.extend(fv.id, 2)
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('available'))
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
self.assertIsNone(update_task())
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_fails_to_start(self):
stack_name = 'test_cvolume_extend_fail_start_stack'
# create script
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
# update script
fv = vt_base.FakeVolume('available',
size=1, attachments=[])
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.extend(fv.id, 2).AndRaise(
cinder_exp.OverLimit(413))
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertIn('Over limit', six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_fails_to_complete(self):
stack_name = 'test_cvolume_extend_fail_compl_stack'
# create script
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
# update script
fv = vt_base.FakeVolume('available',
size=1, attachments=[])
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.extend(fv.id, 2)
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('error_extending'))
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertIn("Volume resize failed - Unknown status error_extending",
six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_attached(self):
stack_name = 'test_cvolume_extend_att_stack'
# create script
self.stub_VolumeConstraint_validate()
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'))
# update script
attachments = [{'id': 'vol-123',
'device': '/dev/vdc',
'server_id': u'WikiDatabase'}]
fv2 = vt_base.FakeVolume('in-use',
attachments=attachments, size=1)
self.cinder_fc.volumes.get(fv2.id).AndReturn(fv2)
# detach script
fvd = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fvd)
self.cinder_fc.volumes.get(fvd.id).AndReturn(fvd)
self.fc.volumes.delete_server_volume('WikiDatabase', 'vol-123')
self.cinder_fc.volumes.get(fvd.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fvd)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
# resize script
self.cinder_fc.volumes.extend(fvd.id, 2)
self.cinder_fc.volumes.get(fvd.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fvd.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fvd.id).AndReturn(
vt_base.FakeVolume('available'))
# attach script
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'),
update=True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
self.create_attachment(self.t, stack, 'attachment')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
self.assertIsNone(update_task())
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_created_from_backup_with_same_size(self):
stack_name = 'test_cvolume_extend_snapsht_stack'
# create script
fvbr = vt_base.FakeBackupRestore('vol-123')
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.m.StubOutWithMock(self.cinder_fc.restores, 'restore')
self.cinder_fc.restores.restore('backup-123').AndReturn(fvbr)
self.cinder_fc.volumes.get('vol-123').AndReturn(
vt_base.FakeVolume('restoring-backup'))
vol_name = utils.PhysName(stack_name, 'volume')
self.cinder_fc.volumes.update('vol-123', description=None,
name=vol_name).AndReturn(None)
self.cinder_fc.volumes.get('vol-123').AndReturn(
vt_base.FakeVolume('available'))
# update script
fv = vt_base.FakeVolume('available', size=2)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.m.ReplayAll()
self.t['resources']['volume']['properties'] = {
'availability_zone': 'nova',
'backup_id': 'backup-123'
}
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('available', fv.status)
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
self.assertIsNone(update_task())
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_retype(self):
fv = vt_base.FakeVolume('available',
size=1, name='my_vol',
description='test')
stack_name = 'test_cvolume_retype'
new_vol_type = 'new_type'
self.patchobject(cinder.CinderClientPlugin, '_create',
return_value=self.cinder_fc)
self.patchobject(self.cinder_fc.volumes, 'create', return_value=fv)
self.patchobject(self.cinder_fc.volumes, 'get', return_value=fv)
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume2')
props = copy.deepcopy(rsrc.properties.data)
props['volume_type'] = new_vol_type
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
self.patchobject(cinder.CinderClientPlugin, 'get_volume_type',
return_value=new_vol_type)
self.patchobject(self.cinder_fc.volumes, 'retype')
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual(1, self.cinder_fc.volumes.retype.call_count)
self.cinder_fc.volume_api_version = 1
new_vol_type_1 = 'new_type_1'
props = copy.deepcopy(rsrc.properties.data)
props['volume_type'] = new_vol_type_1
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
        # retype is not supported when the volume API version is 1
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertEqual('NotSupported: Using Cinder API V1, '
'volume_type update is not supported.',
six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.assertEqual(1, self.cinder_fc.volumes.retype.call_count)
def test_cinder_volume_update_name_and_metadata(self):
# update the name, description and metadata
fv = vt_base.FakeVolume('creating',
size=1, name='my_vol',
description='test')
stack_name = 'test_cvolume_updname_stack'
update_name = 'update_name'
meta = {'Key': 'New Value'}
update_description = 'update_description'
kwargs = {
'name': update_name,
'description': update_description
}
fv = self._mock_create_volume(fv, stack_name)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.update(fv, **kwargs).AndReturn(None)
self.cinder_fc.volumes.update_all_metadata(fv, meta).AndReturn(None)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['name'] = update_name
props['description'] = update_description
props['metadata'] = meta
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
def test_cinder_volume_update_read_only(self):
# update read only access mode
fv = vt_base.FakeVolume('update_read_only_access_mode')
stack_name = 'test_update_read_only'
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description='test_description',
name='test_name',
metadata={u'key': u'value'}).AndReturn(fv)
update_readonly_mock = self.patchobject(self.cinder_fc.volumes,
'update_readonly_flag')
update_readonly_mock(fv.id, True).return_value(None)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['read_only'] = True
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
def test_cinder_snapshot(self):
stack_name = 'test_cvolume_snpsht_stack'
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone=None,
description='test_description',
name='test_name'
).AndReturn(vt_base.FakeVolume('creating'))
fv = vt_base.FakeVolume('available')
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fb = vt_base.FakeBackup('creating')
self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
self.cinder_fc.backups.create(fv.id).AndReturn(fb)
self.m.StubOutWithMock(self.cinder_fc.backups, 'get')
self.cinder_fc.backups.get(fb.id).AndReturn(
vt_base.FakeBackup('available'))
self.m.ReplayAll()
t = template_format.parse(single_cinder_volume_template)
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = stack['volume']
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.snapshot)()
self.assertEqual((rsrc.SNAPSHOT, rsrc.COMPLETE), rsrc.state)
self.assertEqual({'backup_id': 'backup-123'},
resource_data_object.ResourceData.get_all(rsrc))
self.m.VerifyAll()
def test_cinder_snapshot_error(self):
stack_name = 'test_cvolume_snpsht_err_stack'
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone=None,
description='test_description',
name='test_name'
).AndReturn(vt_base.FakeVolume('creating'))
fv = vt_base.FakeVolume('available')
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fb = vt_base.FakeBackup('creating')
self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
self.cinder_fc.backups.create(fv.id).AndReturn(fb)
self.m.StubOutWithMock(self.cinder_fc.backups, 'get')
fail_reason = 'Could not determine which Swift endpoint to use'
self.cinder_fc.backups.get(fb.id).AndReturn(
vt_base.FakeBackup('error', fail_reason=fail_reason))
self.m.ReplayAll()
t = template_format.parse(single_cinder_volume_template)
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = stack['volume']
scheduler.TaskRunner(rsrc.create)()
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.snapshot))
self.assertEqual((rsrc.SNAPSHOT, rsrc.FAILED), rsrc.state)
self.assertIn(fail_reason, rsrc.status_reason)
self.assertEqual({u'backup_id': u'backup-123'},
resource_data_object.ResourceData.get_all(rsrc))
self.m.VerifyAll()
def test_cinder_volume_attachment_update_device(self):
stack_name = 'test_cvolume_attach_udev_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
self._mock_create_server_volume_script(
vt_base.FakeVolume('attaching'))
self.stub_VolumeConstraint_validate()
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
# attach script
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'),
device=u'/dev/vdd',
update=True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
rsrc = self.create_attachment(self.t, stack, 'attachment')
props = copy.deepcopy(rsrc.properties.data)
props['mountpoint'] = '/dev/vdd'
props['volume_id'] = 'vol-123'
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_attachment_update_volume(self):
stack_name = 'test_cvolume_attach_uvol_stack'
self.stub_VolumeConstraint_validate()
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
fv2 = vt_base.FakeVolume('creating', id='vol-456')
vol2_name = utils.PhysName(stack_name, 'volume2')
self.cinder_fc.volumes.create(
size=2, availability_zone='nova',
description=None,
name=vol2_name).AndReturn(fv2)
self.cinder_fc.volumes.get(fv2.id).AndReturn(fv2)
fv2 = vt_base.FakeVolume('available', id=fv2.id)
self.cinder_fc.volumes.get(fv2.id).AndReturn(fv2)
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'))
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
# attach script
fv2a = vt_base.FakeVolume('attaching', id='vol-456')
self._mock_create_server_volume_script(fv2a, volume='vol-456',
update=True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.create_volume(self.t, stack, 'volume2')
rsrc = self.create_attachment(self.t, stack, 'attachment')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
props = copy.deepcopy(rsrc.properties.data)
props['volume_id'] = 'vol-456'
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual(fv2a.id, rsrc.resource_id)
self.m.VerifyAll()
def test_cinder_volume_attachment_update_server(self):
stack_name = 'test_cvolume_attach_usrv_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
self._mock_create_server_volume_script(
vt_base.FakeVolume('attaching'))
self.stub_VolumeConstraint_validate()
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
# attach script
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'),
server=u'AnotherServer',
update=True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
rsrc = self.create_attachment(self.t, stack, 'attachment')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
props = copy.deepcopy(rsrc.properties.data)
props['instance_uuid'] = 'AnotherServer'
props['volume_id'] = 'vol-123'
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_create_with_scheduler_hints(self):
fv = vt_base.FakeVolume('creating')
cinder.CinderClientPlugin._create().AndReturn(self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, name='test_name', description=None,
availability_zone='nova',
scheduler_hints={'hint1': 'good_advice'}).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
stack_name = 'test_cvolume_scheduler_hints_stack'
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume3')
self.m.VerifyAll()
def test_cinder_create_with_scheduler_hints_and_cinder_api_v1(self):
cinder.CinderClientPlugin._create().AndReturn(self.cinder_fc)
self.cinder_fc.volume_api_version = 1
self.m.ReplayAll()
stack_name = 'test_cvolume_scheduler_hints_api_v1_stack'
stack = utils.parse_stack(self.t, stack_name=stack_name)
ex = self.assertRaises(exception.StackValidationFailed,
self.create_volume, self.t, stack, 'volume3')
self.assertIn('Scheduler hints are not supported by the current '
'volume API.', six.text_type(ex))
self.m.VerifyAll()
def _test_cinder_create_invalid_property_combinations(
self, stack_name, combinations, err_msg, exc):
stack = utils.parse_stack(self.t, stack_name=stack_name)
vp = stack.t['Resources']['volume2']['Properties']
vp.pop('size')
vp.update(combinations)
rsrc = stack['volume2']
ex = self.assertRaises(exc, rsrc.validate)
self.assertEqual(err_msg, six.text_type(ex))
def test_cinder_create_with_image_and_imageRef(self):
stack_name = 'test_create_with_image_and_imageRef'
combinations = {'imageRef': 'image-456', 'image': 'image-123'}
err_msg = ("Cannot define the following properties at the same "
"time: image, imageRef.")
self.stub_ImageConstraint_validate()
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.ResourcePropertyConflict)
def test_cinder_create_with_size_snapshot_and_image(self):
stack_name = 'test_create_with_size_snapshot_and_image'
combinations = {
'size': 1,
'image': 'image-123',
'snapshot_id': 'snapshot-123'}
self.stub_ImageConstraint_validate()
self.stub_SnapshotConstraint_validate()
err_msg = ('If "size" is provided, only one of "image", "imageRef", '
'"source_volid", "snapshot_id" can be specified, but '
'currently specified options: '
'[\'snapshot_id\', \'image\'].')
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_with_size_snapshot_and_imageRef(self):
stack_name = 'test_create_with_size_snapshot_and_imageRef'
combinations = {
'size': 1,
'imageRef': 'image-123',
'snapshot_id': 'snapshot-123'}
self.stub_ImageConstraint_validate()
self.stub_SnapshotConstraint_validate()
err_msg = ('If "size" is provided, only one of "image", "imageRef", '
'"source_volid", "snapshot_id" can be specified, but '
'currently specified options: '
'[\'snapshot_id\', \'imageRef\'].')
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_with_size_snapshot_and_sourcevol(self):
stack_name = 'test_create_with_size_snapshot_and_sourcevol'
combinations = {
'size': 1,
'source_volid': 'volume-123',
'snapshot_id': 'snapshot-123'}
self.stub_VolumeConstraint_validate()
self.stub_SnapshotConstraint_validate()
err_msg = ('If "size" is provided, only one of "image", "imageRef", '
'"source_volid", "snapshot_id" can be specified, but '
'currently specified options: '
'[\'snapshot_id\', \'source_volid\'].')
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_with_snapshot_and_source_volume(self):
stack_name = 'test_create_with_snapshot_and_source_volume'
combinations = {
'source_volid': 'source_volume-123',
'snapshot_id': 'snapshot-123'}
err_msg = ('If neither "backup_id" nor "size" is provided, one and '
'only one of "image", "imageRef", "source_volid", '
'"snapshot_id" must be specified, but currently '
'specified options: [\'snapshot_id\', \'source_volid\'].')
self.stub_VolumeConstraint_validate()
self.stub_SnapshotConstraint_validate()
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_with_image_and_source_volume(self):
stack_name = 'test_create_with_image_and_source_volume'
combinations = {
'source_volid': 'source_volume-123',
'image': 'image-123'}
err_msg = ('If neither "backup_id" nor "size" is provided, one and '
'only one of "image", "imageRef", "source_volid", '
'"snapshot_id" must be specified, but currently '
'specified options: [\'source_volid\', \'image\'].')
self.stub_VolumeConstraint_validate()
self.stub_ImageConstraint_validate()
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_no_size_no_combinations(self):
stack_name = 'test_create_no_size_no_options'
combinations = {}
err_msg = ('If neither "backup_id" nor "size" is provided, one and '
'only one of "image", "imageRef", "source_volid", '
'"snapshot_id" must be specified, but currently '
'specified options: [].')
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_volume_restore(self):
stack_name = 'test_cvolume_restore_stack'
# create script
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone=None,
description='test_description',
name='test_name'
).AndReturn(vt_base.FakeVolume('creating'))
fv = vt_base.FakeVolume('available')
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
# snapshot script
fb = vt_base.FakeBackup('creating')
self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
self.cinder_fc.backups.create(fv.id).AndReturn(fb)
self.m.StubOutWithMock(self.cinder_fc.backups, 'get')
self.cinder_fc.backups.get(fb.id).AndReturn(
vt_base.FakeBackup('available'))
# restore script
fvbr = vt_base.FakeBackupRestore('vol-123')
self.m.StubOutWithMock(self.cinder_fc.restores, 'restore')
self.cinder_fc.restores.restore('backup-123').AndReturn(fvbr)
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.cinder_fc.volumes.update('vol-123',
description='test_description',
name='test_name')
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.m.ReplayAll()
t = template_format.parse(single_cinder_volume_template)
stack = utils.parse_stack(t, stack_name=stack_name)
scheduler.TaskRunner(stack.create)()
self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)
scheduler.TaskRunner(stack.snapshot)()
self.assertEqual((stack.SNAPSHOT, stack.COMPLETE), stack.state)
data = stack.prepare_abandon()
fake_snapshot = collections.namedtuple(
'Snapshot', ('data', 'stack_id'))(data, stack.id)
stack.restore(fake_snapshot)
self.assertEqual((stack.RESTORE, stack.COMPLETE), stack.state)
self.m.VerifyAll()
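# Note (illustration only): every test above follows the same mox
# record/replay/verify lifecycle, e.g.:
#     self.cinder_fc.volumes.get(fv.id).AndReturn(fv)   # record expectation
#     self.m.ReplayAll()                                # switch to replay mode
#     self.create_volume(self.t, stack, 'volume')       # exercise the resource
#     self.m.VerifyAll()                                # assert every call happened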
|
apache-2.0
| -4,979,401,692,813,748,000
| 39.883365
| 79
| 0.607637
| false
| 3.770411
| true
| false
| false
|
arruda/rmr
|
rmr/apps/books/migrations/0006_books_to_userbooks.py
|
1
|
8533
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
Book = orm['books.Book']
UserBook = orm['books.UserBook']
for book in Book.objects.all():
user = book.user
userBook = UserBook(user=user,book=book)
userBook.desired = book.desired
userBook.purchase_store = book.purchase_store
userBook.purchased = book.purchased
userBook.purchase_value = book.purchase_value
userBook.purchase_date = book.purchase_date
userBook.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'authors.author': {
'Meta': {'ordering': "['name']", 'unique_together': "(('user', 'name'),)", 'object_name': 'Author'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'books.book': {
'Meta': {'ordering': "['name']", 'unique_together': "(('user', 'name'),)", 'object_name': 'Book'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'books'", 'to': "orm['authors.Author']"}),
'desired': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'genres': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['books.Genre']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'books'", 'to': "orm['publishers.Publisher']"}),
'purchase_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'purchase_store': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'books_old'", 'null': 'True', 'to': "orm['stores.Store']"}),
'purchase_value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'purchased': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'release_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today', 'null': 'True', 'blank': 'True'}),
'synopsis': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'books.genre': {
'Meta': {'ordering': "['name']", 'unique_together': "(('user', 'name'),)", 'object_name': 'Genre'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'books.userbook': {
'Meta': {'object_name': 'UserBook'},
'book': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'users'", 'to': "orm['books.Book']"}),
'desired': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'purchase_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'purchase_store': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'books'", 'null': 'True', 'to': "orm['stores.Store']"}),
'purchase_value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'purchased': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'books'", 'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'publishers.publisher': {
'Meta': {'ordering': "['name']", 'unique_together': "(('user', 'name'),)", 'object_name': 'Publisher'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'stores.store': {
'Meta': {'ordering': "['name']", 'unique_together': "(('user', 'name'),)", 'object_name': 'Store'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['books']
symmetrical = True
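# Illustrative sketch only (not wired into the migration): the same forwards
# copy expressed generically over the shared purchase fields, which keeps the
# field list in one place if Book and UserBook evolve together.
def _copy_purchase_fields(book, userbook,
                          fields=('desired', 'purchase_store', 'purchased',
                                  'purchase_value', 'purchase_date')):
    for name in fields:
        setattr(userbook, name, getattr(book, name))
    return userbook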
|
mit
| -4,910,646,489,504,296,000
| 70.108333
| 182
| 0.545881
| false
| 3.759031
| false
| false
| false
|
az0/entity-metadata
|
code/etl_openlibrary.py
|
1
|
2492
|
#!/usr/bin/python3
#
# Copyright (C) 2019 by Compassion International. All rights reserved.
# License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.
# This is free software: you are free to change and redistribute it.
# There is NO WARRANTY, to the extent permitted by law.
"""
This program ETLs the Open Library authors dump file.
The input is a tab-delimited file with JSON in one column.
The output is a simpler file, which is a CSV with basic biographical
information plus unique identifiers.
Get the dump from here
https://openlibrary.org/developers/dumps
Do not decompress the dump file.
"""
import csv
import sys
import gzip
import json
csv.field_size_limit(sys.maxsize)
# The value id_wikidata (not nested under remote_ids) is defined
# exactly once out of 6.9M records, and in that case it's redundant
# to the value nested under remote_ids. It seems to be a mistake,
# so we'll ignore it.
retain_keys = ['key', 'id_wikidata', 'entity_type', 'name', 'fuller_name', 'personal_name', 'alternate_names',
'birth_date', 'death_date']
def process_json(j, writer):
author = json.loads(j)
author_retain = {}
for retain_key in retain_keys:
if retain_key in author:
author_retain[retain_key] = author[retain_key]
if 'remote_ids' in author and 'wikidata' in author['remote_ids']:
# extract nested value
author_retain['id_wikidata'] = author['remote_ids']['wikidata']
if 'alternate_names' in author:
# reformat multiple items from JSON list to pipe delimited
author_retain['alternate_names'] = '|'.join(author['alternate_names'])
writer.writerow(author_retain)
def go():
if len(sys.argv) != 3:
print(
'Usage: %s (path to OpenLibrary authors .txt.gz) (path to output .csv)' % sys.argv[0])
sys.exit(1)
txt_gz_fn = sys.argv[1]
csv_out_fn = sys.argv[2]
with gzip.open(txt_gz_fn, 'rt') as inf: # inf= IN File
reader = csv.reader(inf, delimiter='\t')
with open(csv_out_fn, 'w') as outf:
writer = csv.DictWriter(outf, fieldnames=retain_keys)
writer.writeheader()
print('Processing...')
count = 0
for row in reader:
process_json(row[4], writer)
count += 1
if (count % 10000) == 0:
# progress indicator
print('.', end='', flush=True)
print('\nDone.')
go()
|
gpl-3.0
| -2,435,090,541,281,703,000
| 30.15
| 110
| 0.630016
| false
| 3.529745
| false
| false
| false
|
18F/github-issue-lifecycle
|
app/models.py
|
1
|
15856
|
import itertools
import os
from collections import OrderedDict
from datetime import date, datetime, timedelta
import requests
from requests.auth import HTTPBasicAuth
from . import db
from .app import app
from .utils import to_py_datetime
GH_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
BEGINNING_OF_TIME = '1970-01-01T00:00:00Z'
BEGINNING_DATETIME = datetime.strptime(BEGINNING_OF_TIME, GH_DATE_FORMAT)
def authorization():
try:
auth = HTTPBasicAuth(os.environ['GITHUB_USER'],
os.environ['GITHUB_AUTH'])
return auth
except KeyError:
app.logger.warning(
'Environment variables GITHUB_USER and GITHUB_AUTH not set')
app.logger.warning('Skipping authentication...')
return None
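# Example environment setup for authenticated requests (placeholder values):
#   export GITHUB_USER=octocat
#   export GITHUB_AUTH=<personal access token>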
class Repo(db.Model):
id = db.Column(db.Integer, primary_key=True)
owner = db.Column(db.Text, nullable=False)
name = db.Column(db.Text, nullable=False)
synched_at = db.Column(db.DateTime(),
nullable=False,
default=BEGINNING_DATETIME)
issues = db.relationship('Issue',
cascade='all, delete-orphan',
order_by='Issue.created_at',
backref='repo')
ISSUES_PAGE_SIZE = 100
@classmethod
def get_fresh(cls, owner_name, repo_name, refresh_threshhold_seconds=None):
"""For a repo ``repo_name`` owned by ``owner_name``:
1. Fetches or creates the Repo model instance
2. Refreshes the data from Github if necessary"""
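        # Illustrative call: Repo.get_fresh('18f', 'github-issue-lifecycle')
        # returns the model instance, re-fetching issues only if stale.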
if refresh_threshhold_seconds is None:
refresh_threshhold_seconds = app.config[
'REFRESH_THRESHHOLD_SECONDS']
(owner_name, repo_name) = (owner_name.lower(), repo_name.lower())
repo = (cls.query.filter_by(owner=owner_name,
name=repo_name).first() or
cls(owner=owner_name,
name=repo_name,
synched_at=BEGINNING_DATETIME))
if (datetime.now() - repo.synched_at) > timedelta(
seconds=int(refresh_threshhold_seconds)):
repo.fetch_issues()
db.session.add(repo)
db.session.commit()
repo.set_milestone_color_map()
return repo
def url(self):
return 'https://api.github.com/repos/{}/{}/'.format(self.owner,
self.name)
@classmethod
def _latest_update(cls, items, field_name='updated_at'):
"Returns latest `field_name` in `items`"
updates = [datetime.strptime(
i.get(field_name, BEGINNING_OF_TIME), GH_DATE_FORMAT)
for i in items]
return max(updates).strftime(GH_DATE_FORMAT)
def raw_issue_data(self):
params = {
'since': self.synched_at.strftime(GH_DATE_FORMAT),
'per_page': self.ISSUES_PAGE_SIZE,
'sort': 'updated',
'direction': 'asc',
'state': 'all' # include closed issues
}
auth = authorization()
issues = requests.get(self.url() + 'issues', params=params, auth=auth)
if issues.ok:
result = {}
new_issues = [i for i in issues.json()
if i['number'] not in result]
while new_issues:
result.update({i['number']: i for i in new_issues})
# Github seems to be ignoring `sort` parameter, have to
# check all results, alas
params['since'] = self._latest_update(new_issues)
issues = requests.get(self.url() + 'issues',
params=params,
auth=authorization())
new_issues = [i
for i in issues.json()
if i['number'] not in result]
return result.values()
else:
err_msg = 'Could not fetch issues for repo {}/{}: {}'.format(
self.owner, self.name, issues.text)
if not auth:
err_msg += '\nNOTE: Environment variables GITHUB_USER and GITHUB_AUTH not set'
raise FileNotFoundError(err_msg)
def fetch_issues(self):
"""Refresh the database's store of issues for this repo from github."""
for issue_data in self.raw_issue_data():
issue = Issue.query.filter_by(
number=issue_data.get('number')).first()
if issue:
db.session.delete(issue)
db.session.commit()
issue = Issue.from_raw(issue_data)
issue.repo = self
issue.fetch_events()
self.synched_at = datetime.now()
db.session.commit()
def json_summary(self):
result = dict(name=self.name,
owner=self.owner,
issues=[iss.json_summary() for iss in self.issues])
return result
def json_summary_flattened(self):
spans = list(self.spans())
result = dict(spans=spans,
stones=(self.stones()),
colors=[self.milestone_colors[s['span']['milestones'][
-1]] for s in spans], )
return result
def spans(self):
for (idx, iss) in enumerate(self.issues):
lifecycle = iss.lifecycle()
for span in lifecycle['spans']:
yield {'issue': iss,
'index': idx,
'span': span,
'final': lifecycle['final']}
def stones(self):
for (idx, iss) in enumerate(self.issues):
lifecycle = iss.lifecycle()
for stone in lifecycle['points']:
yield {'issue': iss, 'index': idx, 'stone': stone}
def milestones(self):
"List of milestones in all issues, in rough order of first appearance"
nested = [[e.milestone for e in i.events] for i in self.issues]
all_milestones = list(OrderedDict.fromkeys(
itertools.chain.from_iterable(nested)))
if None in all_milestones:
all_milestones.remove(None)
return all_milestones
_PALLETTE = ('greenyellow',
'cornflowerblue',
'hotpink',
'indigo',
                 'fuchsia',
'green',
'lightskyblue',
'firebrick',
'gray',
'lightcoral',
'darkslategray',
'darkorange',
'darkolivegreen',
'cyan',
'chocolate',
'blueviolet',
'burlywood',
'aquamarine', )
def set_milestone_color_map(self):
"Decide a color to correspond to each type of milestone used in the repo"
colors = itertools.cycle(self._PALLETTE
) # reuse colors if too many milestones
self.milestone_colors = {}
for milestone in self.milestones():
self.milestone_colors[milestone] = colors.__next__()
self.milestone_colors.update({'opened': 'gold',
'reopened': 'gold',
'closed': 'black'})
labels_issues = db.Table(
'labels_issues',
db.Column('label_id', db.Integer, db.ForeignKey('label.id')),
db.Column('issue_id', db.Integer, db.ForeignKey('issue.id')))
class Issue(db.Model):
id = db.Column(db.Integer, primary_key=True)
repo_id = db.Column(db.Integer(), db.ForeignKey(Repo.id))
number = db.Column(db.Integer)
title = db.Column(db.String())
body = db.Column(db.String())
state = db.Column(db.String())
creator_login = db.Column(db.String(),
db.ForeignKey('person.login'),
nullable=False)
assignee_login = db.Column(db.String(),
db.ForeignKey('person.login'),
nullable=True)
comments = db.Column(db.String())
locked = db.Column(db.Boolean)
url = db.Column(db.String(), nullable=True)
events_url = db.Column(db.String(), nullable=True)
labels_url = db.Column(db.String(), nullable=True)
comments_url = db.Column(db.String(), nullable=True)
html_url = db.Column(db.String(), nullable=True)
created_at = db.Column(db.DateTime(), default=date.today)
updated_at = db.Column(db.DateTime(), default=date.today)
closed_at = db.Column(db.DateTime(), nullable=True)
labels = db.relationship('Label',
secondary=labels_issues,
backref=db.backref('issues',
lazy='dynamic'))
events = db.relationship('Event',
cascade='all, delete-orphan',
order_by='Event.created_at',
backref='issue')
@classmethod
def from_raw(cls, issue_data):
insertable = {
'id': issue_data.get('id'),
'number': issue_data.get('number'),
'title': issue_data.get('title'),
'state': issue_data.get('state'),
'body': issue_data.get('body'),
'locked': issue_data.get('locked'),
'url': issue_data.get('url'),
'labels_url': issue_data.get('labels_url'),
'html_url': issue_data.get('html_url'),
'events_url': issue_data.get('events_url'),
'updated_at': to_py_datetime(issue_data['updated_at']),
'created_at': to_py_datetime(issue_data['created_at']),
'closed_at': to_py_datetime(issue_data['closed_at']),
}
creator = Person.from_raw(issue_data['user'])
insertable['creator_login'] = creator.login
if issue_data.get('assignee'):
assignee = Person.from_raw(issue_data['assignee'])
insertable['assignee_login'] = assignee.login
issue = cls(**insertable)
for label_data in issue_data['labels']:
issue.labels.append(Label.get_or_create(label_data))
db.session.add(issue)
return issue
def fetch_events(self):
response = requests.get('{}?per_page=100'.format(self.events_url),
auth=authorization())
if self.number in (4, 17):
from pprint import pprint
with open('events{}.json'.format(self.number), 'w') as outfile:
pprint(response.json(), outfile)
# todo: if > 100 events?
if response.ok:
for raw_event in response.json():
self.events.append(Event.from_raw(raw_event))
def json_summary(self):
lifecycle = self.lifecycle()
return {
'number': self.number,
'title': self.title,
'html_url': self.html_url,
'created_at': self.created_at,
'updated_at': self.updated_at,
'closed_at': self.closed_at,
'spans': lifecycle['spans'],
'points': lifecycle['points'],
}
def lifecycle(self):
"""Description of the events of this issue's lifecycle.
Returns dict with:
final: Last milestone marked
points: (name, date) of milestones and open/close events
spans: ([statuses], start date, end date) describing each time period
in the issue's lifecycle.
[statuses] is the list of milestones in effect. The last in the list
will generally be the one of interest.
"""
statuses = ['opened', ]
result = {'spans': [], 'final': 'opened', 'points': []}
start_date = self.created_at
for event in self.events:
if event.event in ('milestoned', 'demilestoned', 'closed',
'reopened'):
if event.milestone and event.milestone in statuses:
continue
result['spans'].append({'milestones': statuses[:],
'start': start_date,
'end': event.created_at})
if event.event == 'demilestoned':
try:
statuses.remove(event.milestone)
except ValueError:
pass # sometimes they demilestone a nonexistent milestone!
elif event.event == 'milestoned':
statuses.append(event.milestone)
elif event.event in ('closed', 'reopened'):
statuses.append(event.event)
result['points'].append({'status': statuses[-1],
'at': event.created_at})
start_date = event.created_at
if self.closed_at:
if statuses[-1] != 'closed':
if self.closed_at > start_date:
result['spans'].append({'milestones': statuses[:],
'start': start_date,
'end': self.closed_at})
result['points'].append({'status': 'closed',
'at': self.closed_at})
else:
result['spans'].append({'milestones': statuses[:],
'start': start_date,
'end': datetime.now()})
result['final'] = [s for s in statuses
if s not in ('closed', 'reopened')][-1]
return result
class Person(db.Model):
login = db.Column(db.String(), primary_key=True)
url = db.Column(db.String(), nullable=True)
created = db.relationship('Issue',
foreign_keys=[Issue.creator_login, ],
backref='author')
assigned = db.relationship('Issue',
foreign_keys=[Issue.assignee_login, ],
backref='assignee')
@classmethod
def from_raw(cls, raw_data):
person = cls.query.filter_by(login=raw_data['login']).first()
if person:
person.url = raw_data.get('url')
else:
person = cls(login=raw_data['login'], url=raw_data.get('url'))
db.session.add(person)
db.session.flush() # TODO: ugh, all this flushing
return person
class Label(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String())
url = db.Column(db.String())
color = db.Column(db.String(), nullable=True)
@classmethod
def get_or_create(cls, label_data):
label = cls.query.filter_by(name=label_data['name']).first() \
or cls(**label_data)
return label
class Event(db.Model):
id = db.Column(db.Integer, primary_key=True)
commit_id = db.Column(db.String())
url = db.Column(db.String())
actor = db.Column(db.String())
event = db.Column(db.String())
milestone = db.Column(db.String())
created_at = db.Column(db.DateTime())
issue_id = db.Column(db.Integer, db.ForeignKey('issue.id'))
@classmethod
def from_raw(cls, event_data):
"Given dict of event data fetched from GitHub API, return instance"
insertable = dict(
id=event_data['id'],
commit_id=event_data['commit_id'],
url=event_data['url'],
actor=event_data['actor'].get('login') if event_data[
'actor'] else None,
milestone=event_data.get('milestone') and event_data['milestone'][
'title'],
event=event_data['event'],
created_at=to_py_datetime(event_data.get('created_at')), )
return cls(**insertable)
|
cc0-1.0
| 6,412,924,456,398,388,000
| 38.739348
| 94
| 0.521443
| false
| 4.206951
| false
| false
| false
|
cscutcher/naruto-aufs-layers
|
naruto/cli.py
|
1
|
8374
|
# -*- coding: utf-8 -*-
"""
Main group for naruto cli
"""
import io
import logging
import os
import pathlib
import shutil
import click
from naruto import NarutoLayer, LayerNotFound
DEV_LOGGER = logging.getLogger(__name__)
DEFAULT_NARUTO_HOME = pathlib.Path(os.path.expanduser('~/.naruto'))
DEFAULT_LOG_LEVEL = logging.INFO
class CLIContext(object):
'''
Context for CLI
'''
def __init__(self):
self.naruto_home = DEFAULT_NARUTO_HOME
cli_context = click.make_pass_decorator(CLIContext, ensure=True)
@click.group()
@click.option(
'--naruto-home',
default=str(DEFAULT_NARUTO_HOME),
type=click.Path(
file_okay=False,
dir_okay=True,
writable=True,
readable=True,
resolve_path=True,
exists=False),
help=(
'Set default config directory used to store and retrieve layers. Default: {}'.format(
DEFAULT_NARUTO_HOME)))
@click.option(
'--verbosity',
'-V',
help='Set verbosity level explicitly (int or CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET)',
default=DEFAULT_LOG_LEVEL,
type=str)
@cli_context
def naruto_cli(ctx, naruto_home, verbosity):
'''
CLI for naruto
'''
try:
verbosity = int(verbosity)
except ValueError:
#Ints and strings are ok
pass
logging.basicConfig(level=verbosity)
DEV_LOGGER.debug('Set log level to %s', verbosity)
ctx.naruto_home = pathlib.Path(naruto_home)
DEV_LOGGER.debug('Home path is %r', ctx.naruto_home)
class _LayerLookup(click.ParamType):
'''
Type which loads naruto dir
'''
name = 'NarutoDir'
def __init__(self, allow_discovery=True):
self._allow_discovery = allow_discovery
def convert(self, value, param, local_context):
'''
Parse Naruto argument
'''
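        # Accepted forms (illustrative): '' (auto-discover from the current
        # directory), 'name' (layer stored under the naruto home), or a raw
        # path such as './layers/foo', optionally followed by ':<layer spec>'.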
DEV_LOGGER.debug('Trying to find root layer for value %r', value)
root_spec, _, layer_spec = value.partition(':')
cli_context = local_context.ensure_object(CLIContext)
if not root_spec and self._allow_discovery:
try:
layer = NarutoLayer.find_layer_mounted_at_dest(pathlib.Path(os.getcwd()))
except LayerNotFound:
self.fail(
'Couldn\'t auto-discover layer. '
                    'You must be in a directory which is a mounted layer for auto-discovery to work')
else:
if os.sep in root_spec:
naruto_root = pathlib.Path(root_spec)
else:
naruto_root = cli_context.naruto_home / root_spec
try:
naruto_root, = tuple(naruto_root.iterdir())
except FileNotFoundError:
self.fail('Directory {} does not exist'.format(naruto_root))
except ValueError:
self.fail('Unexpected number of folders in {}'.format(naruto_root))
try:
layer = NarutoLayer(naruto_root)
except LayerNotFound:
self.fail('{!s} is not a layer.'.format(naruto_root))
if layer_spec:
layer = layer.find_layer(layer_spec)
DEV_LOGGER.debug('Parsed layer at %r from cli', layer)
return layer
@naruto_cli.command()
@click.argument('name_or_path')
@click.option('--description', help='Add description to new naruto layer')
@cli_context
def create(ctx, name_or_path, description):
'''
Create new NarutoLayer
'''
if os.sep in name_or_path:
path = pathlib.Path(name_or_path)
DEV_LOGGER.info('Creating at raw path %r', path)
else:
home_naruto_dir = ctx.naruto_home
if not home_naruto_dir.is_dir():
home_naruto_dir.mkdir()
home_naruto_dir = home_naruto_dir.resolve()
path = home_naruto_dir / name_or_path
if not path.is_dir():
path.mkdir()
# Check nothing nasty from user
assert path.parent == home_naruto_dir
        DEV_LOGGER.info('Creating %r in naruto home %r', name_or_path, home_naruto_dir)
if len(tuple(path.iterdir())) != 0:
raise Exception('Expected create directory {!s} to be empty'.format(path))
NarutoLayer.create(path, description=description)
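# Illustrative invocations (assuming the CLI entry point is exposed as `naruto`):
#   naruto create mylayer --description 'base layer'
#   naruto create /tmp/layers/mylayer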
@naruto_cli.command()
@cli_context
def list_home_layers(ctx):
'''
List layers stored in home directory
'''
for path in ctx.naruto_home.iterdir():
click.echo(str(path))
#################################################################################################
## Commands that modify or inspect existing layers
#################################################################################################
def _modification_command(fn):
'''
Add common options for modification
'''
fn = naruto_cli.command()(fn)
layer_lookup_help = (
'This specifies the layer you want to act upon. '
'If not specified we will try and discover the layer you have currently mounted.')
fn = click.option('-l', '--layer', type=_LayerLookup(), default='', help=layer_lookup_help)(fn)
return fn
class InfoNodeAdapter(object):
'''
Adapt NarutoLayer for info printout
'''
def __init__(self, layer):
self._layer = layer
def output(self, io_stream, level, highlight):
io_stream.write('{indent}+-- {highlight}{layer!s}{highlight}\n'.format(
indent=' ' * level,
layer=self._layer,
highlight='!!!!' if self._layer in highlight else ''))
for child in self._layer:
self.__class__(child).output(io_stream, level + 1, highlight)
@_modification_command
def info(layer):
'''
Get info about a layer
'''
io_stream = io.StringIO()
InfoNodeAdapter(layer.get_root()).output(io_stream, 0, highlight=(layer,))
click.echo(io_stream.getvalue())
@_modification_command
@click.argument('mount_dest')
def mount(layer, mount_dest):
'''
Mount a layer
'''
layer.mount(mount_dest)
@_modification_command
@click.argument('mount_dest')
@click.option('--description', help='Add description to new naruto layer')
def branch_and_mount(layer, mount_dest, description):
'''
Branch a layer and mount at new dest
'''
layer.create_child(description=description).mount(mount_dest)
@_modification_command
def unmount_all(layer):
'''
Unmount all uses of this layer
'''
layer.unmount_all()
@_modification_command
def find_mounts(layer):
'''
Find where layer is mounted
'''
for branch in layer.find_mounted_branches_iter():
click.echo('{branch.path}={branch.permission} at {branch.mount_point}'.format(
branch=branch))
@_modification_command
@click.option('--no-prompt', default=False, is_flag=True)
def delete(layer, no_prompt):
'''
Delete a layer
'''
if no_prompt:
confirm = click.echo
else:
confirm = lambda message: click.confirm(message, abort=True)
if layer.has_children:
click.secho(
'WARNING: This layer has {} direct children and a further {} descendants.'.format(
len(layer.children),
len(layer.descendants)),
fg='red')
if layer.mounted:
confirm(
'{} is currently mounted. Must unmount first. Continue?'.format(layer))
layer.unmount_all()
confirm(
click.style(
            'This will irreversibly delete {} and all {} descendants. Continue?'.format(
layer, len(layer.descendants)),
fg='red'))
shutil.rmtree(str(layer.path.resolve()))
@_modification_command
@click.argument('description', default='')
def description(layer, description):
'''
Get set layer description
'''
if description:
layer.description = description
else:
click.echo(layer.description)
@_modification_command
@click.argument('tags', nargs=-1)
def tags(layer, tags):
'''
Get set tags
'''
if tags:
layer.tags = tags
else:
click.echo(', '.join(layer.tags))
@_modification_command
@click.argument('tags', nargs=-1)
def add_tags(layer, tags):
''' Add tag to layer'''
layer.tags = layer.tags.union(tags)
@_modification_command
@click.argument('tags', nargs=-1)
def remove_tags(layer, tags):
''' Remove tag from layer'''
layer.tags = layer.tags.difference(tags)
|
gpl-3.0
| -2,532,855,138,760,252,400
| 26.455738
| 99
| 0.597683
| false
| 3.790856
| false
| false
| false
|
pas256/troposphere
|
troposphere/servicecatalog.py
|
1
|
4652
|
# Copyright (c) 2012-2018, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
from .validators import boolean
class AcceptedPortfolioShare(AWSObject):
resource_type = "AWS::ServiceCatalog::AcceptedPortfolioShare"
props = {
'AcceptLanguage': (basestring, False),
'PortfolioId': (basestring, True),
}
class ProvisioningArtifactProperties(AWSProperty):
props = {
'Description': (basestring, False),
'Info': (dict, True),
'Name': (basestring, False),
}
class CloudFormationProduct(AWSObject):
resource_type = "AWS::ServiceCatalog::CloudFormationProduct"
props = {
'AcceptLanguage': (basestring, False),
'Description': (basestring, False),
'Distributor': (basestring, False),
'Name': (basestring, True),
'Owner': (basestring, True),
'ProvisioningArtifactParameters':
([ProvisioningArtifactProperties], True),
'SupportDescription': (basestring, False),
'SupportEmail': (basestring, False),
'SupportUrl': (basestring, False),
'Tags': (Tags, False),
}
class ProvisioningParameter(AWSProperty):
props = {
'Key': (basestring, False),
'Value': (basestring, False),
}
class CloudFormationProvisionedProduct(AWSObject):
resource_type = "AWS::ServiceCatalog::CloudFormationProvisionedProduct"
props = {
'AcceptLanguage': (basestring, False),
'NotificationArns': ([basestring], False),
'PathId': (basestring, False),
'ProductId': (basestring, False),
'ProductName': (basestring, False),
'ProvisionedProductName': (basestring, False),
'ProvisioningArtifactId': (basestring, False),
'ProvisioningArtifactName': (basestring, False),
'ProvisioningParameters': ([ProvisioningParameter], False),
'Tags': (Tags, False),
}
class LaunchNotificationConstraint(AWSObject):
resource_type = "AWS::ServiceCatalog::LaunchNotificationConstraint"
props = {
'AcceptLanguage': (basestring, False),
'Description': (basestring, False),
'NotificationArns': ([basestring], True),
'PortfolioId': (basestring, True),
'ProductId': (basestring, True),
}
class LaunchRoleConstraint(AWSObject):
resource_type = "AWS::ServiceCatalog::LaunchRoleConstraint"
props = {
'AcceptLanguage': (basestring, False),
'Description': (basestring, False),
'PortfolioId': (basestring, True),
'ProductId': (basestring, True),
'RoleArn': (basestring, True),
}
class LaunchTemplateConstraint(AWSObject):
resource_type = "AWS::ServiceCatalog::LaunchTemplateConstraint"
props = {
'AcceptLanguage': (basestring, False),
'Description': (basestring, False),
'PortfolioId': (basestring, True),
'ProductId': (basestring, True),
'Rules': (basestring, True),
}
class Portfolio(AWSObject):
resource_type = "AWS::ServiceCatalog::Portfolio"
props = {
'AcceptLanguage': (basestring, False),
'Description': (basestring, False),
'DisplayName': (basestring, True),
'ProviderName': (basestring, True),
'Tags': (Tags, False),
}
class PortfolioPrincipalAssociation(AWSObject):
resource_type = "AWS::ServiceCatalog::PortfolioPrincipalAssociation"
props = {
'AcceptLanguage': (basestring, False),
'PortfolioId': (basestring, True),
'PrincipalARN': (basestring, True),
'PrincipalType': (basestring, True),
}
class PortfolioProductAssociation(AWSObject):
resource_type = "AWS::ServiceCatalog::PortfolioProductAssociation"
props = {
'AcceptLanguage': (basestring, False),
'PortfolioId': (basestring, True),
'ProductId': (basestring, True),
'SourcePortfolioId': (basestring, False),
}
class PortfolioShare(AWSObject):
resource_type = "AWS::ServiceCatalog::PortfolioShare"
props = {
'AcceptLanguage': (basestring, False),
'AccountId': (basestring, True),
'PortfolioId': (basestring, True),
}
class TagOption(AWSObject):
resource_type = "AWS::ServiceCatalog::TagOption"
props = {
'Active': (boolean, False),
'Key': (basestring, True),
'Value': (basestring, True),
}
class TagOptionAssociation(AWSObject):
resource_type = "AWS::ServiceCatalog::TagOptionAssociation"
props = {
'ResourceId': (basestring, True),
'TagOptionId': (basestring, True),
}
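# Illustrative use with a troposphere Template (not part of this module):
#   from troposphere import Template
#   t = Template()
#   t.add_resource(Portfolio('DemoPortfolio', DisplayName='Demo',
#                            ProviderName='IT'))
#   print(t.to_json())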
|
bsd-2-clause
| -6,446,356,054,065,016,000
| 27.193939
| 75
| 0.634136
| false
| 4.161002
| false
| false
| false
|
robertmattmueller/sdac-compiler
|
tests/test_normalization.py
|
1
|
1628
|
try:
# Python 2
from StringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
import pddl
from pddl_to_prolog import Rule, PrologProgram
def test_normalization():
prog = PrologProgram()
prog.add_fact(pddl.Atom("at", ["foo", "bar"]))
prog.add_fact(pddl.Atom("truck", ["bollerwagen"]))
prog.add_fact(pddl.Atom("truck", ["segway"]))
prog.add_rule(Rule([pddl.Atom("truck", ["?X"])], pddl.Atom("at", ["?X", "?Y"])))
prog.add_rule(Rule([pddl.Atom("truck", ["X"]), pddl.Atom("location", ["?Y"])],
pddl.Atom("at", ["?X", "?Y"])))
prog.add_rule(Rule([pddl.Atom("truck", ["?X"]), pddl.Atom("location", ["?Y"])],
pddl.Atom("at", ["?X", "?X"])))
prog.add_rule(Rule([pddl.Atom("p", ["?Y", "?Z", "?Y", "?Z"])],
pddl.Atom("q", ["?Y", "?Y"])))
prog.add_rule(Rule([], pddl.Atom("foo", [])))
prog.add_rule(Rule([], pddl.Atom("bar", ["X"])))
prog.normalize()
output = StringIO()
prog.dump(file=output)
sorted_output = "\n".join(sorted(output.getvalue().splitlines()))
assert sorted_output == """\
Atom @object(bar).
Atom @object(bollerwagen).
Atom @object(foo).
Atom @object(segway).
Atom at(foo, bar).
Atom bar(X).
Atom foo().
Atom truck(bollerwagen).
Atom truck(segway).
none Atom at(?X, ?X@0) :- Atom truck(?X), Atom location(?Y), Atom =(?X, ?X@0).
none Atom at(?X, ?Y) :- Atom truck(?X), Atom @object(?Y).
none Atom at(?X, ?Y) :- Atom truck(X), Atom location(?Y), Atom @object(?X).
none Atom q(?Y, ?Y@0) :- Atom p(?Y, ?Z, ?Y, ?Z), Atom =(?Y, ?Y@0), Atom =(?Y, ?Y@1), Atom =(?Z, ?Z@2)."""
|
gpl-3.0
| -3,731,900,976,421,474,300
| 37.761905
| 105
| 0.556511
| false
| 2.754653
| false
| false
| false
|
jacobajit/ion
|
intranet/middleware/ldap_db.py
|
1
|
1702
|
# -*- coding: utf-8 -*-
import logging
from django.contrib import messages
from ..db.ldap_db import LDAPConnection
logger = logging.getLogger(__name__)
class CheckLDAPBindMiddleware:
def process_response(self, request, response):
if not hasattr(request, "user") or "_auth_user_backend" not in request.session or not request.user.is_authenticated():
# Nothing to check if user isn't already logged in
return response
auth_backend = request.session["_auth_user_backend"]
kerberos_backend = "KerberosAuthenticationBackend"
if LDAPConnection().did_use_simple_bind() and auth_backend.startswith(kerberos_backend):
# if request.user.is_eighth_admin:
# logger.info("Simple bind being used: staying logged in because eighth admin.")
# return response
logger.info("LDAP simple bind being used for {}".format(request.user if request.user else None))
messages.error(request, "Access to directory information may be limited: LDAP issue. Try logging out and back in.")
"""
logger.info("Simple bind being used: Destroying kerberos cache and logging out")
try:
kerberos_cache = request.session["KRB5CCNAME"]
os.system("/usr/bin/kdestroy -c " + kerberos_cache)
except KeyError:
pass
logout(request)
response = redirect("login")
url = response["Location"]
response["Location"] = urls.add_get_parameters(
url, {"next": request.path}, percent_encode=False)
return response
"""
return response
|
gpl-2.0
| -8,821,804,357,833,348,000
| 38.581395
| 127
| 0.619271
| false
| 4.478947
| false
| false
| false
|
amarfurt/arr
|
remote.py
|
1
|
1112
|
"""
Starts the remote control worker.
"""
import os
import logging
import argparse
from workers.controller import Controller
def parse_args():
parser = argparse.ArgumentParser(description='Starts the remote control worker.')
parser.add_argument('--logpath', default=os.path.expanduser('~/logs/arr.log'),
help='Path to logfile.')
parser.add_argument('--loglevel', default='INFO', help='Logging level.')
return parser.parse_args()
def main(args):
# configure logging
logformat = '[%(asctime)s][%(name)s][%(levelname)s] %(message)s'
loglevel = logging.getLevelName(args.loglevel)
logging.basicConfig(filename=args.logpath, format=logformat, level=loglevel)
logging.getLogger('pika').setLevel(logging.WARNING)
log = logging.getLogger('main')
log.info('Starting system...')
# start control worker
log.info('Starting control worker...')
c = Controller('localhost', 'control')
c.start()
c.add_cpu()
log.info('System started')
c.join()
log.info('System stopped')
if __name__ == '__main__':
main(parse_args())
|
mit
| -6,781,648,225,161,846,000
| 28.263158
| 85
| 0.66277
| false
| 3.915493
| false
| false
| false
|
migasfree/migasfree
|
migasfree/catalog/migrations/0003_4_14_packages_by_project.py
|
1
|
2349
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import migasfree.server.models.common
class Migration(migrations.Migration):
dependencies = [
('server', '0022_4_14_computers'),
('catalog', '0002_4_14_versions'),
]
operations = [
migrations.CreateModel(
name='PackagesByProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('packages_to_install', models.TextField(blank=True, verbose_name='packages to install')),
],
options={
'verbose_name': 'Packages by Project',
'verbose_name_plural': 'Packages by Projects',
'permissions': (('can_save_packagesbyproject', 'Can save packages by project'),),
},
bases=(models.Model, migasfree.server.models.common.MigasLink),
),
migrations.AlterField(
model_name='application',
name='name',
field=models.CharField(max_length=50, unique=True, verbose_name='name'),
),
migrations.AlterUniqueTogether(
name='application',
unique_together=set([]),
),
migrations.AddField(
model_name='packagesbyproject',
name='application',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='catalog.Application', verbose_name='application',
related_name='packages_by_project'
),
),
migrations.AddField(
model_name='packagesbyproject',
name='project',
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='server.Project', verbose_name='project'
),
),
migrations.RemoveField(
model_name='application',
name='packages_to_install',
),
migrations.RemoveField(
model_name='application',
name='project',
),
migrations.AlterUniqueTogether(
name='packagesbyproject',
unique_together={('application', 'project')},
),
]
|
gpl-3.0
| -6,319,951,951,391,928,000
| 33.043478
| 114
| 0.553427
| false
| 4.651485
| false
| false
| false
|
robertostling/bnas
|
bnas/model.py
|
1
|
50808
|
"""Network models and submodels.
The :class:`Model` class is used to encapsulate a set of Theano shared
variables (model parameters), and can create symbolic expressions for model
outputs and loss functions.
This module also contains subclasses, such as :class:`Linear`, that function
as building blocks for more complex networks.
"""
from collections import OrderedDict
import pickle
import sys
import numpy as np
import theano
from theano.ifelse import ifelse
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano import tensor as T
from . import init
from . import search
from .fun import train_mode, function
from .utils import expand_to_batch, softmax_masked, softmax_3d, softmax_4d
class Model:
"""Base class for neural network models.
Attributes
----------
name : str
Name of the model.
params : OrderedDict of str -> :class:`theano.compile.sharedvalue.SharedVariable`
Mapping from parameter names to Theano shared variables. Note that
submodel parameters are not included, so this should normally not be
accessed directly, rather use `self.parameters()`.
regularization : list of Theano symbolic expressions
These expressions should all be added to the loss function when
optimizing. Use `self.regularize()` to modify.
"""
def __init__(self, name):
"""Initialize an empty model.
Parameters
----------
name : str
Name of the model.
"""
self.name = name
self.params = OrderedDict()
self.regularization = []
self.submodels = OrderedDict()
def loss(self):
"""Part of the loss function that is independent of inputs."""
terms = [submodel.loss() for submodel in self.submodels.values()] \
+ self.regularization
return sum(terms, T.as_tensor_variable(0.0))
def parameters(self, include_submodels=True):
"""Iterate over the parameters of this model and its submodels.
Each value produced by the iterator is a tuple (name, value), where
the name is a tuple of strings describing the hierarchy of submodels,
e.g. ('hidden', 'b'), and the value is a Theano shared variable.
Parameters
----------
include_submodels : bool
If ``True`` (default), also iterate over submodel parameters.
"""
for name, p in self.params.items():
yield ((name,), p)
if include_submodels:
for submodel in self.submodels.values():
for name, p in submodel.parameters():
yield ((submodel.name,) + name, p)
def summarize(self, grads, f=sys.stdout):
def tensor_stats(m):
return ', '.join([
'norm = %g' % np.sqrt((m*m).sum()),
'maxabs = %g' % np.abs(m).max(),
'minabs = %g' % np.abs(m).min()])
def summarize_parameter(name, p, g):
p_stats = tensor_stats(p)
g_stats = tensor_stats(g)
print('%s\n parameter %s\n gradient %s' % (
name, p_stats, g_stats),
file=f)
params = list(self.parameters())
assert len(grads) == len(params)
for (name, p), grad in zip(params, grads):
summarize_parameter('.'.join(name), p.get_value(), grad)
f.flush()
def parameters_list(self, include_submodels=True):
"""Return a list with parameters, without their names."""
return list(p for name, p in
self.parameters(include_submodels=include_submodels))
def parameter(self, name):
"""Return the parameter with the given name.
Parameters
----------
name : tuple of str
Path to variable, e.g. ('hidden', 'b') to find the parameter 'b'
in the submodel 'hidden'.
Returns
-------
value : :class:`theano.compile.sharedvalue.SharedVariable`
"""
if not isinstance(name, tuple):
raise TypeError('Expected tuple, got %s' % type(name))
if len(name) == 1:
return self.params[name[0]]
elif len(name) >= 2:
return self.submodels[name[0]].parameter(name[1:])
else:
raise ValueError('Name tuple must not be empty!')
def parameter_count(self):
"""Return the total number of parameters of the model."""
return sum(p.get_value(borrow=True).size for _,p in self.parameters())
def param(self, name, dims, init_f=None,
value=None, dtype=theano.config.floatX):
"""Create a new parameter, or share an existing one.
Parameters
----------
name : str
Name of parameter, this will be used directly in `self.params`
and used to create `self._name`.
dims : tuple
Shape of the parameter vector.
value : :class:`theano.compile.sharedvalue.SharedVariable`, optional
If this parameter should be shared, a SharedVariable instance can
be passed here.
init_f : (tuple => numpy.ndarray)
Function used to initialize the parameter vector.
dtype : str or numpy.dtype
Data type (default is `theano.config.floatX`)
Returns
-------
p : :class:`theano.compile.sharedvalue.SharedVariable`
"""
if name in self.params:
if not value is None:
raise ValueError('Trying to add a shared parameter (%s), '
'but a parameter with the same name already '
'exists in %s!' % (name, self.name))
return self.params[name]
if value is None:
if init_f is None:
raise ValueError('Creating new parameter, but no '
'initialization specified!')
p = theano.shared(init_f(dims, dtype=dtype), name=name)
self.params[name] = p
else:
p = value
setattr(self, '_'+name, p)
return p
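    # Illustrative call (hypothetical shape): self.param('w', (128, 64),
    # init_f=init.Gaussian(fan_in=128)) creates and registers self._w.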
def regularize(self, p, regularizer):
"""Add regularization to a parameter.
Parameters
----------
p : :class:`theano.compile.sharedvalue.SharedVariable`
Parameter to apply regularization
regularizer : function
Regularization function, which should return a symbolic
expression.
"""
if not regularizer is None:
self.regularization.append(regularizer(p))
def add(self, submodel):
"""Import parameters from a submodel.
If a submodel named "hidden" has a parameter "b", it will be imported
as "hidden_b", also accessible as `self._hidden_b`.
Parameters
----------
submodel : :class:`.Model`
Returns
-------
submodel : :class:`.Model`
Equal to the parameter, for convenience.
"""
if submodel.name in self.submodels:
raise ValueError('Submodel with name %s already exists in %s!' % (
submodel.name, self.name))
self.submodels[submodel.name] = submodel
setattr(self, submodel.name, submodel)
return submodel
def save(self, f, include_submodels=True):
"""Save the parameter values of this model to a file object.
Parameters
----------
f : file
File object to write to, assumed to be opened in 'wb' mode.
include_submodels : bool
If ``True`` (default), also save submodel parameters.
"""
pickle.dump({name: p.get_value(borrow=True)
for name, p in self.parameters(
include_submodels=include_submodels)},
f, -1)
def load(self, f, allow_incomplete=False, allow_unused=False):
"""Load (some) weights of this model from a file object.
Parameters
----------
f : file
            File object to read from, assumed to be opened in 'rb' mode.
allow_incomplete : bool
If ``False``, throw a `ValueError` if some model parameters are
missing in the file.
allow_unused : bool
If ``False``, throw a `ValueError` if the file contains model
parameters that are not used in this model.
"""
data = pickle.load(f)
parameters = dict(self.parameters())
names = frozenset(data.keys()) & frozenset(parameters.keys())
if not allow_incomplete and len(names) < len(parameters):
diff = sorted(frozenset(parameters.keys()) - names)
raise ValueError(
'The following parameters are missing: %s' % ', '.join(
'.'.join(t) for t in diff))
if not allow_unused and len(names) < len(data):
diff = sorted(frozenset(data.keys()) - names)
raise ValueError(
'The following parameters are unused: %s' % ', '.join(
'.'.join(t) for t in diff))
for name in names:
value = data[name]
old_value = parameters[name].get_value(borrow=True)
if value.shape != old_value.shape:
raise ValueError(
'Loaded shape is %s but %s expected' % (
value.shape, old_value.shape))
parameters[name].set_value(value)
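    # Illustrative save/load round trip (hypothetical file name):
    #   with open('model.pkl', 'wb') as f: model.save(f)
    #   with open('model.pkl', 'rb') as f: model.load(f)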
def compile(self, *args):
return function(list(args), self(*args))
class Linear(Model):
"""Fully connected linear layer.
This layer creates one shared parameter, `w` of shape
    `(input_dims, output_dims)`. If `use_bias` is ``True``, it also
    creates `b` of shape `(output_dims,)` for biases.
Parameters
----------
name : str
Name of layer.
input_dims : int
Number of inputs.
output_dims : int
Number of outputs.
w : :class:`theano.compile.sharedvalue.SharedVariable`
Weight vector to use, or pass ``None`` (default) to create a new
one.
w_init : :class:`.init.InitializationFunction`
Initialization for weight vector, in case `w` is ``None``.
w_regularizer : :class:`.regularize.Regularizer`, optional
Regularization for weight matrix.
b : :class:`theano.compile.sharedvalue.SharedVariable`
Bias vector to use, or pass ``None`` (default) to create a new
one.
b_init : :class:`.init.InitializationFunction`
Initialization for bias vector, in case `b` is ``None``.
b_regularizer : :class:`.regularize.Regularizer`, optional
Regularization for biases.
use_bias : bool
If ``False``, no bias is used and the `b` and `b_init` parameters
are ignored.
dropout : float
Dropout factor (the default value of 0 means dropout is not used).
layernorm : bool
If ``True``, layer normalization is used on the activations.
"""
def __init__(self, name, input_dims, output_dims,
w=None, w_init=None, w_regularizer=None,
b=None, b_init=None, b_regularizer=None,
use_bias=True, dropout=0, layernorm=False):
super().__init__(name)
self.input_dims = input_dims
self.output_dims = output_dims
self.use_bias = use_bias
self.dropout = dropout
self.layernorm = layernorm
if w_init is None: w_init = init.Gaussian(fan_in=input_dims)
if b_init is None: b_init = init.Constant(0.0)
self.param('w', (input_dims, output_dims), init_f=w_init, value=w)
self.regularize(self._w, w_regularizer)
if use_bias:
self.param('b', (output_dims,), init_f=b_init, value=b)
self.regularize(self._b, b_regularizer)
if dropout:
self.add(Dropout('dropout', dropout))
if layernorm:
self.add(LayerNormalization('ln', (None, output_dims)))
def __call__(self, inputs):
outputs = T.dot(inputs, self._w)
if self.layernorm: outputs = self.ln(outputs)
if self.use_bias: outputs = outputs + self._b
if self.dropout: outputs = self.dropout(outputs)
return outputs
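# Illustrative usage (hypothetical sizes, not part of the original module):
#   hidden = Linear('hidden', 100, 50, dropout=0.2)
#   y = hidden(T.matrix('x'))  # symbolic batch of 100-dim inputs -> 50 dims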
class Embeddings(Model):
"""Embeddings layer.
This layer creates one shared parameter, `w` of shape
`(alphabet_size, embedding_dims)`.
Parameters
----------
name : str
Name of layer.
alphabet_size : int
Size of symbol alphabet.
embedding_dims : int
Dimensionality of embeddings.
w : :class:`theano.compile.sharedvalue.SharedVariable`
Weight vector to use, or pass ``None`` (default) to create a new
one.
w_init : :class:`.init.InitializationFunction`
Initialization for weight vector, in case `w` is ``None``.
w_regularizer : :class:`.regularize.Regularizer`, optional
Regularization for weight matrix.
dropout : float
Dropout factor (the default value of 0 means dropout is not used).
"""
def __init__(self, name, alphabet_size, embedding_dims,
w=None, w_init=None, w_regularizer=None,
dropout=0):
super().__init__(name)
self.embedding_dims = embedding_dims
self.alphabet_size = alphabet_size
self.dropout = dropout
if w_init is None: w_init = init.Gaussian(fan_in=embedding_dims)
self.param('w',
(alphabet_size, embedding_dims), init_f=w_init, value=w)
self.regularize(self._w, w_regularizer)
if dropout:
self.add(Dropout('dropout', dropout, sequence=True))
def __call__(self, inputs):
outputs = self._w[inputs]
if self.dropout: outputs = self.dropout(outputs)
return outputs
class Conv1D(Model):
"""1D convolution layer with linear activations.
The input shape is assumed to be (batch_size, length, dims).
"""
def __init__(self, name, input_dims, output_dims,
filter_dims=3, stride=1,
f=None, f_init=None, f_regularizer=None,
b=None, b_init=None, b_regularizer=None):
super().__init__(name)
if f_init is None:
f_init = init.Gaussian(fan_in=filter_dims*input_dims)
if b_init is None:
b_init = init.Constant(0.0)
self.stride = stride
self.input_dims = input_dims
self.f_shape = (output_dims, input_dims, filter_dims, 1)
self.param('f', self.f_shape, init_f=f_init)
self.param('b', (output_dims,), init_f=b_init)
def __call__(self, inputs, inputs_mask):
x = T.nnet.conv2d(
(inputs * inputs_mask.dimshuffle(0,1,'x')
).dimshuffle(0,2,1,'x'),
self._f,
input_shape=(None, self.input_dims, None, 1),
filter_shape=self.f_shape,
border_mode='half',
subsample=(self.stride, 1),
filter_flip=True)
batch_size = inputs.shape[0]
length = inputs.shape[1]
dims = inputs.shape[2]
x = x.reshape((batch_size, dims, length)).dimshuffle(0,2,1)
return x + self._b.dimshuffle('x','x',0)
class LSTM(Model):
"""Long Short-Term Memory.
name : str
Name of layer.
input_dims : int
Length of each vector in the input sequence.
state_dims : int
        Size of internal states. An LSTM contains two states, each of which will
be of size state_dims.
attention_dims : int
If specified, use attention and let this be the size of the hidden
attention state.
    attended_dims : int
Dimensionality of the sequence to have attention on.
layernorm : str
One of `'ba1'` (eq 20--22 of Ba et al.), `'ba2'` (eq 29--31) or
`False` (no layer normalization).
"""
def __init__(self, name, input_dims, state_dims,
w=None, w_init=None, w_regularizer=None,
u=None, u_init=None, u_regularizer=None,
b=None, b_init=None, b_regularizer=None,
attention_dims=None, attended_dims=None,
layernorm=False, contextgate=False):
super().__init__(name)
assert layernorm in (False, 'ba1', 'ba2')
assert (attention_dims is None) == (attended_dims is None)
assert not (contextgate and (attention_dims is None))
self.n_states = 2
if attended_dims is not None:
if not contextgate:
input_dims += attended_dims
self.input_dims = input_dims
self.state_dims = state_dims
self.layernorm = layernorm
self.attention_dims = attention_dims
self.attended_dims = attended_dims
self.use_attention = attention_dims is not None
self.use_contextgate = contextgate
if w_init is None: w_init = init.Gaussian(fan_in=input_dims)
if u_init is None: u_init = init.Concatenated(
[init.Orthogonal()]*4, axis=1)
if b_init is None: b_init = init.Concatenated(
[init.Constant(x) for x in [0.0, 1.0, 0.0, 0.0]])
if self.use_contextgate:
self.param('wzg', (input_dims, state_dims*2),
init_f=init.Gaussian(fan_in=input_dims))
self.param('uzg', (state_dims, state_dims*2),
init_f=init.Concatenated([init.Orthogonal()]*2, axis=1))
self.param('bzg', (state_dims*2,), init_f=init.Constant(0.0))
self.param('czs', (attended_dims, state_dims*2),
init_f=init.Gaussian(fan_in=attended_dims))
self.param('bs', (state_dims,), init_f=init.Constant(0.0))
self.param('w', (state_dims, state_dims*4), init_f=w_init, value=w)
self.param('u', (state_dims, state_dims*4), init_f=u_init, value=u)
self.param('b', (state_dims*4,), init_f=b_init, value=b)
else:
self.param('w', (input_dims, state_dims*4), init_f=w_init, value=w)
self.param('u', (state_dims, state_dims*4), init_f=u_init, value=u)
self.param('b', (state_dims*4,), init_f=b_init, value=b)
if self.use_attention:
self.add(Linear('attention_u', attended_dims, attention_dims))
self.param('attention_w', (state_dims, attention_dims),
init_f=init.Gaussian(fan_in=state_dims))
self.param('attention_v', (attention_dims,),
init_f=init.Gaussian(fan_in=attention_dims))
self.regularize(self._attention_w, w_regularizer)
if layernorm == 'ba1':
self.add(LayerNormalization('ln_a', (None, attention_dims)))
self.regularize(self._w, w_regularizer)
self.regularize(self._u, u_regularizer)
self.regularize(self._b, b_regularizer)
if layernorm == 'ba1':
self.add(LayerNormalization('ln_1', (None, state_dims*4)))
self.add(LayerNormalization('ln_2', (None, state_dims*4)))
if layernorm:
self.add(LayerNormalization('ln_h', (None, state_dims)))
def __call__(self, inputs, h_tm1, c_tm1,
attended=None, attended_dot_u=None, attention_mask=None):
if self.use_attention:
# Non-precomputed part of the attention vector for this time step
# _ x batch_size x attention_dims
h_dot_w = T.dot(h_tm1, self._attention_w)
if self.layernorm == 'ba1': h_dot_w = self.ln_a(h_dot_w)
h_dot_w = h_dot_w.dimshuffle('x',0,1)
# Attention vector, with distributions over the positions in
# attended. Elements that fall outside the sentence in each batch
# are set to zero.
# sequence_length x batch_size
# Note that attention.T is returned
attention = softmax_masked(
T.dot(
T.tanh(attended_dot_u + h_dot_w),
self._attention_v).T,
attention_mask.T).T
# Compressed attended vector, weighted by the attention vector
# batch_size x attended_dims
compressed = (attended * attention.dimshuffle(0,1,'x')).sum(axis=0)
# Append the compressed vector to the inputs and continue as usual
if not self.use_contextgate:
inputs = T.concatenate([inputs, compressed], axis=1)
else:
zg = (T.dot(inputs, self._wzg) + T.dot(h_tm1, self._uzg) +
self._bzg.dimshuffle('x', 0))
zs = T.dot(compressed, self._czs)
def part(m,i):
return m[:, i*self.state_dims:(i+1)*self.state_dims]
z = T.nnet.sigmoid(part(zg,0) + part(zs,0))
g = part(zg,1)
s = part(zs,1) + self._bs.dimshuffle('x', 0)
inputs = z*s + (1-z)*g
if self.layernorm == 'ba1':
x = (self.ln_1(T.dot(inputs, self._w)) +
self.ln_2(T.dot(h_tm1, self._u)))
else:
x = T.dot(inputs, self._w) + T.dot(h_tm1, self._u)
x = x + self._b.dimshuffle('x', 0)
def x_part(i): return x[:, i*self.state_dims:(i+1)*self.state_dims]
i = T.nnet.sigmoid(x_part(0))
f = T.nnet.sigmoid(x_part(1))
o = T.nnet.sigmoid(x_part(2))
c = T.tanh( x_part(3))
c_t = f*c_tm1 + i*c
h_t = o*T.tanh(self.ln_h(c_t) if self.layernorm else c_t)
if self.use_attention:
return h_t, c_t, attention.T
else:
return h_t, c_t
class LSTMSequence(Model):
def __init__(self, name, backwards, *args,
dropout=0, trainable_initial=False, offset=0, **kwargs):
super().__init__(name)
self.backwards = backwards
self.trainable_initial = trainable_initial
self.offset = offset
self._step_fun = None
self._attention_u_fun = None
self.add(Dropout('dropout', dropout))
self.add(LSTM('gate', *args, **kwargs))
if self.trainable_initial:
self.param('h_0', (self.gate.state_dims,),
init_f=init.Gaussian(fan_in=self.gate.state_dims))
self.param('c_0', (self.gate.state_dims,),
init_f=init.Gaussian(fan_in=self.gate.state_dims))
def step(self, inputs, inputs_mask, h_tm1, c_tm1, h_mask, *non_sequences):
if self.gate.use_attention:
# attended is the
# src_sequence_length x batch_size x attention_dims
# matrix which we have attention on.
#
# attended_dot_u is the h_t-independent part of the final
# attention vectors, which is precomputed for efficiency.
#
# attention_mask is a binary mask over the valid elements of
# attended, which in practice is the same as the mask passed to
# the encoder that created attended. Size
# src_sequence_length x batch_size
h_t, c_t, attention = self.gate(
inputs, h_tm1 * h_mask.astype(theano.config.floatX), c_tm1,
attended=non_sequences[0],
attended_dot_u=non_sequences[1],
attention_mask=non_sequences[2])
return (T.switch(inputs_mask.dimshuffle(0, 'x'), h_t, h_tm1),
T.switch(inputs_mask.dimshuffle(0, 'x'), c_t, c_tm1),
attention)
else:
h_t, c_t = self.gate(
inputs, h_tm1 * h_mask.astype(theano.config.floatX), c_tm1)
return (T.switch(inputs_mask.dimshuffle(0, 'x'), h_t, h_tm1),
T.switch(inputs_mask.dimshuffle(0, 'x'), c_t, c_tm1))
def step_fun(self):
if self._step_fun is None:
inputs = T.matrix('inputs')
h_tm1 = T.matrix('h_tm1')
c_tm1 = T.matrix('c_tm1')
if self.gate.use_attention:
attended=T.tensor3('attended')
attended_dot_u=T.tensor3('attended_dot_u')
attention_mask=T.matrix('attention_mask')
self._step_fun = function(
[inputs, h_tm1, c_tm1,
attended, attended_dot_u, attention_mask],
self.step(inputs, T.ones(inputs.shape[:-1]),
h_tm1, c_tm1, T.ones_like(h_tm1),
attended, attended_dot_u, attention_mask),
name='%s_step_fun'%self.name)
else:
self._step_fun = function(
[inputs, h_tm1, c_tm1],
self.step(inputs, T.ones(inputs.shape[:-1]),
h_tm1, c_tm1, T.ones_like(h_tm1)),
name='%s_step_fun'%self.name)
return self._step_fun
def attention_u_fun(self):
assert self.gate.use_attention
if self._attention_u_fun is None:
attended = T.tensor3('attended')
self._attention_u_fun = function(
[attended], self.gate.attention_u(attended),
name='%s_attention_u_fun'%self.name)
return self._attention_u_fun
def search(self, predict_fun, embeddings,
start_symbol, stop_symbol, max_length,
h_0=None, c_0=None, attended=None, attention_mask=None,
beam_size=4):
if self.gate.use_attention:
attended_dot_u = self.attention_u_fun()(attended)
if self.trainable_initial:
if h_0 is None:
h_0 = self._h_0.get_value()[None,:]
if c_0 is None:
c_0 = self._c_0.get_value()[None,:]
def step(i, states, outputs, outputs_mask):
if self.gate.use_attention:
result = self.step_fun()(
embeddings[outputs[-1]], states[0], states[1],
attended, attended_dot_u, attention_mask)
else:
result = self.step_fun()(
embeddings[outputs[-1]], states[0], states[1])
h_t, c_t = result[:2]
return [h_t, c_t], predict_fun(h_t)
return search.beam(
step, [h_0, c_0], h_0.shape[0], start_symbol, stop_symbol,
max_length, beam_size=beam_size)
def __call__(self, inputs, inputs_mask, h_0=None, c_0=None,
attended=None, attention_mask=None):
if self.trainable_initial:
batch_size = inputs.shape[1]
if h_0 is None:
h_0 = expand_to_batch(self._h_0, batch_size)
if c_0 is None:
c_0 = expand_to_batch(self._c_0, batch_size)
attention_info = []
if self.gate.use_attention:
attention_info = [attended, self.gate.attention_u(attended),
attention_mask]
dropout_masks = [self.dropout.mask(h_0.shape)]
seqs, _ = theano.scan(
fn=self.step,
go_backwards=self.backwards,
sequences=[{'input': inputs, 'taps': [self.offset]},
{'input': inputs_mask, 'taps': [self.offset]}],
outputs_info=[h_0, c_0] + \
[None]*(1 if self.gate.use_attention else 0),
non_sequences=dropout_masks + attention_info + \
self.gate.parameters_list())
if self.backwards:
return tuple(seq[::-1] for seq in seqs)
else:
return seqs
class Sequence(Model):
def __init__(self, name, gate_type, backwards, *args,
dropout=0, trainable_initial=False, offset=0, **kwargs):
super().__init__(name)
self.backwards = backwards
self.trainable_initial = trainable_initial
self.offset = offset
self._step_fun = None
self._attention_u_fun = None
self.add(Dropout('dropout', dropout))
self.add(gate_type('gate', *args, **kwargs))
if self.trainable_initial:
for state in range(self.gate.n_states):
self.param('state_%d_0' % state, (self.gate.state_dims,),
init_f=init.Gaussian(fan_in=self.gate.state_dims))
def step(self, inputs, inputs_mask, *args):
states_tm1 = args[:self.gate.n_states]
h_mask = args[self.gate.n_states]
non_sequences = args[self.gate.n_states+1:]
# TODO: currently assume that dropout is applied only to states[0]
# through h_mask (which is passed through non_sequences and
# constant at each time step)
if self.gate.use_attention:
# attended is the
# src_sequence_length x batch_size x attention_dims
# matrix which we have attention on.
#
# attended_dot_u is the h_t-independent part of the final
# attention vectors, which is precomputed for efficiency.
#
# attention_mask is a binary mask over the valid elements of
# attended, which in practice is the same as the mask passed to
# the encoder that created attended. Size
# src_sequence_length x batch_size
states_attention = self.gate(
inputs,
*((states_tm1[0] * h_mask.astype(theano.config.floatX),) +
states_tm1[1:]),
attended=non_sequences[0],
attended_dot_u=non_sequences[1],
attention_mask=non_sequences[2])
states_t = states_attention[:-1]
attention = states_attention[-1]
return tuple(T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1)
for s_t, s_tm1 in zip(states_t, states_tm1)
) + (attention,)
else:
states_t = self.gate(
inputs,
*((states_tm1[0] * h_mask.astype(theano.config.floatX),) +
states_tm1[1:]))
return tuple(T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1)
for s_t, s_tm1 in zip(states_t, states_tm1))
def step_fun(self):
if self._step_fun is None:
inputs = T.matrix('inputs')
states_tm1 = [T.matrix('state_%d_tm1' % state)
for state in range(self.gate.n_states)]
if self.gate.use_attention:
attended=T.tensor3('attended')
attended_dot_u=T.tensor3('attended_dot_u')
attention_mask=T.matrix('attention_mask')
self._step_fun = function(
[inputs] + states_tm1 + [
attended, attended_dot_u, attention_mask],
self.step(*([inputs, T.ones(inputs.shape[:-1])] +
states_tm1 + [T.ones_like(states_tm1[0]),
attended, attended_dot_u,
attention_mask])),
name='%s_step_fun'%self.name)
else:
self._step_fun = function(
[inputs] + states_tm1,
self.step(*([inputs, T.ones(inputs.shape[:-1])] +
states_tm1 + [T.ones_like(states_tm1[0])])),
name='%s_step_fun'%self.name)
return self._step_fun
def attention_u_fun(self):
assert self.gate.use_attention
if self._attention_u_fun is None:
attended = T.tensor3('attended')
self._attention_u_fun = function(
[attended], self.gate.attention_u(attended),
name='%s_attention_u_fun'%self.name)
return self._attention_u_fun
def search(self, predict_fun, embeddings,
start_symbol, stop_symbol, max_length,
states_0=None, attended=None, attention_mask=None,
fixed=None,
beam_size=4):
if self.gate.use_attention:
attended_dot_u = self.attention_u_fun()(attended)
if self.trainable_initial:
if states_0 is None:
states_0 = [
getattr(self, '_state_%d_0' % state).get_value()[None,:]
for state in range(self.gate.n_states)]
def step(i, states, outputs, outputs_mask):
inputs = embeddings[outputs[-1]]
# TODO: is this the best way to add extra arguments?
if fixed is not None:
inputs = np.concatenate(
[inputs, fixed[None,:].repeat(0, axis=-1)],
axis=-1)
if self.gate.use_attention:
result = self.step_fun()(
*([inputs] + states + [
attended, attended_dot_u, attention_mask]))
else:
result = self.step_fun()(
*([inputs] + states))
states = result[:self.gate.n_states]
# NOTE: state[0] hard-coded
return states, predict_fun(states[0])
return search.beam(
step, states_0, states_0[0].shape[0],
start_symbol, stop_symbol,
max_length, beam_size=beam_size)
def __call__(self, inputs, inputs_mask, states_0=None,
attended=None, attention_mask=None):
if self.trainable_initial:
batch_size = inputs.shape[1]
if states_0 is None:
states_0 = [
expand_to_batch(getattr(self, '_state_%d_0' % state),
batch_size)
for state in range(self.gate.n_states)]
attention_info = []
if self.gate.use_attention:
attention_info = [attended, self.gate.attention_u(attended),
attention_mask]
dropout_masks = [self.dropout.mask(states_0[0].shape)]
seqs, _ = theano.scan(
fn=self.step,
go_backwards=self.backwards,
sequences=[{'input': inputs, 'taps': [self.offset]},
{'input': inputs_mask, 'taps': [self.offset]}],
outputs_info=list(states_0) + \
[None]*(1 if self.gate.use_attention else 0),
non_sequences=dropout_masks + attention_info + \
self.gate.parameters_list())
if self.backwards:
return tuple(seq[::-1] for seq in seqs)
else:
return seqs
# TODO: need to re-think how to handle attention in stacked models
class StackedSequence(Model):
def __init__(self, name, gate_type, backwards, n_layers,
input_dims, state_dims, *args,
dropout=0, trainable_initial=False, offset=0,
use_attention=False,
layer_fixed_size=None, **kwargs):
super().__init__(name)
self.backwards = backwards
self.trainable_initial = trainable_initial
self.offset = offset
self.n_layers = n_layers
self.layer_fixed_size = layer_fixed_size
self._step_fun = None
self._attention_u_fun = None
self.add(Dropout('dropout', dropout))
self.gates = []
for layer in range(n_layers):
total_input_dims = state_dims
if layer == 0:
total_input_dims += input_dims
if layer_fixed_size is not None:
total_input_dims += layer_fixed_size[layer]
gate = gate_type(
'gate%d' % layer,
total_input_dims,
state_dims,
*args,
**kwargs)
self.add(gate)
self.gates.append(gate)
if self.trainable_initial:
for state in range(self.gate0.n_states):
self.param('state_%d_%d_0' % (layer, state),
(self.gate0.state_dims,),
init_f=init.Gaussian(
fan_in=self.gate0.state_dims))
def step(self, inputs, inputs_mask, *args):
total_states = self.gate0.n_states*self.n_layers
layer_states_tm1 = [
args[layer*self.gate0.n_states:(layer+1)*self.gate0.n_states]
for layer in range(self.n_layers)]
n = total_states
h_mask = args[n]
n += 1
layer_fixed = None
if self.layer_fixed_size is not None:
layer_fixed = args[n:n+self.n_layers+1]
n += self.n_layers+1
non_sequences = args[n:]
layer_states_t = []
#states_tm1 = args[:self.gate.n_states]
#h_mask = args[self.gate.n_states]
#non_sequences = args[self.gate.n_states+1:]
# TODO: currently assume that dropout is applied only to states[0]
# through h_mask (which is passed through non_sequences and
# constant at each time step)
if self.gates[-1].use_attention:
raise NotImplementedError('Stacked RNN with attention')
# attended is the
# src_sequence_length x batch_size x attention_dims
# matrix which we have attention on.
#
# attended_dot_u is the h_t-independent part of the final
# attention vectors, which is precomputed for efficiency.
#
# attention_mask is a binary mask over the valid elements of
# attended, which in practice is the same as the mask passed to
# the encoder that created attended. Size
# src_sequence_length x batch_size
states_attention = self.gate(
inputs,
*((states_tm1[0] * h_mask.astype(theano.config.floatX),) +
states_tm1[1:]),
attended=non_sequences[0],
attended_dot_u=non_sequences[1],
attention_mask=non_sequences[2])
states_t = states_attention[:-1]
attention = states_attention[-1]
return tuple(T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1)
for s_t, s_tm1 in zip(states_t, states_tm1)
) + (attention,)
else:
for layer in range(self.n_layers):
states_tm1 = layer_states_tm1[layer]
total_inputs = inputs if layer == 0 else layer_states_t[-1][0]
if layer_fixed is not None:
total_inputs = T.concatenate(
[total_inputs, layer_fixed[layer].repeat(
inputs.shape[0], axis=0)],
axis=-1)
states_t = getattr(self, 'gate%d' % layer)(
total_inputs,
*((states_tm1[0] * h_mask.astype(theano.config.floatX),) +
states_tm1[1:]))
layer_states_t.append(states_t)
return tuple(
T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1)
for states_t, states_tm1 in zip(
layer_states_t,
layer_states_tm1)
for s_t, s_tm1 in zip(states_t, states_tm1))
#states_t = self.gate(
# inputs,
# *((states_tm1[0] * h_mask.astype(theano.config.floatX),) +
# states_tm1[1:]))
#return tuple(T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1)
# for s_t, s_tm1 in zip(states_t, states_tm1))
def step_fun(self):
if self._step_fun is None:
inputs = T.matrix('inputs')
states_tm1 = [T.matrix('state_%d_%d_tm1' % (layer, state))
for layer in range(self.n_layers)
for state in range(self.gate0.n_states)]
if self.gates[-1].use_attention:
raise NotImplementedError('Stacked RNN with attention')
attended=T.tensor3('attended')
attended_dot_u=T.tensor3('attended_dot_u')
attention_mask=T.matrix('attention_mask')
self._step_fun = function(
[inputs] + states_tm1 + [
attended, attended_dot_u, attention_mask],
self.step(*([inputs, T.ones(inputs.shape[:-1])] +
states_tm1 + [T.ones_like(states_tm1[0]),
attended, attended_dot_u,
attention_mask])),
name='%s_step_fun'%self.name)
else:
self._step_fun = function(
[inputs] + states_tm1,
self.step(*([inputs, T.ones(inputs.shape[:-1])] +
states_tm1 + [T.ones_like(states_tm1[0])])),
name='%s_step_fun'%self.name)
return self._step_fun
def attention_u_fun(self):
assert self.gates[-1].use_attention
if self._attention_u_fun is None:
attended = T.tensor3('attended')
self._attention_u_fun = function(
[attended], self.gates[-1].attention_u(attended),
name='%s_attention_u_fun'%self.name)
return self._attention_u_fun
def search(self, predict_fun, embeddings,
start_symbol, stop_symbol, max_length,
layer_states_0=None, attended=None, attention_mask=None,
layer_fixed=None,
beam_size=4):
if self.gates[-1].use_attention:
attended_dot_u = self.attention_u_fun()(attended)
if self.trainable_initial:
if layer_states_0 is None:
layer_states_0 = [
                getattr(self, '_state_%d_%d_0' % (layer, state)).get_value()[None,:]
for layer in range(self.n_layers)
for state in range(self.gate0.n_states)]
def step(i, states, outputs, outputs_mask):
inputs = embeddings[outputs[-1]]
# TODO: need to give sizes of fixed arguments ...
# TODO: is this the best way to add extra arguments?
if layer_fixed is not None and layer_fixed[0] is not None:
# TODO: wasn't this buggy anyway? Why repeat(0, ...) ?
inputs = np.concatenate(
[inputs, layer_fixed[0][None,:]],
axis=-1)
if self.gates[-1].use_attention:
raise NotImplementedError('Stacked RNN with attention')
result = self.step_fun()(
*([inputs] + states + [
attended, attended_dot_u, attention_mask]))
else:
result = self.step_fun()(
*([inputs] + states))
states = result[:self.n_layers*self.gate0.n_states]
# NOTE: state[0] of the last layer hard-coded
return states, predict_fun(
states[(self.n_layers-1)*self.gate0.n_states])
return search.beam(
step, layer_states_0, layer_states_0[0][0].shape[0],
start_symbol, stop_symbol,
max_length, beam_size=beam_size)
def __call__(self, inputs, inputs_mask, layer_states_0=None,
attended=None, attention_mask=None):
if self.trainable_initial:
batch_size = inputs.shape[1]
if layer_states_0 is None:
layer_states_0 = [
expand_to_batch(getattr(self, '_state_%d_%d_0' % (
layer, state)),
batch_size)
for layer in range(self.n_layers)
for state in range(self.gate0.n_states)]
attention_info = []
if self.gates[-1].use_attention:
attention_info = [attended, self.gates[-1].attention_u(attended),
attention_mask]
dropout_masks = [self.dropout.mask(layer_states_0[0].shape)]
seqs, _ = theano.scan(
fn=self.step,
go_backwards=self.backwards,
sequences=[{'input': inputs, 'taps': [self.offset]},
{'input': inputs_mask, 'taps': [self.offset]}],
outputs_info=list(layer_states_0) + \
[None]*(1 if self.gate0.use_attention else 0),
non_sequences=dropout_masks + attention_info + \
sum([gate.parameters_list()
for gate in self.gates], []))
if self.backwards:
return tuple(seq[::-1] for seq in seqs)
else:
return seqs
class Dropout(Model):
"""Dropout layer.
name : str
Name of layer.
dropout : float
Dropout factor (equivalent to 1 - retention probability)
sequence : bool
If True, dropout is not performed on the last dimension. This is
useful for e.g. embedded symbol sequences, where either a symbol is
kept intact or it is completely zeroed out.
"""
def __init__(self, name, dropout, sequence=False):
super().__init__(name)
self.p = 1.0 - dropout
self.rng = RandomStreams()
self.sequence = sequence
def mask(self, shape):
"""Return a scaled mask for a (symbolic) shape.
This can be used for dropout in recurrent layers, where a fixed mask
is passed through the non_sequences argument to theano.scan().
"""
if self.p == 1: return T.ones(shape)
if self.sequence:
m = T.shape_padright(self.rng.binomial(shape[:-1], p=self.p)
).astype(theano.config.floatX)
else:
m = self.rng.binomial(shape, p=self.p).astype(theano.config.floatX)
return m / self.p
def __call__(self, inputs):
if self.p == 1: return inputs
m = self.mask(inputs.shape)
return ifelse(train_mode, inputs * m, inputs)
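# Illustrative sketch (not part of the original module): reusing one fixed
# Dropout mask at every step of theano.scan, as described in Dropout.mask
# above. All names below are invented for the example.
def _example_recurrent_dropout(h_0, n_steps, dropout=0.5):
    drop = Dropout('example_dropout', dropout)
    h_mask = drop.mask(h_0.shape)        # one rescaled mask for the whole scan
    def step(h_tm1, mask):
        return h_tm1 * mask              # the same mask is applied at each step
    hs, _ = theano.scan(fn=step,
                        outputs_info=[h_0],
                        non_sequences=[h_mask],
                        n_steps=n_steps)
    return hs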
class LayerNormalization(Model):
"""Layer Normalization (Ba, Kiros and Hinton 2016)."""
def __init__(self, name, inputs_shape, g_init=None, axis=-1, epsilon=1e-6):
super().__init__(name)
self.inputs_shape = inputs_shape
self.axis = axis
self.epsilon = epsilon
if g_init is None: g_init = init.Constant(1.0)
self.param('g', (inputs_shape[self.axis],), init_f=g_init)
def __call__(self, inputs):
broadcast = ['x']*len(self.inputs_shape)
broadcast[self.axis] = 0
mean = inputs.mean(axis=self.axis, keepdims=True).astype(
theano.config.floatX)
std = inputs.std(axis=self.axis, keepdims=True).astype(
theano.config.floatX)
normed = (inputs - mean) / (std + self.epsilon)
return normed * self._g.dimshuffle(*broadcast)
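# Illustrative sketch (not part of the original module): compiling a function
# that normalizes a (batch, features) matrix with the layer above; the names
# are invented for the example.
def _example_layer_norm(dims=16):
    x = T.matrix('x')
    ln = LayerNormalization('example_ln', (None, dims))
    return function([x], ln(x))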
class LinearSelection(Model):
def __init__(self, name, input_dims, output_dims, selector_dims,
parallel_dims,
w=None, w_init=None, w_regularizer=None,
b=None, b_init=None, b_regularizer=None,
sw=None, sw_init=None,
sb=None, sb_init=None,
input_select=False,
use_bias=True, dropout=0, layernorm=False):
super().__init__(name)
self.input_dims = input_dims
self.output_dims = output_dims
self.selector_dims = selector_dims
self.parallel_dims = parallel_dims
self.use_bias = use_bias
self.dropout = dropout
self.layernorm = layernorm
self.input_select = input_select
s_dims = selector_dims + (input_dims if input_select else 0)
if w_init is None: w_init = init.Gaussian(fan_in=input_dims)
if b_init is None: b_init = init.Constant(0.0)
if sw_init is None: sw_init = init.Gaussian(fan_in=s_dims)
if sb_init is None: sb_init = init.Constant(0.0)
self.param('w', (input_dims, output_dims*parallel_dims),
init_f=w_init, value=w)
self.regularize(self._w, w_regularizer)
if use_bias:
self.param('b', (output_dims*parallel_dims,),
init_f=b_init, value=b)
self.regularize(self._b, b_regularizer)
self.param('sw', (s_dims, output_dims*parallel_dims),
init_f=sw_init)
self.param('sb', (output_dims*parallel_dims,),
init_f=sb_init)
if dropout:
self.add(Dropout('dropout', dropout))
if layernorm:
self.add(LayerNormalization('ln', (None, output_dims)))
def __call__(self, inputs, selector, sequence=False):
par = T.dot(inputs, self._w)
if self.use_bias: par = par + self._b
if sequence:
par = par.reshape((par.shape[0], par.shape[1],
self.output_dims, self.parallel_dims))
else:
par = par.reshape((par.shape[0],
self.output_dims, self.parallel_dims))
# Note that par might be a 3D or 4D tensor, while sel is always 3D
if self.input_select and sequence:
# ...except if we condition on the input
selector = T.concatenate([
inputs,
T.repeat(selector.dimshuffle('x',0,1), inputs.shape[0],
axis=0)],
axis=-1)
sel = T.dot(selector, self._sw) + self._sb
sel = sel.reshape(
(sel.shape[0], sel.shape[1],
self.output_dims, self.parallel_dims))
sel = softmax_4d(sel)
outputs = (par * sel).sum(axis=-1)
else:
if self.input_select:
selector = T.concatenate([inputs, selector], axis=-1)
sel = T.dot(selector, self._sw) + self._sb
sel = sel.reshape(
(sel.shape[0], self.output_dims, self.parallel_dims))
sel = softmax_3d(sel)
if sequence:
outputs = (par * sel.dimshuffle('x',0,1,2)).sum(axis=-1)
else:
outputs = (par * sel).sum(axis=-1)
if self.layernorm: outputs = self.ln(outputs)
if self.dropout: outputs = self.dropout(outputs)
return outputs
|
gpl-3.0
| -8,664,384,897,296,081,000
| 40.040388
| 85
| 0.531078
| false
| 3.942578
| false
| false
| false
|
cyliustack/sofa
|
bin/sofa_analyze.py
|
1
|
50661
|
import argparse
import matplotlib
matplotlib.use('agg')
import csv
import json
import multiprocessing as mp
import os
import random
import re
import sys
from functools import partial
from operator import attrgetter, itemgetter
import networkx as nx
import numpy as np
import pandas as pd
import time
from sofa_aisi import *
from sofa_common import *
from sofa_config import *
from sofa_print import *
from matplotlib import pyplot as plt
import grpc
import potato_pb2
import potato_pb2_grpc
import socket
import random
import subprocess
from sofa_ml import hsg_v2
def random_generate_color():
rand = lambda: random.randint(0, 255)
return '#%02X%02X%02X' % (64, rand(), rand())
def get_top_k_events(cfg, df, topk):
topk_events=[]
gby = df.groupby(['name'])
df_agg = gby.aggregate(np.sum)
df_agg_sorted = df_agg.sort_values(by=['duration'],ascending=False)
#memcpy = ['copyKind_1_','copyKind_2_','copyKind_8_']
if cfg.verbose:
print("Top %d Events: "%topk)
print(df_agg_sorted[['duration']][0:topk])
eventName = df_agg_sorted[df_agg_sorted.columns[0:0]].head(topk).index.values.tolist()
return eventName
# input: pfv(performance feature vector), Pandas.DataFrame
# output: hint, docker_image
def get_hint(potato_server, features):
if len(features) > 0:
pfv = potato_pb2.PerformanceFeatureVector()
for i in range(len(features)):
name = features.iloc[i]['name']
value = features.iloc[i]['value']
#print('%s%s%s' % (str(i).ljust(10), name.ljust(30), ('%.3lf'%value).ljust(20)))
pfv.name.append(name)
pfv.value.append(value)
#print('Wait for response from POTATO server...')
myhostname = socket.gethostname()
channel = grpc.insecure_channel(potato_server)
stub = potato_pb2_grpc.HintStub(channel)
request = potato_pb2.HintRequest( hostname = myhostname,
pfv = pfv)
response = stub.Hint(request)
hint = response.hint
docker_image = response.docker_image
else:
hint = 'There is no pfv to get hints.'
docker_image = 'NA'
return hint, docker_image
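# Illustrative sketch (not part of the analysis pipeline): calling get_hint()
# with a tiny feature table. The server address below is only a placeholder.
def _example_get_hint():
    features = pd.DataFrame({'name': ['elapsed_time', 'cpu_util'],
                             'value': [12.3, 87.0]},
                            columns=['name', 'value'])
    hint, docker_image = get_hint('127.0.0.1:50051', features)
    return hint, docker_image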
def concurrency_breakdown(logdir, cfg, df_mpstat, df_cpu, df_gpu, df_nvsmi, df_bandwidth, features):
if cfg.verbose:
print_title('Concurrency Breakdown Analysis')
total_elapsed_time = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
elapsed_time_ratio = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
total_interval_vector = []
total_performace_vector = []
if len(df_mpstat) == 0:
print_warning(cfg, 'no mpstat and perf traces!')
return features
t_begin = df_mpstat.iloc[0]['timestamp']
t_end = df_mpstat.iloc[-1]['timestamp']
t = t_begin
sample_time = (1 / float(cfg.sys_mon_rate))
while t < t_end:
t = t + sample_time
if cfg.roi_end > 0 and (t < cfg.roi_begin or t > cfg.roi_end):
continue
window_begin = t - sample_time
window_end = t
if len(df_cpu) > 0:
if df_cpu.iloc[0].timestamp > window_end:
continue
cond1 = (df_cpu['timestamp'] > window_begin)
cond2 = (df_cpu['timestamp'] <= window_end)
df_cpu_interval = df_cpu[ cond1 & cond2 ]
num_gpus = len(list(set(df_nvsmi['deviceId'])))
cond1 = (df_nvsmi['timestamp'] > window_begin)
cond2 = (df_nvsmi['timestamp'] <= window_end)
sm = df_nvsmi['event'] == int(0)
df_nvsmi_interval = df_nvsmi[ cond1 & cond2 & sm ]
cond1 = (df_mpstat['timestamp'] > window_begin)
cond2 = (df_mpstat['timestamp'] <= window_end)
df_mpstat_interval = df_mpstat[ cond1 & cond2 ]
cond1 = (df_bandwidth['timestamp'] > window_begin)
cond2 = (df_bandwidth['timestamp'] <= window_end)
tx = df_bandwidth['event'] == float(0)
rx = df_bandwidth['event'] == float(1)
df_tx_interval = df_bandwidth[ cond1 & cond2 & tx ]
df_rx_interval = df_bandwidth[ cond1 & cond2 & rx ]
mp_usr = []
mp_sys = []
mp_idl = []
mp_iow = []
usr = []
sys = []
irq = []
cpu_max = 0
cpu_min = 100
for i in range(len(df_mpstat_interval)):
ratios = df_mpstat_interval.iloc[i]['name'].split(':')[1].split('|')
#print(ratios)
mp_usr.append(sample_time*int(ratios[1])/100.0)
mp_sys.append(sample_time*int(ratios[2])/100.0)
mp_idl.append(sample_time*int(ratios[3])/100.0)
mp_iow.append(sample_time*int(ratios[4])/100.0)
usr.append(int(ratios[1]))
sys.append(int(ratios[2]))
irq.append(int(ratios[5]))
cpu_tmp = int(ratios[1]) + int(ratios[2]) + int(ratios[5])
if cpu_tmp > cpu_max:
cpu_max = cpu_tmp
if cpu_tmp < cpu_min:
cpu_min = cpu_tmp
mp_usr = np.asarray(mp_usr)
mp_sys = np.asarray(mp_sys)
mp_idl = np.asarray(mp_idl)
mp_iow = np.asarray(mp_iow)
usr = np.asarray(usr)
sys = np.asarray(sys)
irq = np.asarray(irq)
elapsed_time = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0}
if len(df_mpstat_interval) > 0:
elapsed_time['usr'] = mp_usr.max()
elapsed_time['sys'] = mp_sys.max()
elapsed_time['gpu'] = df_nvsmi_interval['duration'].max() * 0.01 * sample_time
elapsed_time['iow'] = mp_iow.max()
#print('gput,usrt = ', elapsed_time['gpu'], elapsed_time['usr'])
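        # Attribute this sampling window to whichever component (usr/sys/gpu/
        # iow) was busiest; if even the busiest one stays below the idle
        # threshold, the window is counted as idle instead.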
dominator = max(elapsed_time, key=elapsed_time.get)
#if elapsed_time['gpu'] > 0.1 :
# dominator = 'gpu'
if elapsed_time[dominator] > sample_time * int(cfg.is_idle_threshold)/100:
total_elapsed_time[dominator] = total_elapsed_time[dominator] + sample_time
else:
total_elapsed_time['idl'] += sample_time
if num_gpus > 0:
time_gpu_avg = df_nvsmi_interval['duration'].sum() * 0.01 * sample_time / num_gpus
else:
time_gpu_avg = 0
interval_vector = [mp_usr.max(),
mp_sys.max(),
mp_iow.max(),
mp_idl.max(),
time_gpu_avg,
df_tx_interval['bandwidth'].sum(),
df_rx_interval['bandwidth'].sum()]
total_interval_vector.append(tuple(interval_vector))
if num_gpus > 0:
sm_avg = df_nvsmi_interval['duration'].sum() / int(len(list(set(df_nvsmi_interval['deviceId']))))
else:
sm_avg = 0
performace_vector = [window_end,
df_nvsmi_interval['duration'].max(),
sm_avg,
df_nvsmi_interval['duration'].min(),
round((usr.mean() + sys.mean() + irq.mean()), 0),
cpu_max,
cpu_min]
total_performace_vector.append(tuple(performace_vector))
total_all_elapsed_time = sum(total_elapsed_time.values())
if total_all_elapsed_time > 0 :
elapsed_time_ratio['usr'] = 100 * total_elapsed_time['usr'] / total_all_elapsed_time
elapsed_time_ratio['sys'] = 100 * total_elapsed_time['sys'] / total_all_elapsed_time
elapsed_time_ratio['gpu'] = 100 * total_elapsed_time['gpu'] / total_all_elapsed_time
elapsed_time_ratio['idl'] = 100 * total_elapsed_time['idl'] / total_all_elapsed_time
elapsed_time_ratio['iow'] = 100 * total_elapsed_time['iow'] / total_all_elapsed_time
if cfg.verbose:
print('Elapsed Time = %.1lf ' % total_all_elapsed_time)
print('USR = %.1lf %%' % elapsed_time_ratio['usr'])
print('SYS = %.1lf %%' % elapsed_time_ratio['sys'])
if num_gpus > 0:
print('GPU = %.1lf %%' % elapsed_time_ratio['gpu'])
print('IDL = %.1lf %%' % elapsed_time_ratio['idl'])
print('IOW = %.1lf %%' % elapsed_time_ratio['iow'])
if cfg.spotlight_gpu:
elapsed_hotspot_time = cfg.roi_end - cfg.roi_begin
else:
elapsed_hotspot_time = 0
df = pd.DataFrame({ 'name':['elapsed_usr_time_ratio', 'elapsed_sys_time_ratio', 'elapsed_gpu_time_ratio',
'elapsed_iow_time_ratio', 'elapsed_hotspot_time'],
'value':[elapsed_time_ratio['usr'], elapsed_time_ratio['sys'], elapsed_time_ratio['gpu'],
elapsed_time_ratio['iow'], elapsed_hotspot_time ] },
columns=['name','value'])
features = pd.concat([features, df])
if len(total_performace_vector) > 0:
performance_table = pd.DataFrame(total_performace_vector, columns = ['time', 'max_gpu_util', 'avg_gpu_util', 'min_gpu_util', 'cpu_util', 'cpu_max', 'cpu_min'])
performance_table.to_csv('%s/performance.csv' % logdir)
vector_table = pd.DataFrame(total_interval_vector, columns = ['usr' , 'sys', 'iow', 'idl','gpu', 'net_tx', 'net_rx'])
pearson = vector_table.corr(method ='pearson').round(2)
if cfg.verbose:
print('Correlation Table :')
print(pearson)
df = pd.DataFrame({ 'name':['corr_gpu_usr', 'corr_gpu_sys', 'corr_gpu_iow', 'corr_gpu_ntx', 'corr_gpu_nrx'], 'value':[pearson['gpu'].usr, pearson['gpu'].sys, pearson['gpu'].iow, pearson['gpu'].net_tx, pearson['gpu'].net_rx]}, columns=['name','value'])
features = pd.concat([features, df])
return features
def payload_sum(df):
print((len(df)))
class Event:
def __init__(self, name, ttype, timestamp, duration):
self.name = name
self.ttype = ttype # 0 for begin, 1 for end
self.timestamp = timestamp
self.duration = duration
def __repr__(self):
return repr((self.name, self.ttype, self.timestamp, self.duration))
def nvsmi_profile(logdir, cfg, df_nvsmi, features):
if not cfg.cluster_ip and cfg.verbose:
print_title('SM & MEM & ENCODE/DECODE Profiling')
if cfg.spotlight_gpu:
if cfg.roi_end == 0 :
print_warning(cfg, 'spotlight_gpu has no effects.')
else:
cond1 = (df_nvsmi['timestamp'] > cfg.roi_begin)
cond2 = (df_nvsmi['timestamp'] <= cfg.roi_end)
df_nvsmi = df_nvsmi[ cond1 & cond2 ]
sm_start = df_nvsmi.iloc[0].timestamp
sm_end = df_nvsmi.iloc[-1].timestamp
SM_time = sm_end - sm_start
result = df_nvsmi.groupby(['deviceId','event'])['duration'].mean()
result = result.astype(int)
gpu_sm_util = df_nvsmi.groupby(['event'])['duration'].mean()[0]
gpu_mem_util = df_nvsmi.groupby(['event'])['duration'].mean()[1]
if cfg.nvsmi_data:
gpu_enc_util = df_nvsmi.groupby(['event'])['duration'].mean()[2]
gpu_dec_util = df_nvsmi.groupby(['event'])['duration'].mean()[3]
else:
gpu_enc_util = 0
gpu_dec_util = 0
sm = df_nvsmi['event'] == int(0)
mem = df_nvsmi['event'] == int(1)
enc = df_nvsmi['event'] == int(2)
dec = df_nvsmi['event'] == int(3)
gpunum = list(set(df_nvsmi['deviceId']))
res = pd.DataFrame([], columns=['sm', 'mem', 'enc', 'dec'])
sm_q = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
mem_q = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
for i in gpunum:
gpuid = df_nvsmi['deviceId'] == int(i)
gpudata = [round(df_nvsmi[sm & gpuid]['duration'].mean(), 2),
round(df_nvsmi[mem & gpuid]['duration'].mean(), 2),
round(df_nvsmi[enc & gpuid]['duration'].mean(), 2),
round(df_nvsmi[dec & gpuid]['duration'].mean(), 2)]
smdata = [round(df_nvsmi[sm & gpuid]['duration'].quantile(0.25), 2),
round(df_nvsmi[sm & gpuid]['duration'].quantile(0.5), 2),
round(df_nvsmi[sm & gpuid]['duration'].quantile(0.75), 2),
round(df_nvsmi[sm & gpuid]['duration'].mean(), 2)]
memdata = [round(df_nvsmi[mem & gpuid]['duration'].quantile(0.25), 2),
round(df_nvsmi[mem & gpuid]['duration'].quantile(0.5), 2),
round(df_nvsmi[mem & gpuid]['duration'].quantile(0.75), 2),
round(df_nvsmi[mem & gpuid]['duration'].mean(), 2)]
gpu_tmp = pd.DataFrame([gpudata], columns=['sm', 'mem', 'enc', 'dec'], index=[i])
sm_tmp = pd.DataFrame([smdata], columns=['Q1', 'Q2', 'Q3', 'Avg'], index=[i])
mem_tmp = pd.DataFrame([memdata], columns=['Q1', 'Q2', 'Q3', 'Avg'], index=[i])
res = pd.concat([res, gpu_tmp])
sm_q = pd.concat([sm_q, sm_tmp])
mem_q = pd.concat([mem_q, mem_tmp])
res.index.name = 'gpu_id'
sm_q.index.name = 'gpu_id'
mem_q.index.name = 'gpu_id'
if not cfg.cluster_ip and cfg.verbose:
print('GPU Utilization (%):')
print(res)
print('\nGPU SM Quartile (%):')
print(sm_q)
print('\nGPU MEM Quartile (%):')
print(mem_q)
print('Overall Average SM Utilization (%): ', int(gpu_sm_util))
print('Overall Average MEM Utilization (%): ', int(gpu_mem_util))
print('Overall Average ENC Utilization (%): ', int(gpu_enc_util))
print('Overall Average DEC Utilization (%): ', int(gpu_dec_util))
print('Overall Active GPU Time (s): %.3lf' % (SM_time * gpu_sm_util/100.0))
df = pd.DataFrame({'name':['gpu_sm_util_q2', 'gpu_sm_util_q3', 'gpu_sm_util', 'gpu_mem_util_q2', 'gpu_mem_util_q3', 'gpu_mem_util'],
'value':[df_nvsmi[sm & gpuid]['duration'].quantile(0.5),
df_nvsmi[sm & gpuid]['duration'].quantile(0.75),
int(gpu_sm_util),
df_nvsmi[mem & gpuid]['duration'].quantile(0.5),
df_nvsmi[mem & gpuid]['duration'].quantile(0.75),
int(gpu_mem_util),
]},
columns=['name','value'])
features = pd.concat([features, df])
return features
def gpu_profile(logdir, cfg, df_gpu, features):
if cfg.verbose:
print_title('GPU Profiling')
print('Per-GPU time (s):')
groups = df_gpu.groupby("deviceId")["duration"]
gpu_time = 0
for key, item in groups:
gpuid = int(float(key))
per_gpu_time = groups.get_group(key).sum()
if cfg.verbose:
print("[%d]: %lf" % (gpuid, per_gpu_time))
gpu_time = gpu_time + per_gpu_time
num_gpus = len(groups)
kernel_time = 0
grouped_df = df_gpu.groupby("copyKind")["duration"]
for key, item in grouped_df:
if key == 0:
kernel_time = grouped_df.get_group(key).sum()
nccl_time = 0
grouped_df = df_gpu.groupby("name")["duration"]
for key, item in grouped_df:
#print("[%s]: %lf" % (key, grouped_df.get_group(key).sum()))
if key.find("nccl") != -1:
nccl_time = nccl_time + grouped_df.get_group(key).sum()
features = comm_profile(logdir, cfg, df_gpu, features)
get_top_k_events(cfg, df_gpu, 10)
df = pd.DataFrame({'name':['gpu_time', 'num_gpus', 'kernel_time', 'nccl_time'],
'value':[gpu_time, num_gpus, kernel_time, nccl_time] },
columns=['name','value'])
features = pd.concat([features, df])
return features
def strace_profile(logdir, cfg, df, features):
print_title('STRACE Profiling:')
return features
def net_profile(logdir, cfg, df, features):
if not cfg.cluster_ip:
print_title("Network Profiling:")
grouped_df = df.groupby("name")["duration"]
net_time = 0
n_packets = 0
for key, item in grouped_df:
#print("[%s]: %lf" % (key, grouped_df.get_group(key).sum()))
if key.find("network:tcp:") != -1:
net_time = net_time + grouped_df.get_group(key).sum()
n_packets = n_packets + 1
#print(("total network time (s) = %.3lf" % net_time))
#print(("total amount of network packets = %d" % n_packets))
# total network packet
packet_num_matrix = df.groupby(['pkt_src','pkt_dst','payload']).size().unstack(level=1, fill_value=0)
# total network traffic
packet_sum_matrix = df.groupby(['pkt_src','pkt_dst'])["payload"].sum().unstack(level=1, fill_value=0)
# ================ change pandas table columns and index name ====
rename_index = packet_sum_matrix.index.tolist()
rename_index2 = packet_num_matrix.index.tolist()
rename_columns = packet_sum_matrix.columns.tolist()
rename_columns2 = packet_num_matrix.columns.tolist()
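    # pkt_src/pkt_dst appear to be IPv4 addresses packed into integers with
    # three digits per octet (e.g. 192168001001); zero()/check_str() below
    # rebuild the dotted form ('192.168.1.1') for readable row/column labels.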
def zero(s):
if s[0:2] == '00':
s = s[2]
elif (s[0] == '0') and (s[1] != '0'):
s = s[1:3]
return(s)
def check_str(rename_list):
rename_list_new = []
for j in rename_list:
j = str(int(j))
a = j[-9:-6]
b = j[-6:-3]
c = j[-3:]
j = j[:-9] + '.' + zero(a) + '.' + zero(b) + '.' + zero(c)
rename_list_new.append(j)
return(rename_list_new)
def check_str2(rename_list):
rename_columns_2 = []
for i in rename_list:
i = str(int(i[0]))
a = i[-9:-6]
b = i[-6:-3]
c = i[-3:]
i = i[:-9] + '.' + zero(a) + '.' + zero(b) + '.' + zero(c)
rename_columns_2.append(i)
return(rename_columns_2)
rename_index_new = check_str(rename_index)
rename_index_new = dict(zip(rename_index, rename_index_new))
rename_index2_new = check_str2(rename_index2)
rename_index2_final = list(set(rename_index2_new))
rename_index2_final.sort(key=rename_index2_new.index)
rename_columns_new = check_str(rename_columns)
rename_columns_new = dict(zip(rename_columns, rename_columns_new))
rename_columns2_new = check_str(rename_columns2)
rename_columns2_new = dict(zip(rename_columns2, rename_columns2_new))
# rename here
packet_sum_matrix = packet_sum_matrix.rename(columns=rename_columns_new)
packet_num_matrix = packet_num_matrix.rename(columns=rename_columns2_new)
packet_sum_matrix = packet_sum_matrix.rename(index=rename_index_new)
packet_num_matrix.index.set_levels(rename_index2_final , level = 0, inplace = True)
if cfg.verbose:
print("total amount of network traffic : ", convertbyte(df['payload'].sum()), '\n', packet_sum_matrix.to_string(), "\n")
print("total amount of network packets = %d\n" % packet_num_matrix.sum().sum() ,packet_num_matrix.to_string(), "\n")
network_value = []
src = []
dst = []
final = []
for index in packet_sum_matrix.index:
for column in packet_sum_matrix.columns:
src.append(index)
dst.append(column)
network_value.append(packet_sum_matrix[column][index])
record = list(zip(src, dst, network_value))
record.sort(key=lambda tup:tup[2], reverse=True)
for src, dst, value in record:
if value == 0:
pass
else:
item = [src, dst, convertbyte(value), round(value / df['payload'].sum(), 2)]
final.append(item)
summary = pd.DataFrame(final, columns=['Source', 'Destination', 'Amount', 'Percentage of a Node'])
summary.to_csv(logdir + 'netrank.csv',
mode='w',
header=True,
index=False)
df = pd.DataFrame({'name':['net_time'],
'value':[net_time] },
columns=['name','value'])
features = pd.concat([features, df])
return features
def convertbyte(B):
B = int(B)
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
if B < KB:
return '{} Bytes'.format(B)
elif KB <= B < MB:
return '{0:.2f} KB'.format(B/KB)
elif MB <= B < GB:
return '{0:.2f} MB'.format(B/MB)
elif GB <= B < TB:
return '{0:.2f} GB'.format(B/GB)
elif TB <= B:
return '{0:.2f} TB'.format(B/TB)
def convertbytes(B):
B = float(B)
KB = float(1024)
MB = float(KB ** 2) # 1,048,576
GB = float(KB ** 3) # 1,073,741,824
TB = float(KB ** 4) # 1,099,511,627,776
if B < KB:
return '{0:.2f} B/s'.format(B)
elif KB <= B < MB:
return '{0:.2f} KB/s'.format(B/KB)
elif MB <= B < GB:
return '{0:.2f} MB/s'.format(B/MB)
elif GB <= B < TB:
return '{0:.2f} GB/s'.format(B/GB)
elif TB <= B:
return '{0:.2f} TB/s'.format(B/TB)
def netbandwidth_profile(logdir, cfg, df, features):
if not cfg.cluster_ip and cfg.verbose:
print_title('Network Bandwidth Profiling:')
tx = df['event'] == float(0)
rx = df['event'] == float(1)
bw_tx_q1 = df[tx]['bandwidth'].quantile(0.25)
bw_tx_q2 = df[tx]['bandwidth'].quantile(0.5)
bw_tx_q3 = df[tx]['bandwidth'].quantile(0.75)
bw_tx_mean = int(df[tx]['bandwidth'].mean())
bw_rx_q1 = df[rx]['bandwidth'].quantile(0.25)
bw_rx_q2 = df[rx]['bandwidth'].quantile(0.5)
bw_rx_q3 = df[rx]['bandwidth'].quantile(0.75)
bw_rx_mean = int(df[rx]['bandwidth'].mean())
with open('%s/netstat.txt' % logdir) as f:
lines = f.readlines()
first_line = lines[0]
last_line = lines[-1]
tx_begin = first_line.split(',')[1]
rx_begin = first_line.split(',')[2]
tx_end = last_line.split(',')[1]
rx_end = last_line.split(',')[2]
tx_amount = int(last_line.split(',')[1]) - int(first_line.split(',')[1])
rx_amount = int(last_line.split(',')[2]) - int(first_line.split(',')[2])
if not cfg.cluster_ip:
bw_tx_q1 = df[tx]['bandwidth'].quantile(0.25)
bw_tx_q2 = df[tx]['bandwidth'].quantile(0.5)
bw_tx_q3 = df[tx]['bandwidth'].quantile(0.75)
bw_tx_mean = int(df[tx]['bandwidth'].mean())
bw_rx_q1 = df[rx]['bandwidth'].quantile(0.25)
bw_rx_q2 = df[rx]['bandwidth'].quantile(0.5)
bw_rx_q3 = df[rx]['bandwidth'].quantile(0.75)
bw_rx_mean = int(df[rx]['bandwidth'].mean())
if cfg.verbose:
print('Amount of Network Traffic : %s' % (convertbyte(tx_amount + rx_amount)))
print('Amount of tx : %s' % convertbyte(tx_amount))
print('Amount of rx : %s' % convertbyte(rx_amount))
print('Bandwidth Quartile :')
print('Q1 tx : %s, rx : %s' % ( convertbytes(bw_tx_q1), convertbytes(bw_rx_q1)))
print('Q2 tx : %s, rx : %s' % ( convertbytes(bw_tx_q2), convertbytes(bw_rx_q2)))
print('Q3 tx : %s, rx : %s' % ( convertbytes(bw_tx_q3), convertbytes(bw_rx_q3)))
print('Avg tx : %s, rx : %s'% ( convertbytes(bw_tx_mean), convertbytes(bw_rx_mean)))
#network chart part
all_time = df[tx]['timestamp'].tolist()
all_tx = df[tx]['bandwidth'].tolist()
all_rx = df[rx]['bandwidth'].tolist()
fig = plt.figure(dpi=128, figsize=(16, 14))
plt.plot(all_time, all_tx, c='red', alpha=0.5, label='tx')
plt.plot(all_time, all_rx, c='blue', alpha=0.5, label='rx')
plt.legend(loc='upper right')
plt.title("Network Report", fontsize=18)
plt.xlabel('Timestamp (s)', fontsize=16)
plt.ylabel("Bandwidth (bytes)", fontsize=16)
fig.savefig("%s/network_report.pdf" % logdir, bbox_inches='tight')
if not cfg.cluster_ip and cfg.verbose:
print('Network Bandwidth Chart is saved at %s/network_report.pdf' %logdir)
df_feature = pd.DataFrame({ 'name':['bw_tx_q2', 'bw_tx_q3', 'bw_rx_q2', 'bw_rx_q3'],
'value':[bw_tx_q2, bw_tx_q3, bw_rx_q2, bw_rx_q3] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def blktrace_latency_profile(logdir, cfg, df, features):
with open('%s/btt.txt' % logdir) as f:
lines = f.readlines()
for i, line in enumerate(lines):
if '==================== All Devices ====================' in line:
start = i
if '==================== Device Merge Information ====================' in line:
end = i
break
bttoutput_result = lines[start:end]
df_offset = pd.read_table('%s/offset_all.txt' % logdir, delim_whitespace=True, names=('time', 'start', 'end'))
time = df_offset['time'].tolist()
start_b = df_offset['start'].tolist()
end_b = df_offset['end'].tolist()
fig = plt.figure(dpi=128, figsize=(16, 14))
plt.plot(time, start_b, c='red', marker='o', alpha=0.3, label='Start block')
plt.legend(loc='upper right')
plt.title("Block Offset Report", fontsize=18)
plt.xlabel('Timestamp (s)', fontsize=16)
plt.ylabel("Block Number", fontsize=16)
fig.savefig("%s/offset_of_device_report.pdf" % logdir, bbox_inches='tight')
print('Offset of Device Report is saved at %s/offset_of_device_report.pdf' %logdir)
if cfg.verbose:
print_title('Storage Profiling:')
        print('Blktrace Latency (s):')
for btt in bttoutput_result:
print(btt[:-1])
blktrace_latency = df['event'] == 'C'
blktrace_latency_q1 = df[blktrace_latency]['duration'].quantile(0.25)
blktrace_latency_q2 = df[blktrace_latency]['duration'].quantile(0.5)
blktrace_latency_q3 = df[blktrace_latency]['duration'].quantile(0.75)
blktrace_latency_mean = df[blktrace_latency]['duration'].mean()
df_feature = pd.DataFrame({ 'name':['blktrace_latency_q1','blktrace_latency_q2','blktrace_latency_q3'],
'value': [blktrace_latency_q1, blktrace_latency_q2, blktrace_latency_q3] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def diskstat_profile(logdir, cfg, df, features):
#diskstat_dev = list(set(df['dev']))
diskstat_r_q1 = df.groupby('dev')['d_read'].quantile(0.25)
diskstat_w_q1 = df.groupby('dev')['d_write'].quantile(0.25)
diskstat_q1 = df.groupby('dev')['d_disk_total'].quantile(0.25)
diskstat_r_q2 = df.groupby('dev')['d_read'].quantile(0.5)
diskstat_w_q2 = df.groupby('dev')['d_write'].quantile(0.5)
diskstat_q2 = df.groupby('dev')['d_disk_total'].quantile(0.5)
diskstat_r_q3 = df.groupby('dev')['d_read'].quantile(0.75)
diskstat_w_q3 = df.groupby('dev')['d_write'].quantile(0.75)
diskstat_q3 = df.groupby('dev')['d_disk_total'].quantile(0.75)
diskstat_r_avg = df.groupby('dev')['d_read'].mean()
diskstat_w_avg = df.groupby('dev')['d_write'].mean()
diskstat_avg = df.groupby('dev')['d_disk_total'].mean()
diskstat_r_iops = df.groupby('dev')['r_iops'].mean()
diskstat_w_iops = df.groupby('dev')['w_iops'].mean()
diskstat_iops = df.groupby('dev')['iops'].mean()
diskstat_wait = df.groupby('dev')['await_time'].mean()
diskstat_table = pd.concat([diskstat_r_q1, diskstat_r_q2, diskstat_r_q3, diskstat_r_avg,
diskstat_w_q1, diskstat_w_q2, diskstat_w_q3, diskstat_w_avg,
diskstat_q1, diskstat_q2, diskstat_q3, diskstat_avg,
diskstat_r_iops, diskstat_w_iops, diskstat_iops,
diskstat_wait], axis=1, sort=False)
diskstat_columns = ['Q1 throughput(Read)', 'Q2 throughput(Read)', 'Q3 throughput(Read)', 'Avg throughput(Read)',
'Q1 throughput(Write)', 'Q2 throughput(Write)', 'Q3 throughput(Write)', 'Avg throughput(Write)',
'Q1 throughput(R+W)', 'Q2 throughput(R+W)', 'Q3 throughput(R+W)', 'Avg throughput(R+W)',
'Avg IOPS(Read)', 'Avg IOPS(Write)', 'Avg IOPS(R+W)', 'Avg Await time(ms)']
diskstat_table.columns = diskstat_columns
diskstat_dev = diskstat_table.index.format()
final_table = pd.DataFrame(columns=diskstat_columns)
for j, dev in enumerate(diskstat_dev):
tmp_list = []
for i in diskstat_columns[:-4]:
tmp_list.append(convertbytes(diskstat_table.iloc[j][i]))
for i in diskstat_columns[-4:-1]:
tmp_list.append('%d' % int(diskstat_table.iloc[j][i]))
tmp_list.append('%.3lf ms' % diskstat_table.iloc[j][-1])
tmp_table = pd.DataFrame([tuple(tmp_list)],
columns=diskstat_columns,
index=[dev])
final_table = pd.concat([final_table, tmp_table])
if cfg.verbose:
print_title('DISKSTAT Profiling:')
print('Disk Throughput Quartile :')
print(final_table.T)
df_feature = pd.DataFrame({ 'name':['diskstat_q1','diskstat_q2','diskstat_q3'],
'value': [diskstat_q1.mean(), diskstat_q2.mean(), diskstat_q3.mean()] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def cpu_profile(logdir, cfg, df):
if cfg.verbose:
print_title('CPU Profiling:')
print('elapsed_time (s) = %.6lf' % cfg.elapsed_time)
grouped_df = df.groupby("deviceId")["duration"]
total_exec_time = 0
for key, item in grouped_df:
print(("[%d]: %lf" % (key, grouped_df.get_group(key).sum())))
total_exec_time = total_exec_time + grouped_df.get_group(key).sum()
print("total execution time (s) = %.3lf" % total_exec_time)
cpu_detail_profile_df = df[['timestamp','duration','name']]
cpu_detail_profile_df = cpu_detail_profile_df.sort_values(by=['duration'], ascending=False)
cpu_detail_profile_df['ratio(%)'] = cpu_detail_profile_df['duration']/total_exec_time * 100
cpu_detail_profile_df = cpu_detail_profile_df[['timestamp','ratio(%)','duration','name']]
print(cpu_detail_profile_df[:20].to_string(index=False))
def vmstat_profile(logdir, cfg, df, features):
_,_,_,_,_,_,df['si'],df['so'],df['bi'],df['bo'],df['in'],df['cs'],_,_,_,_,_=df['name'].str.split('|').str
for col_name in ('si','so','bi','bo','in','cs'):
df[col_name] = df[col_name].str[3:]
vmstat_traces = df[['si','so','bi','bo','in','cs']].astype(float)
vm_bi = vmstat_traces['bi'].mean()
vm_bo = vmstat_traces['bo'].mean()
vm_cs = vmstat_traces['cs'].mean()
vm_in = vmstat_traces['in'].mean()
if cfg.verbose:
print_title('VMSTAT Profiling:')
        print('average bi/s: %d' % int(vm_bi))
        print('average bo/s: %d' % int(vm_bo))
        print('average cs/s: %d' % int(vm_cs))
        print('average in/s: %d' % int(vm_in))
df_feature = pd.DataFrame({ 'name':['vm_bi', 'vm_bo', 'vm_cs', 'vm_in' ],
'value':[vm_bi, vm_bo, vm_cs, vm_in] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def mpstat_profile(logdir, cfg, df, features):
if not cfg.cluster_ip and cfg.verbose:
print_title('MPSTAT Profiling:')
num_cores = int(df['deviceId'].max() + 1)
df_summary = pd.DataFrame( np.zeros((num_cores,5)), columns=['USR','SYS','IDL','IOW','IRQ'])
_,_,_,_,_,df['USR'],df['SYS'],df['IDL'],df['IOW'],df['IRQ'],_ = df["name"].str.split('|').str
df[['USR','SYS','IDL','IOW','IRQ']] = df[['USR','SYS','IDL','IOW','IRQ']].astype(float)
df["dt_all"] = np.where(df["IDL"]==100, 0.1, df["duration"]/((100-df["IDL"])/100.0))
df["t_USR"] = df['dt_all'] * df['USR']/100.0
df["t_SYS"] = df['dt_all'] * df['SYS']/100.0
df["t_IDL"] = df['dt_all'] * df['IDL']/100.0
df["t_IOW"] = df['dt_all'] * df['IOW']/100.0
df["t_IRQ"] = df['dt_all'] * df['IRQ']/100.0
dfs=[]
for i in range(num_cores):
dfs.append(df.loc[df['deviceId'] == float(i)])
for index,dff in enumerate(dfs):
df_summary.iloc[index]['USR'] = dff['t_USR'].sum()
df_summary.iloc[index]['SYS'] = dff['t_SYS'].sum()
df_summary.iloc[index]['IDL'] = dff['t_IDL'].sum()
df_summary.iloc[index]['IRQ'] = dff['t_IRQ'].sum()
df_summary.iloc[index]['IOW'] = dff['t_IOW'].sum()
if not cfg.cluster_ip and cfg.verbose:
print('CPU Utilization (%):')
print('core\tUSR\tSYS\tIDL\tIOW\tIRQ')
for i in range(len(df_summary)):
t_sum = df_summary.iloc[i].sum()
if not cfg.cluster_ip and cfg.verbose:
print('%3d\t%3d\t%3d\t%3d\t%3d\t%3d'%(i,int(100.0*df_summary.iloc[i]['USR']/t_sum),
int(100.0*df_summary.iloc[i]['SYS']/t_sum),
int(100.0*df_summary.iloc[i]['IDL']/t_sum),
int(100.0*df_summary.iloc[i]['IOW']/t_sum),
int(100.0*df_summary.iloc[i]['IRQ']/t_sum) ))
if not cfg.cluster_ip and cfg.verbose:
print('CPU Time (s):')
print('core\tUSR\tSYS\tIDL\tIOW\tIRQ')
for i in range(len(df_summary)):
t_sum = df_summary.iloc[i].sum()
if not cfg.cluster_ip and cfg.verbose:
print('%3d\t%.2lf\t%.2lf\t%.2lf\t%.2lf\t%.2lf'%(i,
df_summary.iloc[i]['USR'],
df_summary.iloc[i]['SYS'],
df_summary.iloc[i]['IDL'],
df_summary.iloc[i]['IOW'],
df_summary.iloc[i]['IRQ'] ))
total_cpu_time = df_summary[['USR','SYS','IRQ']].sum().sum()
cpu_util = int(100*total_cpu_time / (num_cores*cfg.elapsed_time))
if not cfg.cluster_ip and cfg.verbose:
print('Active CPU Time (s): %.3lf' % total_cpu_time)
print('Active CPU ratio (%%): %3d' % cpu_util)
df_feature = pd.DataFrame({ 'name':['num_cores', 'cpu_util'],
'value':[num_cores, cpu_util] },
columns=['name','value'])
features = pd.concat([features, df_feature])
return features
def sofa_analyze(cfg):
print_main_progress('SOFA analyzing...')
filein = []
df_cpu = pd.DataFrame([], columns=cfg.columns)
df_gpu = pd.DataFrame([], columns=cfg.columns)
df_net = pd.DataFrame([], columns=cfg.columns)
df_mpstat = pd.DataFrame([], columns=cfg.columns)
df_vmstat = pd.DataFrame([], columns=cfg.columns)
df_bandwidth = pd.DataFrame([], columns=cfg.columns)
df_blktrace = pd.DataFrame([], columns=cfg.columns)
df_diskstat = pd.DataFrame([], columns=cfg.columns)
df_nvsmi = pd.DataFrame([], columns=cfg.columns)
iter_summary = None
logdir = cfg.logdir
with open(logdir+'/misc.txt') as f:
lines = f.readlines()
elapsed_time = float(lines[0].split()[1])
vcores = int(lines[2].split()[1])
cfg.elapsed_time = float(lines[0].split()[1])
filein_gpu = logdir + "gputrace.csv"
filein_cpu = logdir + "cputrace.csv"
filein_net = logdir + "nettrace.csv"
filein_vmstat = logdir + "vmstat.csv"
filein_mpstat = logdir + "mpstat.csv"
filein_strace = logdir + "strace.csv"
filein_nvsmi = logdir + "nvsmi_trace.csv"
filein_bandwidth = logdir + "netstat.csv"
filein_blktrace = logdir + "blktrace.csv"
filein_diskstat = logdir + "diskstat_vector.csv"
if os.path.isfile('%s/nvlink_topo.txt' % logdir):
with open(logdir + 'nvlink_topo.txt') as f:
lines = f.readlines()
if len(lines) > 0:
title = lines[0]
num_gpus = 1
for word in title.split():
if re.match(r'GPU', word) != None :
num_gpus = num_gpus + 1
print_info(cfg,'# of GPUs: ' + str(num_gpus) )
edges = []
if len(lines) >= num_gpus+1:
for i in range(num_gpus):
connections = lines[1+i].split()
for j in range(len(connections)):
if connections[j] == 'NV1' or connections[j] == 'NV2':
edges.append((i,j-1))
#print('%d connects to %d' % (i, j-1))
ring_found = False
G = nx.DiGraph(edges)
# Try to find ring with its length of num_gpus
for cycle in nx.simple_cycles(G):
if len(cycle) == num_gpus:
if cfg.verbose:
print('One of the recommended ring having length of %d' % len(cycle))
ring_found = True
os.system("mkdir -p sofalog/sofa_hints/")
xring_order = ','.join(map(str, cycle))
with open("sofalog/sofa_hints/xring_order.txt", "w") as f:
f.write('export CUDA_VISIBLE_DEVICES=' + xring_order)
break
# Try to find ring with its length of num_gpus/2
if not ring_found:
for cycle in nx.simple_cycles(G):
if len(cycle) == num_gpus/2:
print(("One of the recommended ring having length of %d" % len(cycle) ))
ring_found = True
os.system("mkdir -p sofalog/sofa_hints/")
xring_order = ','.join(map(str, cycle))
with open("sofalog/sofa_hints/xring_order.txt", "w") as f:
f.write('export CUDA_VISIBLE_DEVICES=' + xring_order)
break
# Construct Performance Features
features = pd.DataFrame({'name':['elapsed_time'], 'value':[cfg.elapsed_time]}, columns=['name','value'])
try:
df_nvsmi = pd.read_csv(filein_nvsmi)
if not df_nvsmi.empty and cfg.spotlight_gpu:
state = 0
sm_high = 0
trigger = 10
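            # Simple hysteresis on GPU0's SM utilization: sm_high grows (up
            # to `trigger`) on samples >= 50% and shrinks on samples < 10%;
            # the region of interest opens when the counter saturates and
            # closes once it drains back to zero.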
for i in range(len(df_nvsmi)):
if df_nvsmi.iloc[i].event == 0 and df_nvsmi.iloc[i].deviceId == 0 :
if df_nvsmi.iloc[i].duration >= 50:
sm_high = min(trigger, sm_high + 1)
if df_nvsmi.iloc[i].duration < 10:
sm_high = max(0, sm_high - 1)
if state == 0 and sm_high == trigger:
state = 1
cfg.roi_begin = df_nvsmi.iloc[i].timestamp
elif state == 1 and sm_high == 0:
state = 0
cfg.roi_end = df_nvsmi.iloc[i].timestamp
#print('sm_high=%d state=%d' % (sm_high, state))
if cfg.roi_end - cfg.roi_begin < 0:
cfg.roi_end = 0
cfg.roi_begin = 0
except IOError:
print_warning(cfg, "nvsmi_trace.csv is not found")
try:
df_cpu = pd.read_csv(filein_cpu)
if not df_cpu.empty:
if cfg.verbose:
cpu_profile(logdir, cfg, df_cpu)
if cfg.enable_swarms and len(df_cpu) > cfg.num_swarms:
df_cpu, swarms = hsg_v2(cfg, df_cpu)
except IOError as e:
df_cpu = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_cpu)
try:
df_strace = pd.read_csv(filein_strace)
if not df_strace.empty:
features = strace_profile(logdir, cfg, df_strace, features)
except IOError as e:
df_strace = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_strace)
try:
df_net = pd.read_csv(filein_net)
if not df_net.empty:
features = net_profile(logdir, cfg, df_net, features)
except IOError as e:
df_net = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_net)
try:
df_bandwidth = pd.read_csv(filein_bandwidth)
if not df_bandwidth.empty:
features = netbandwidth_profile(logdir, cfg, df_bandwidth, features)
except IOError as e:
df_bandwidth = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_bandwidth)
try:
df_blktrace = pd.read_csv(filein_blktrace)
if not df_blktrace.empty:
features = blktrace_latency_profile(logdir, cfg, df_blktrace, features)
except IOError as e:
df_blktrace = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_blktrace)
try:
df_diskstat = pd.read_csv(filein_diskstat)
if not df_diskstat.empty:
features = diskstat_profile(logdir, cfg, df_diskstat, features)
except IOError as e:
df_diskstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_diskstat)
try:
df_vmstat = pd.read_csv(filein_vmstat)
if not df_vmstat.empty:
features = vmstat_profile(logdir, cfg, df_vmstat, features)
except IOError as e:
df_vmstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_vmstat)
try:
df_mpstat = pd.read_csv(filein_mpstat)
if not df_mpstat.empty:
features = mpstat_profile(logdir, cfg, df_mpstat, features)
except IOError as e:
df_mpstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_mpstat)
try:
df_nvsmi = pd.read_csv(filein_nvsmi)
features = nvsmi_profile(logdir, cfg, df_nvsmi, features)
except IOError:
print_warning(cfg, "nvsmi_trace.csv is not found")
try:
df_gpu = pd.read_csv(filein_gpu)
if not df_gpu.empty:
features = gpu_profile(logdir, cfg, df_gpu, features)
except IOError:
df_gpu = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found. If there is no need to profile GPU, just ignore it." % filein_gpu)
try:
if len(df_mpstat)>0:
df_nvsmi.append(df_mpstat.iloc[0])
features = concurrency_breakdown(logdir, cfg, df_mpstat, df_cpu, df_gpu, df_nvsmi, df_bandwidth, features)
except IOError as e:
print_warning(cfg, "Some files are not found, which are needed for concurrency_breakdown analysis")
if cfg.enable_aisi:
selected_pattern, iter_summary, features = sofa_aisi(logdir, cfg, df_cpu, df_gpu, df_strace, df_mpstat, features)
if 'IS_SOFA_ON_HAIHUB' not in os.environ or os.environ['IS_SOFA_ON_HAIHUB'] == 'no':
print_title('Final Performance Features')
print('%s%s%s%s' % ('ID'.ljust(10),'Feature'.ljust(30),'Value'.ljust(20),'Unit'.ljust(20)) )
for i in range(len(features)):
name = features.iloc[i]['name']
value = features.iloc[i]['value']
print('%s%s%s' % (str(i).ljust(10), name.ljust(30), ('%.3lf'%value).ljust(20)))
if cfg.spotlight_gpu:
try:
print('Elapsed hotspot time: %.3lf' % features[features.name=='elapsed_hotspot_time'].value)
except:
                print_warning(cfg, 'elapsed_hotspot_time is not defined.')
if cfg.potato_server:
if cfg.potato_server.find(':') == -1:
cfg.potato_server = cfg.potato_server + ':50051'
hint, docker_image = get_hint(cfg.potato_server, features)
df_report = pd.read_json(hint, orient='table')
file_potato_report = cfg.logdir + 'potato_report.html'
# Export report to HTML file.
df_report.to_html(file_potato_report )
with open(file_potato_report, 'a') as f:
f.write('<head><link rel=stylesheet type="text/css" href="potato_report.css"></head>')
print_title('POTATO Feedback')
print('%s%s%s%s' % ('ID'.ljust(5), 'Metric'.ljust(20), 'Value'.ljust(10), 'Reference-Value'.ljust(30) ) )
for i in range(len(df_report)):
metric = df_report.iloc[i]['Metric']
if metric != 'hybrid_suggestion':
value = df_report.iloc[i]['Value']
ref_value = df_report.iloc[i]['ReferenceValue']
print('%s%s%s%s' % (str(i).ljust(5), metric.ljust(20), ('%.3lf'%value).ljust(20), str(ref_value).ljust(30)))
print('\n')
print_hint('General Suggestions:')
for i in range(len(df_report)):
metric = df_report.iloc[i]['Metric']
if metric != 'hybrid_suggestion':
suggestion = df_report.iloc[i]['Suggestion']
print('%d. %s' % (i, suggestion))
print('\n')
print_hint('Framework-specific Optimization Suggestions:')
for i in range(len(df_report)):
metric = df_report.iloc[i]['Metric']
if metric == 'hybrid_suggestion':
suggestion = df_report.iloc[i]['Suggestion']
print('%d. %s' % (i, suggestion))
#print(df_report[['Metric', 'Value', 'Reference Value']])
#print(df_report[['Suggestion']])
#print('Tag of optimal image recommended from POTATO: ' + highlight(docker_image))
print('\n')
print_hint('Please re-launch KubeFlow Jupyter-notebook to have suggested images or resources if necessary.')
sofa_home = os.path.dirname(os.path.realpath(__file__))
subprocess.Popen(
['bash', '-c', 'cp %s/../sofaboard/* %s;' % (sofa_home, cfg.logdir)])
subprocess.Popen(['sleep', '2'])
print('\n\n')
print('Complete!!')
def cluster_analyze(cfg):
if cfg.verbose:
print_title('Cluster Network Profiling :')
cluster = cfg.cluster_ip.split(',')
summary_net = pd.DataFrame([], columns=['Source', 'Destination', 'Amount', 'Percentage of a Node'])
summary_compute = pd.DataFrame([], columns=['gpu_sm_util','gpu_mem_util','cpu_util'])
summary_band = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg'])
all = []
for i, ip in enumerate(cluster):
features = pd.DataFrame({'name':['elapsed_time'],
'value':[cfg.elapsed_time]},
columns=['name','value'])
node = 'node ' + str(i)
if cfg.verbose:
print('node ' + str(i) + ' is ' + ip)
logdir = tmp_dir[0:-1] + '-' + ip + '/'
filein_net = logdir + "nettrace.csv"
filein_mpstat = logdir + "mpstat.csv"
filein_nvsmi = logdir + "nvsmi_trace.csv"
filein_bandwidth = logdir + "netstat.csv"
with open(logdir+'/misc.txt') as f:
lines = f.readlines()
elapsed_time = float(lines[0].split()[1])
vcores = int(lines[2].split()[1])
cfg.elapsed_time = float(lines[0].split()[1])
try:
df_net = pd.read_csv(filein_net)
features = net_profile(logdir, cfg, df_net, features)
except IOError as e:
df_net = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_net)
try:
df_mpstat = pd.read_csv(filein_mpstat)
features = mpstat_profile(logdir, cfg, df_mpstat, features)
except IOError as e:
df_mpstat = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_mpstat)
try:
df_nvsmi = pd.read_csv(filein_nvsmi)
features = nvsmi_profile(logdir, cfg, df_nvsmi, features)
except IOError:
print_warning(cfg, "nvsmi_trace.csv is not found")
try:
df_bandwidth = pd.read_csv(filein_bandwidth)
features = netbandwidth_profile(logdir, cfg, df_bandwidth, features)
except IOError as e:
df_bandwidth = pd.DataFrame([], columns=cfg.columns)
print_warning(cfg, "%s is not found" % filein_bandwidth)
sm = int(features[features['name'] == 'gpu_sm_util']['value'])
mem = int(features[features['name'] == 'gpu_mem_util']['value'])
cpu = int(features[features['name'] == 'cpu_util']['value'])
sm_mem_cpu = [sm, mem, cpu]
compute_tmp = pd.DataFrame([sm_mem_cpu], columns = ['gpu_sm_util', 'gpu_mem_util', 'cpu_util'])
summary_compute = pd.concat([summary_compute, pd.concat([compute_tmp], keys=[node])])
net_tmp = pd.read_csv(logdir + "netrank.csv")
summary_net = pd.concat([summary_net, pd.concat([net_tmp], keys=[node])])
# for bandwidth report
tx = df_bandwidth['event'] == float(0)
rx = df_bandwidth['event'] == float(1)
tx_tmp = [convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.25)),
convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.5)),
convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.75)),
convertbytes(df_bandwidth[tx]['bandwidth'].mean())]
rx_tmp = [convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.25)),
convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.5)),
convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.75)),
convertbytes(df_bandwidth[rx]['bandwidth'].mean())]
band_tmp = pd.DataFrame([tx_tmp], columns = ['Q1', 'Q2', 'Q3', 'Avg'], index = ['tx'])
rx_pd = pd.DataFrame([rx_tmp], columns = ['Q1', 'Q2', 'Q3', 'Avg'], index = ['rx'])
band_tmp = pd.concat([band_tmp, rx_pd])
summary_band = pd.concat([summary_band, pd.concat([band_tmp], keys=[node])])
if cfg.verbose:
with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
print('Ranked Network Traffic : \n', summary_net, '\n')
print('Cluster Bandwidth Quartile: \n', summary_band)
print_title('Cluster Computation Profiling:')
print(summary_compute)
|
apache-2.0
| 536,236,055,129,862,100
| 43.556728
| 259
| 0.541718
| false
| 3.342857
| false
| false
| false
|
kenmcc/mypywws
|
src/pywws/Process.py
|
1
|
29489
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pywws - Python software for USB Wireless Weather Stations
# http://github.com/jim-easterbrook/pywws
# Copyright (C) 2008-14 Jim Easterbrook jim@jim-easterbrook.me.uk
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Generate hourly, daily & monthly summaries of raw weather station
data
::
%s
This module takes raw weather station data (typically sampled every
five or ten minutes) and generates hourly, daily and monthly summary
data, which is useful when creating tables and graphs.
Before computing the data summaries, raw data is "calibrated" using a
user-programmable function. See :doc:`pywws.calib` for details.
The hourly data is derived from all the records in one hour, e.g. from
18:00:00 to 18:59:59, and is given the index of the last complete
record in that hour.
The daily data summarises the weather over a 24 hour period typically
ending at 2100 or 0900 hours, local (non DST) time, though midnight is
another popular convention. It is also indexed by the last complete
record in the period. Daytime and nighttime, as used when computing
maximum and minimum temperatures, are assumed to start at 0900 and
2100 local time, or 1000 and 2200 when DST is in effect, regardless of
the meteorological day.
To adjust the meteorological day to your preference, or that used by
your local official weather station, edit the "day end hour" line in
your ``weather.ini`` file, then run Reprocess.py to regenerate the
summaries.
Monthly summary data is computed from the daily summary data. If the
meteorological day does not end at midnight, then each month may begin
and end up to 12 hours before or after midnight.
Wind speed data is averaged over the hour (or day) and the maximum
gust speed during the hour (or day) is recorded. The predominant wind
direction is calculated using vector arithmetic.
Rainfall is converted from the raw "total since last reset" figure to
a more useful total in the last hour, day or month.
"""
from __future__ import absolute_import
__docformat__ = "restructuredtext en"
__usage__ = """
usage: python -m pywws.Process [options] data_dir
options are:
-h or --help display this help
-v or --verbose increase number of informative messages
data_dir is the root directory of the weather data
"""
__doc__ %= __usage__
__usage__ = __doc__.split('\n')[0] + __usage__
from collections import deque
from datetime import date, datetime, timedelta
import getopt
import logging
import math
import os
import sys
from .calib import Calib
from . import DataStore
from .Logger import ApplicationLogger
from .TimeZone import STDOFFSET, HOUR
SECOND = timedelta(seconds=1)
TIME_ERR = timedelta(seconds=45)
MINUTEx5 = timedelta(minutes=5)
HOURx3 = timedelta(hours=3)
DAY = timedelta(hours=24)
WEEK = timedelta(days=7)
class Average(object):
"""Compute average of multiple data values."""
def __init__(self):
self.acc = 0.0
self.count = 0
def add(self, value):
if value is None:
return
self.acc += value
self.count += 1
def result(self):
if self.count == 0:
return None
return self.acc / float(self.count)
class Minimum(object):
"""Compute minimum value and timestamp of multiple data values."""
def __init__(self):
self.value = None
self.time = None
def add(self, value, time):
if not self.time or value <= self.value:
self.value = value
self.time = time
def result(self):
if self.time:
return self.value, self.time
return None, None
class Maximum(object):
"""Compute maximum value and timestamp of multiple data values."""
def __init__(self):
self.value = None
self.time = None
def add(self, value, time):
if not self.time or value > self.value:
self.value = value
self.time = time
def result(self):
if self.time:
return self.value, self.time
return None, None
sin_LUT = map(
lambda x: math.sin(math.radians(float(x * 360) / 16.0)), range(16))
cos_LUT = map(
lambda x: math.cos(math.radians(float(x * 360) / 16.0)), range(16))
class WindFilter(object):
"""Compute average wind speed and direction.
The wind speed and direction of each data item is converted to a
vector before averaging, so the result reflects the dominant wind
direction during the time period covered by the data.
Setting the ``decay`` parameter converts the filter from a simple
averager to one where the most recent sample carries the highest
weight, and earlier samples have a lower weight according to how
long ago they were.
This process is an approximation of "exponential smoothing". See
`Wikipedia <http://en.wikipedia.org/wiki/Exponential_smoothing>`_
for a detailed discussion.
The parameter ``decay`` corresponds to the value ``(1 - alpha)``
in the Wikipedia description. Because the weather data being
smoothed may not be at regular intervals this parameter is the
decay over 5 minutes. Weather data at other intervals will have
its weight scaled accordingly.
The return value is a (speed, direction) tuple.
:param decay: filter coefficient decay rate.
:type decay: float
:rtype: (float, float)
"""
def __init__(self, decay=1.0):
self.decay = decay
self.Ve = 0.0
self.Vn = 0.0
self.total = 0.0
self.weight = 1.0
self.total_weight = 0.0
self.last_idx = None
def add(self, data):
direction = data['wind_dir']
speed = data['wind_ave']
if direction is None or speed is None:
return
if self.last_idx and self.decay != 1.0:
interval = data['idx'] - self.last_idx
assert interval.days == 0
decay = self.decay
if interval != MINUTEx5:
decay = decay ** (float(interval.seconds) /
float(MINUTEx5.seconds))
self.weight = self.weight / decay
self.last_idx = data['idx']
speed = speed * self.weight
if isinstance(direction, int):
self.Ve -= speed * sin_LUT[direction]
self.Vn -= speed * cos_LUT[direction]
else:
direction = math.radians(float(direction) * 22.5)
self.Ve -= speed * math.sin(direction)
self.Vn -= speed * math.cos(direction)
self.total += speed
self.total_weight += self.weight
def result(self):
if self.total_weight == 0.0:
return (None, None)
return (self.total / self.total_weight,
(math.degrees(math.atan2(self.Ve, self.Vn)) + 180.0) / 22.5)
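# Illustrative sketch of how WindFilter is fed and read back: the sample
# values below are made up, but the dict keys ('idx', 'wind_ave',
# 'wind_dir') are the ones add() expects, and wind_dir is in 22.5 degree
# compass points (0 = N, 4 = E, ...), as is the returned direction.
def _wind_filter_example():
    samples = [
        {'idx': datetime(2013, 1, 1, 12, 0), 'wind_ave': 3.0, 'wind_dir': 0},
        {'idx': datetime(2013, 1, 1, 12, 5), 'wind_ave': 5.0, 'wind_dir': 1},
        {'idx': datetime(2013, 1, 1, 12, 10), 'wind_ave': 4.0, 'wind_dir': 15},
    ]
    wind_fil = WindFilter(decay=0.9)
    for sample in samples:
        wind_fil.add(sample)
    # result() returns (average speed, dominant direction); with decay < 1
    # the most recent samples carry the highest weight
    return wind_fil.result()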
class HourAcc(object):
"""'Accumulate' raw weather data to produce hourly summary.
Compute average wind speed and maximum wind gust, find dominant
wind direction and compute total rainfall.
"""
def __init__(self, last_rain):
self.logger = logging.getLogger('pywws.Process.HourAcc')
self.last_rain = last_rain
self.copy_keys = ['idx', 'hum_in', 'temp_in', 'hum_out', 'temp_out',
'abs_pressure', 'rel_pressure', 'temp_bedroom','temp_kitchen', "temp_bed2"]
self.reset()
def reset(self):
self.wind_fil = WindFilter()
self.wind_gust = (-2.0, None)
self.rain = 0.0
self.retval = {'idx' : None, 'temp_out' : None}
def add_raw(self, data):
idx = data['idx']
self.wind_fil.add(data)
wind_gust = data['wind_gust']
if wind_gust is not None and wind_gust > self.wind_gust[0]:
self.wind_gust = (wind_gust, idx)
rain = data['rain']
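# rain is reported as a cumulative "total since last reset"; accumulate
# only the positive difference from the previous reading, warning about
# (and skipping) counter resets and implausibly large jumps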
if rain is not None:
if self.last_rain is not None:
diff = rain - self.last_rain
if diff < -0.001:
self.logger.warning(
'%s rain reset %.1f -> %.1f', str(idx), self.last_rain, rain)
elif diff > float(data['delay'] * 5):
# rain exceeds 5mm / minute, assume corrupt data and ignore it
self.logger.warning(
'%s rain jump %.1f -> %.1f', str(idx), self.last_rain, rain)
else:
self.rain += max(0.0, diff)
self.last_rain = rain
# copy some current readings
if 'illuminance' in data and not 'illuminance' in self.copy_keys:
self.copy_keys.append('illuminance')
self.copy_keys.append('uv')
# if near the end of the hour, ignore 'lost contact' readings
if (data['idx'].minute < 45 or data['temp_out'] is not None or
self.retval['temp_out'] is None):
for key in self.copy_keys:
if key in data:
self.retval[key] = data[key]
def result(self):
if not self.retval['idx']:
return None
self.retval['wind_ave'], self.retval['wind_dir'] = self.wind_fil.result()
if self.wind_gust[1]:
self.retval['wind_gust'] = self.wind_gust[0]
else:
self.retval['wind_gust'] = None
self.retval['rain'] = self.rain
return self.retval
class DayAcc(object):
"""'Accumulate' weather data to produce daily summary.
Compute average wind speed, maximum wind gust and daytime max &
nighttime min temperatures, find dominant wind direction and
compute total rainfall.
Daytime is assumed to be 0900-2100 and nighttime to be 2100-0900,
local time (1000-2200 and 2200-1000 during DST), regardless of the
"day end hour" setting.
"""
def __init__(self):
self.logger = logging.getLogger('pywws.Process.DayAcc')
self.has_illuminance = False
self.ave = {}
self.max = {}
self.min = {}
self.reset()
def reset(self):
self.wind_fil = WindFilter()
self.wind_gust = (-1.0, None)
self.rain = 0.0
for i in ('temp_in', 'temp_out', 'hum_in', 'hum_out',
'abs_pressure', 'rel_pressure', 'temp_bedroom', 'temp_kitchen', "temp_bed2"):
self.ave[i] = Average()
self.max[i] = Maximum()
self.min[i] = Minimum()
for i in ('illuminance', 'uv'):
self.ave[i] = Average()
self.max[i] = Maximum()
self.retval = dict()
def add_raw(self, data):
idx = data['idx']
local_hour = (idx + STDOFFSET).hour
wind_gust = data['wind_gust']
if wind_gust is not None and wind_gust > self.wind_gust[0]:
self.wind_gust = (wind_gust, idx)
for i in ('temp_in', 'temp_out', 'temp_bedroom', 'temp_kitchen', "temp_bed2"):
#if i in data:
try:
temp = data[i]
except:
temp = 0
if temp is not None:
self.ave[i].add(temp)
if local_hour >= 9 and local_hour < 21:
# daytime max temperature
self.max[i].add(temp, idx)
else:
# nighttime min temperature
self.min[i].add(temp, idx)
for i in ('hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'):
if i in data:
value = data[i]
if value is not None:
self.ave[i].add(value)
self.max[i].add(value, idx)
self.min[i].add(value, idx)
if 'illuminance' in data:
self.has_illuminance = True
for i in ('illuminance', 'uv'):
if i in data:
value = data[i]
if value is not None:
self.ave[i].add(value)
self.max[i].add(value, idx)
def add_hourly(self, data):
self.wind_fil.add(data)
rain = data['rain']
if rain is not None:
self.rain += rain
self.retval['idx'] = data['idx']
def result(self):
if not self.retval:
return None
self.retval['wind_ave'], self.retval['wind_dir'] = self.wind_fil.result()
if self.wind_gust[1]:
self.retval['wind_gust'] = self.wind_gust[0]
else:
self.retval['wind_gust'] = None
self.retval['wind_gust_t'] = self.wind_gust[1]
self.retval['rain'] = self.rain
for i in ('temp_in', 'temp_out', 'hum_in', 'hum_out',
'abs_pressure', 'rel_pressure', 'temp_bedroom', 'temp_kitchen', "temp_bed2"):
self.retval['%s_ave' % i] = self.ave[i].result()
(self.retval['%s_max' % i],
self.retval['%s_max_t' % i]) = self.max[i].result()
(self.retval['%s_min' % i],
self.retval['%s_min_t' % i]) = self.min[i].result()
if self.has_illuminance:
for i in ('illuminance', 'uv'):
self.retval['%s_ave' % i] = self.ave[i].result()
(self.retval['%s_max' % i],
self.retval['%s_max_t' % i]) = self.max[i].result()
return self.retval
class MonthAcc(object):
"""'Accumulate' daily weather data to produce monthly summary.
Compute daytime max & nighttime min temperatures.
"""
def __init__(self, rain_day_threshold):
self.rain_day_threshold = rain_day_threshold
self.has_illuminance = False
self.ave = {}
self.min = {}
self.max = {}
self.min_lo = {}
self.min_hi = {}
self.min_ave = {}
self.max_lo = {}
self.max_hi = {}
self.max_ave = {}
self.reset()
def reset(self):
for i in ('temp_in', 'temp_out', 'temp_bedroom', 'temp_kitchen', "temp_bed2"):
self.ave[i] = Average()
self.min_lo[i] = Minimum()
self.min_hi[i] = Maximum()
self.min_ave[i] = Average()
self.max_lo[i] = Minimum()
self.max_hi[i] = Maximum()
self.max_ave[i] = Average()
for i in ('hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'):
self.ave[i] = Average()
self.max[i] = Maximum()
self.min[i] = Minimum()
for i in ('illuminance', 'uv'):
self.ave[i] = Average()
self.max_lo[i] = Minimum()
self.max_hi[i] = Maximum()
self.max_ave[i] = Average()
self.wind_fil = WindFilter()
self.wind_gust = (-1.0, None)
self.rain = 0.0
self.rain_days = 0
self.valid = False
def add_daily(self, data):
self.idx = data['idx']
for i in ('temp_in', 'temp_out', 'temp_bedroom', 'temp_kitchen', "temp_bed2"):
try:
temp = data['%s_ave' % i]
except:
temp = 0
if temp is not None:
self.ave[i].add(temp)
try:
temp = data['%s_min' % i]
except:
temp = 0
if temp is not None:
try:
self.min_lo[i].add(temp, data['%s_min_t' % i])
except:
self.min_lo[i].add(temp, 0)
try:
self.min_hi[i].add(temp, data['%s_min_t' % i])
except:
self.min_hi[i].add(temp, 0)
self.min_ave[i].add(temp)
try:
temp = data['%s_max' % i]
except:
temp = 0
if temp is not None:
try:
self.max_lo[i].add(temp, data['%s_max_t' % i])
except:
self.max_lo[i].add(temp, 0)
try:
self.max_hi[i].add(temp, data['%s_max_t' % i])
except:
self.max_hi[i].add(temp, 0)
self.max_ave[i].add(temp)
for i in ('hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'):
value = data['%s_ave' % i]
if value is not None:
self.ave[i].add(value)
value = data['%s_min' % i]
if value is not None:
self.min[i].add(value, data['%s_min_t' % i])
value = data['%s_max' % i]
if value is not None:
self.max[i].add(value, data['%s_max_t' % i])
self.wind_fil.add(data)
wind_gust = data['wind_gust']
if wind_gust is not None and wind_gust > self.wind_gust[0]:
self.wind_gust = (wind_gust, data['wind_gust_t'])
if 'illuminance_ave' in data:
self.has_illuminance = True
for i in ('illuminance', 'uv'):
value = data['%s_ave' % i]
if value is not None:
self.ave[i].add(value)
value = data['%s_max' % i]
if value is not None:
self.max_lo[i].add(value, data['%s_max_t' % i])
self.max_hi[i].add(value, data['%s_max_t' % i])
self.max_ave[i].add(value)
self.rain += data['rain']
if data['rain'] >= self.rain_day_threshold:
self.rain_days += 1
self.valid = True
def result(self):
if not self.valid:
return None
result = {}
result['idx'] = self.idx
result['rain'] = self.rain
result['rain_days'] = self.rain_days
for i in ('temp_in', 'temp_out', 'temp_bedroom', 'temp_kitchen', "temp_bed2"):
result['%s_ave' % i] = self.ave[i].result()
result['%s_min_ave' % i] = self.min_ave[i].result()
(result['%s_min_lo' % i],
result['%s_min_lo_t' % i]) = self.min_lo[i].result()
(result['%s_min_hi' % i],
result['%s_min_hi_t' % i]) = self.min_hi[i].result()
result['%s_max_ave' % i] = self.max_ave[i].result()
(result['%s_max_lo' % i],
result['%s_max_lo_t' % i]) = self.max_lo[i].result()
(result['%s_max_hi' % i],
result['%s_max_hi_t' % i]) = self.max_hi[i].result()
for i in ('hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'):
result['%s_ave' % i] = self.ave[i].result()
(result['%s_max' % i],
result['%s_max_t' % i]) = self.max[i].result()
(result['%s_min' % i],
result['%s_min_t' % i]) = self.min[i].result()
result['wind_ave'], result['wind_dir'] = self.wind_fil.result()
if self.wind_gust[1]:
result['wind_gust'] = self.wind_gust[0]
else:
result['wind_gust'] = None
result['wind_gust_t'] = self.wind_gust[1]
if self.has_illuminance:
for i in ('illuminance', 'uv'):
result['%s_ave' % i] = self.ave[i].result()
result['%s_max_ave' % i] = self.max_ave[i].result()
(result['%s_max_lo' % i],
result['%s_max_lo_t' % i]) = self.max_lo[i].result()
(result['%s_max_hi' % i],
result['%s_max_hi_t' % i]) = self.max_hi[i].result()
return result
def calibrate_data(logger, params, raw_data, calib_data):
"""'Calibrate' raw data, using a user-supplied function."""
start = calib_data.before(datetime.max)
if start is None:
start = datetime.min
before = raw_data.before(start)
start = raw_data.after(start)# + SECOND)
if start is None and before is None:
return start
else:
start = before
del calib_data[start:]
calibrator = Calib(params, raw_data)
count = 0
for data in raw_data[start:]:
idx = data['idx']
count += 1
if count % 10000 == 0:
logger.info("calib: %s", idx.isoformat(' '))
elif count % 500 == 0:
logger.debug("calib: %s", idx.isoformat(' '))
calib_data[idx] = calibrator.calib(data)
return start
def generate_hourly(logger, calib_data, hourly_data, process_from):
"""Generate hourly summaries from calibrated data."""
start = hourly_data.before(datetime.max)
if start is None:
start = datetime.min
start = calib_data.after(start + SECOND)
if process_from:
if start:
start = min(start, process_from)
else:
start = process_from
if start is None:
return start
# set start of hour in local time (not all time offsets are integer hours)
start += STDOFFSET + timedelta(minutes=5)
start = start.replace(minute=0, second=0)
start -= STDOFFSET
#del hourly_data[start:]
# preload pressure history, and find last valid rain
prev = None
pressure_history = deque()
last_rain = None
for data in calib_data[start - HOURx3:start]:
if data['rel_pressure']:
pressure_history.append((data['idx'], data['rel_pressure']))
if data['rain'] is not None:
last_rain = data['rain']
prev = data
# iterate over data in one hour chunks
stop = calib_data.before(datetime.max)
hour_start = start
acc = HourAcc(last_rain)
count = 0
while hour_start <= stop:
count += 1
if count % 1008 == 0:
logger.info("hourly: %s", hour_start.isoformat(' '))
elif count % 24 == 0:
logger.debug("hourly: %s", hour_start.isoformat(' '))
hour_end = hour_start + HOUR
acc.reset()
for data in calib_data[hour_start:hour_end]:
if data['rel_pressure']:
pressure_history.append((data['idx'], data['rel_pressure']))
if prev:
err = data['idx'] - prev['idx']
#if abs(err - timedelta(minutes=data['delay'])) > TIME_ERR:
# logger.info('unexpected data interval %s %s',
# data['idx'].isoformat(' '), str(err))
acc.add_raw(data)
prev = data
new_data = acc.result()
if new_data and new_data['idx'].minute >= 1: # was 9
# compute pressure trend
new_data['pressure_trend'] = None
if new_data['rel_pressure']:
target = new_data['idx'] - HOURx3
while (len(pressure_history) >= 2 and
abs(pressure_history[0][0] - target) >
abs(pressure_history[1][0] - target)):
pressure_history.popleft()
if (pressure_history and
abs(pressure_history[0][0] - target) < HOUR):
new_data['pressure_trend'] = (
new_data['rel_pressure'] - pressure_history[0][1])
# store new hourly data
t = new_data['idx']# + timedelta(minutes=5)
# round up to the next hour
t = t +timedelta(minutes=60)
t = t.replace(minute=0, second=0)
print "INDEX:", t
new_data['idx'] = t
hourly_data[t] = new_data
hour_start = hour_end
return start
def generate_daily(logger, day_end_hour,
calib_data, hourly_data, daily_data, process_from):
"""Generate daily summaries from calibrated and hourly data."""
start = daily_data.before(datetime.max)
if start is None:
start = datetime.min
start = calib_data.after(start + SECOND)
if process_from:
if start:
start = min(start, process_from)
else:
start = process_from
if start is None:
return start
# round to start of this day, in local time
start += STDOFFSET
if start.hour < day_end_hour:
start = start - DAY
start = start.replace(hour=day_end_hour, minute=0, second=0)
start -= STDOFFSET
del daily_data[start:]
stop = calib_data.before(datetime.max)
day_start = start
acc = DayAcc()
count = 0
while day_start <= stop:
count += 1
if count % 30 == 0:
logger.info("daily: %s", day_start.isoformat(' '))
else:
logger.debug("daily: %s", day_start.isoformat(' '))
day_end = day_start + DAY
acc.reset()
for data in calib_data[day_start:day_end]:
acc.add_raw(data)
for data in hourly_data[day_start:day_end]:
acc.add_hourly(data)
new_data = acc.result()
if new_data:
new_data['start'] = day_start
daily_data[new_data['idx']] = new_data
day_start = day_end
return start
def generate_monthly(logger, rain_day_threshold, day_end_hour,
daily_data, monthly_data, process_from):
"""Generate monthly summaries from daily data."""
start = monthly_data.before(datetime.max)
if start is None:
start = datetime.min
start = daily_data.after(start + SECOND)
if process_from:
if start:
start = min(start, process_from)
else:
start = process_from
if start is None:
return start
# set start to start of first day of month (local time)
start += STDOFFSET
start = start.replace(day=1, hour=day_end_hour, minute=0, second=0)
if day_end_hour >= 12:
# month actually starts on the last day of previous month
start -= DAY
start -= STDOFFSET
del monthly_data[start:]
stop = daily_data.before(datetime.max)
month_start = start
acc = MonthAcc(rain_day_threshold)
count = 0
while month_start <= stop:
count += 1
if count % 12 == 0:
logger.info("monthly: %s", month_start.isoformat(' '))
else:
logger.debug("monthly: %s", month_start.isoformat(' '))
month_end = month_start + WEEK
if month_end.month < 12:
month_end = month_end.replace(month=month_end.month+1)
else:
month_end = month_end.replace(month=1, year=month_end.year+1)
month_end = month_end - WEEK
acc.reset()
for data in daily_data[month_start:month_end]:
acc.add_daily(data)
new_data = acc.result()
if new_data:
new_data['start'] = month_start
monthly_data[new_data['idx']] = new_data
month_start = month_end
return start
def Process(params,
raw_data, calib_data, hourly_data, daily_data, monthly_data):
"""Generate summaries from raw weather station data.
The meteorological day end (typically 2100 or 0900 local time) is
set in the preferences file ``weather.ini``. The default value is
2100 (2200 during DST), following the historical convention for
weather station readings.
"""
logger = logging.getLogger('pywws.Process')
logger.info('Generating summary data')
# get time of last record
last_raw = raw_data.before(datetime.max)
print "LAST RAW is ", last_raw
if last_raw is None:
raise IOError('No data found. Check data directory parameter.')
# get daytime end hour (in local time)
day_end_hour = eval(params.get('config', 'day end hour', '21')) % 24
# get other config
rain_day_threshold = eval(params.get('config', 'rain day threshold', '0.2'))
# calibrate raw data
start = calibrate_data(logger, params, raw_data, calib_data)
# generate hourly data
print "Generating hourly data from ", start
start = generate_hourly(logger, calib_data, hourly_data, start)
# generate daily data
start = generate_daily(logger, day_end_hour,
calib_data, hourly_data, daily_data, start)
# generate monthly data
generate_monthly(logger, rain_day_threshold, day_end_hour,
daily_data, monthly_data, start)
return 0
def main(argv=None):
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(argv[1:], "hv", ['help', 'verbose'])
except getopt.error, msg:
print >>sys.stderr, 'Error: %s\n' % msg
print >>sys.stderr, __usage__.strip()
return 1
# process options
verbose = 0
for o, a in opts:
if o in ('-h', '--help'):
print __usage__.strip()
return 0
elif o in ('-v', '--verbose'):
verbose += 1
# check arguments
if len(args) != 1:
print >>sys.stderr, 'Error: 1 argument required\n'
print >>sys.stderr, __usage__.strip()
return 2
logger = ApplicationLogger(verbose)
data_dir = args[0]
return Process(DataStore.params(data_dir),
DataStore.data_store(data_dir),
DataStore.calib_store(data_dir),
DataStore.hourly_store(data_dir),
DataStore.daily_store(data_dir),
DataStore.monthly_store(data_dir))
if __name__ == "__main__":
sys.exit(main())
|
gpl-2.0
| -5,708,486,361,000,318,000
| 35.953634
| 101
| 0.555733
| false
| 3.624954
| false
| false
| false
|
xolox/python-linux-utils
|
linux_utils/tabfile.py
|
1
|
2307
|
# linux-utils: Linux system administration tools for Python.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: February 9, 2020
# URL: https://linux-utils.readthedocs.io
"""Generic parsing of Linux configuration files like ``/etc/fstab`` and ``/etc/crypttab``."""
# Standard library modules.
import re
# External dependencies.
from property_manager import PropertyManager, mutable_property
# Modules included in our package.
from linux_utils import coerce_context
# Public identifiers that require documentation.
__all__ = (
'TabFileEntry',
'parse_tab_file',
)
def parse_tab_file(filename, context=None, encoding='UTF-8'):
"""
Parse a Linux configuration file like ``/etc/fstab`` or ``/etc/crypttab``.
:param filename: The absolute pathname of the file to parse (a string).
:param context: See :func:`.coerce_context()` for details.
:param encoding: The name of the text encoding of the file (a string).
:returns: A generator of :class:`TabFileEntry` objects.
This function strips comments (the character ``#`` until the end of
the line) and splits each line into tokens separated by whitespace.
"""
context = coerce_context(context)
contents = context.read_file(filename).decode(encoding)
for line_number, line in enumerate(contents.splitlines(), start=1):
# Strip comments.
line = re.sub('#.*', '', line)
# Tokenize input.
tokens = line.split()
if tokens:
yield TabFileEntry(
context=context,
configuration_file=filename,
line_number=line_number,
tokens=tokens,
)
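# Illustrative usage sketch for parse_tab_file(): list the device, mount
# point and filesystem type columns of the local fstab, relying on the
# default context argument. The path and the three-column assumption are
# examples only; field meanings follow fstab(5).
def _example_list_fstab():
    for entry in parse_tab_file('/etc/fstab'):
        if len(entry.tokens) >= 3:
            device, mount_point, fs_type = entry.tokens[:3]
            print('%s on %s type %s' % (device, mount_point, fs_type))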
class TabFileEntry(PropertyManager):
"""Container for the results of :func:`parse_tab_file()`."""
@mutable_property
def context(self):
"""The execution context from which the configuration file was retrieved."""
@mutable_property
def configuration_file(self):
"""The name of the configuration file from which this entry was parsed (a string)."""
@mutable_property
def line_number(self):
"""The line number from which this entry was parsed (an integer)."""
@mutable_property
def tokens(self):
"""The tokens split on whitespace (a nonempty list of strings)."""
|
mit
| 3,480,510,046,944,304,600
| 31.492958
| 93
| 0.662765
| false
| 4.202186
| true
| false
| false
|
sassoftware/rmake3
|
rmake/worker/resolvesource.py
|
1
|
30654
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
from conary.deps import deps
from conary.local import deptable
from conary.conaryclient import resolve
from conary.repository import trovesource
from rmake.lib import flavorutil
class TroveSourceMesh(trovesource.SearchableTroveSource):
def __init__(self, extraSource, mainSource, repos):
trovesource.SearchableTroveSource.__init__(self)
self.extraSource = extraSource
self.mainSource = mainSource
self.repos = repos
trovesource.SearchableTroveSource.__init__(self)
self.searchAsRepository()
for source in self.mainSource, self.repos, self.extraSource:
if not source:
continue
self._allowNoLabel = source._allowNoLabel
self._bestFlavor = source._bestFlavor
self._getLeavesOnly = source._getLeavesOnly
self._flavorCheck = source._flavorCheck
break
self.sources = [ self.extraSource]
if self.mainSource:
self.sources.append(self.mainSource)
if self.repos:
self.sources.append(self.repos)
def __getattr__(self, key):
if self.repos:
return getattr(self.repos, key)
return getattr(self.mainSource, key)
def getFileVersions(self, *args, **kw):
if self.repos:
return self.repos.getFileVersions(*args, **kw)
return self.mainSource.getFileVersions(*args, **kw)
def close(self):
pass
def hasTroves(self, troveList):
if self.repos:
results = self.repos.hasTroves(troveList)
if isinstance(results, dict):
results = [ results[x] for x in troveList ]
else:
results = [ False for x in troveList ]
if self.extraSource:
hasTroves = self.extraSource.hasTroves(troveList)
results = [ x[0] or x[1] for x in itertools.izip(results,
hasTroves) ]
if self.mainSource:
hasTroves = self.mainSource.hasTroves(troveList)
results = [ x[0] or x[1] for x in itertools.izip(results,
hasTroves) ]
return dict(itertools.izip(troveList, results))
def trovesByName(self, name):
if self.mainSource:
return list(set(self.mainSource.trovesByName(name))
| set(self.extraSource.trovesByName(name)))
else:
return self.extraSource.trovesByName(name)
def getTroves(self, troveList, *args, **kw):
if self.repos:
return self.repos.getTroves(troveList, *args, **kw)
else:
return self.mainSource.getTroves(troveList, *args, **kw)
def _mergeTroveQuery(self, resultD, response):
if isinstance(resultD, dict):
for troveName, troveVersions in response.iteritems():
if not resultD.has_key(troveName):
resultD[troveName] = {}
versionDict = resultD[troveName]
for version, flavors in troveVersions.iteritems():
if version not in versionDict:
versionDict[version] = []
resultD[troveName][version].extend(flavors)
else:
if not resultD:
for resultList in response:
resultD.append(list(resultList))
else:
for idx, resultList in enumerate(response):
resultD[idx].extend(resultList)
return resultD
def _mergeListTroveQuery(self, resultList, result2, altFlavors, altFlavors2,
map, query):
newMap = []
newQuery = []
for idx, items in enumerate(result2):
if not items:
newMap.append(map[idx])
newQuery.append(query[idx])
if altFlavors2:
altFlavors[map[idx]].extend(altFlavors2[idx])
else:
resultList[map[idx]].extend(items)
altFlavors[map[idx]] = []
return newMap, newQuery
def _call(self, fn, query, *args, **kw):
if not isinstance(query, dict):
query = list(query)
result, altFlavors = getattr(self.extraSource, fn)(query,
*args, **kw)
map = []
newQuery = []
for idx, item in enumerate(result):
if not item:
map.append(idx)
newQuery.append(query[idx])
if self.mainSource:
result2, altFlavors2 = getattr(self.mainSource, fn)(newQuery,
*args, **kw)
newQuery, map = self._mergeListTroveQuery(result, result2,
altFlavors,
altFlavors2,
map, newQuery)
if self.repos:
result3, altFlavors3 = getattr(self.repos, fn)(newQuery,
*args, **kw)
newQuery, map = self._mergeListTroveQuery(result, result3,
altFlavors,
altFlavors3,
map, newQuery)
result = result, altFlavors
else:
query = dict(query)
d1 = getattr(self.extraSource, fn)(query, *args, **kw)
result = {}
self._mergeTroveQuery(result, d1)
for name in result:
query.pop(name)
if self.mainSource:
d2 = getattr(self.mainSource, fn)(query, *args, **kw)
self._mergeTroveQuery(result, d2)
if self.repos:
d3 = getattr(self.repos, fn)(query, *args, **kw)
self._mergeTroveQuery(result, d3)
return result
def _addLabelsToQuery(self, query):
if isinstance(query, dict):
newQuery = query.copy()
names = query
for name in query:
labels = set(x[1].trailingLabel() for x in
self.extraSource.trovesByName(name))
#asserts there is only one flavorList
flavorList, = set(x and tuple(x) for x in query[name].values())
for label in labels:
if label not in query[name]:
newQuery[name][label] = flavorList
map = None
else:
map = {}
newQuery = list(query)
names = [(x[0], x[1][0], x[1][2]) for x in enumerate(query)]
for idx, name, flavor in names:
labels = set(x[1].trailingLabel() for x in
self.extraSource.trovesByName(name))
for label in labels:
map[len(newQuery)] = idx
newQuery.append((name, label, flavor))
return newQuery, map
def _compressResults(self, results, map):
if map is None:
return results
results, altFlavors = results
finalResults = []
for idx, result in enumerate(results):
if idx in map:
if result:
finalResults[map[idx]].extend(result)
altFlavors[map[idx]] = []
else:
altFlavors[map[idx]].extend(altFlavors)
else:
finalResults.append(result)
return finalResults, altFlavors
def getTroveLatestByLabel(self, query, *args, **kw):
map = None
if self.expandLabelQueries:
query, map = self._addLabelsToQuery(query)
results = self._call('getTroveLatestByLabel', query, *args, **kw)
return self._compressResults(results, map)
def getTroveLeavesByLabel(self, query, *args, **kw):
map = None
if self.expandLabelQueries:
query, map = self._addLabelsToQuery(query)
results = self._call('getTroveLeavesByLabel', query, *args, **kw)
return self._compressResults(results, map)
def getTroveVersionsByLabel(self, query, *args, **kw):
map = None
if self.expandLabelQueries:
query, map = self._addLabelsToQuery(query)
results = self._call('getTroveVersionsByLabel', query, *args, **kw)
return self._compressResults(results, map)
def getTroveLeavesByBranch(self, query, *args, **kw):
return self._call('getTroveLeavesByBranch', query, *args, **kw)
def getTroveVersionsByBranch(self, query, *args, **kw):
return self._call('getTroveVersionsByBranch', query, *args, **kw)
def getTroveVersionFlavors(self, query, *args, **kw):
return self._call('getTroveVersionFlavors', query, *args, **kw)
def findTroves(self, labelPath, troveSpecs, defaultFlavor=None,
acrossLabels=False, acrossFlavors=False,
affinityDatabase=None, allowMissing=False,
bestFlavor=None, getLeaves=None,
troveTypes=trovesource.TROVE_QUERY_PRESENT,
exactFlavors=False,
**kw):
if self.mainSource is None:
return trovesource.SearchableTroveSource.findTroves(self,
labelPath, troveSpecs,
defaultFlavor=defaultFlavor,
acrossLabels=acrossLabels,
acrossFlavors=acrossFlavors,
affinityDatabase=affinityDatabase,
troveTypes=troveTypes,
exactFlavors=exactFlavors,
allowMissing=True,
**kw)
results = {}
if bestFlavor is not None:
kw.update(bestFlavor=bestFlavor)
if getLeaves is not None:
kw.update(getLeaves=getLeaves)
for source in self.sources:
if source == self.repos:
# we need the labelPath for repos, otherwise
# we allow other algorithms to determine which
# version of a particular trove to use - the same ones
# used during dep resolution. Sometimes this will not
# be a package on the ILP.
searchLabelPath = labelPath
else:
searchLabelPath = None
foundTroves = source.findTroves(searchLabelPath, troveSpecs,
defaultFlavor=defaultFlavor,
acrossLabels=acrossLabels,
acrossFlavors=acrossFlavors,
affinityDatabase=affinityDatabase,
troveTypes=troveTypes,
exactFlavors=exactFlavors,
allowMissing=True,
**kw)
for troveSpec, troveTups in foundTroves.iteritems():
results.setdefault(troveSpec, []).extend(troveTups)
if not allowMissing:
for troveSpec in troveSpecs:
assert(troveSpec in results)
return results
def resolveDependencies(self, label, depList, *args, **kw):
sugg = self.extraSource.resolveDependencies(label, depList, *args, **kw)
sugg2 = self.repos.resolveDependencies(label, depList, *args, **kw)
for depSet, trovesByDep in sugg.iteritems():
for idx, troveList in enumerate(trovesByDep):
if not troveList:
troveList.extend(sugg2[depSet][idx])
return sugg
def resolveDependenciesByGroups(self, troveList, depList):
sugg = self.extraSource.resolveDependencies(None, depList)
sugg2 = self.repos.resolveDependenciesByGroups(troveList, depList)
for depSet, trovesByDep in sugg.iteritems():
for idx, troveList in enumerate(trovesByDep):
if not troveList:
troveList.extend(sugg2[depSet][idx])
return sugg
class DepHandlerSource(TroveSourceMesh):
def __init__(self, builtTroveSource, troveListList, repos=None,
useInstallLabelPath=True, expandLabelQueries=False):
if repos:
flavorPrefs = repos._flavorPreferences
else:
flavorPrefs = []
stack = trovesource.TroveSourceStack()
stack.searchWithFlavor()
stack.setFlavorPreferenceList(flavorPrefs)
self.setFlavorPreferenceList(flavorPrefs)
self.expandLabelQueries = expandLabelQueries
self.resolveTroveSource = None
if isinstance(troveListList, trovesource.SimpleTroveSource):
troveListList.setFlavorPreferenceList(flavorPrefs)
self.stack.addSource(troveListList)
self.resolveTroveSource = troveListList
else:
if troveListList:
for troveList in troveListList:
allTroves = [ x.getNameVersionFlavor() for x in troveList ]
childTroves = itertools.chain(*
(x.iterTroveList(weakRefs=True,
strongRefs=True)
for x in troveList))
allTroves.extend(childTroves)
source = trovesource.SimpleTroveSource(allTroves)
source.searchWithFlavor()
source.setFlavorPreferenceList(flavorPrefs)
stack.addSource(source)
self.resolveTroveSource = stack
if not useInstallLabelPath:
repos = None
if not stack.sources:
stack = None
TroveSourceMesh.__init__(self, builtTroveSource, stack, repos)
def __repr__(self):
return 'DepHandlerSource(%r,%r,%r)' % (self.extraSource, self.mainSource, self.repos)
def copy(self):
inst = self.__class__(self.source, None, self.repos)
inst.repos = self.repos
return inst
class BuiltTroveSource(trovesource.SimpleTroveSource):
"""
Trove source that is used for dep resolution and buildreq satisfaction
only - it does not contain references to the changesets that are added
"""
def __init__(self, troves, repos):
self.depDb = deptable.DependencyDatabase()
trovesource.SimpleTroveSource.__init__(self)
self.setFlavorPreferenceList(repos._flavorPreferences)
self.idMap = []
self.idx = 0
for trove in troves:
self.addTrove(trove.getNameVersionFlavor(), trove.getProvides(),
trove.getRequires())
self.searchWithFlavor()
def close(self):
self.depDb.db.close()
def __del__(self):
self.depDb.db.close()
def addTrove(self, troveTuple, provides, requires):
self._trovesByName.setdefault(troveTuple[0],set()).add(troveTuple)
self.idMap.append(troveTuple)
self.depDb.add(self.idx, provides, requires)
self.idx += 1
def addChangeSet(self, cs):
for idx, trvCs in enumerate(cs.iterNewTroveList()):
self.addTrove(trvCs.getNewNameVersionFlavor(), trvCs.getProvides(),
trvCs.getRequires())
def resolveDependencies(self, label, depList, leavesOnly=False):
suggMap = self.depDb.resolve(label, depList)
for depSet, solListList in suggMap.iteritems():
newSolListList = []
for solList in solListList:
if not self._allowNoLabel and label:
newSolListList.append([ self.idMap[x] for x in solList if self.idMap[x][1].trailingLabel == label])
else:
newSolListList.append([ self.idMap[x] for x in solList ])
suggMap[depSet] = newSolListList
return suggMap
class ResolutionMesh(resolve.BasicResolutionMethod):
def __init__(self, cfg, extraMethod, mainMethod):
resolve.BasicResolutionMethod.__init__(self, cfg, None)
self.extraMethod = extraMethod
self.mainMethod = mainMethod
def prepareForResolution(self, depList):
self.depList = [ x[1] for x in depList]
self.extraMethod.prepareForResolution(depList)
return self.mainMethod.prepareForResolution(depList)
def resolveDependencies(self):
suggMap = self.extraMethod.resolveDependencies()
suggMap2 = self.mainMethod.resolveDependencies()
for depSet in self.depList:
if depSet not in suggMap:
suggMap[depSet] = [[] for x in depSet.iterDeps() ]
if depSet not in suggMap2:
suggMap2[depSet] = [[] for x in depSet.iterDeps() ]
for depSet, results in suggMap.iteritems():
mainResults = suggMap2[depSet]
for troveList1, troveList2 in itertools.izip(results, mainResults):
troveList2.extend(troveList1)
return suggMap2
def searchLeavesOnly(self):
self.extraMethod.searchLeavesOnly()
self.mainMethod.searchLeavesOnly()
def searchLeavesFirst(self):
self.extraMethod.searchLeavesFirst()
self.mainMethod.searchLeavesFirst()
def searchAllVersions(self):
self.extraMethod.searchAllVersions()
self.mainMethod.searchAllVersions()
def selectResolutionTrove(self, requiredBy, dep, depClass,
troveTups, installFlavor, affFlavorDict):
"""
determine which of the given set of troveTups is the
best choice for installing on this system. Because the
repository didn't try to determine which flavors are best for
our system, we have to filter the troves locally.
"""
#NOTE: this method should exactly match the one in
# conary.repository.resolvemethod for conary 1.2 and later.
# When we drop support for earlier conary versions we can drop this method.
# we filter the troves in the following ways:
# 1. prefer troves that match affinity flavor + are on the affinity
# label. (And don't drop an arch)
# 2. fall back to troves that match the install flavor.
# If we don't match an affinity flavor + label, then use flavor
# preferences and flavor scoring to select the best flavor.
# We'll have to check
# Within these two categories:
# 1. filter via flavor preferences for each trove (this may result
# in an older version for some troves)
# 2. only leave the latest version for each trove
# 3. pick the best flavor out of the remaining
affinityMatches = []
affinityFlavors = []
otherMatches = []
otherFlavors = []
if installFlavor is not None and not installFlavor.isEmpty():
flavoredList = []
for troveTup in troveTups:
label = troveTup[1].trailingLabel()
affTroves = affFlavorDict[troveTup[0]]
found = False
if affTroves:
for affName, affVersion, affFlavor in affTroves:
if affVersion.trailingLabel() != label:
continue
newFlavor = deps.overrideFlavor(installFlavor,
affFlavor,
mergeType=deps.DEP_MERGE_TYPE_PREFS)
# implement never drop an arch for dep resolution
currentArch = deps.getInstructionSetFlavor(affFlavor)
if not troveTup[2].stronglySatisfies(currentArch):
continue
if newFlavor.satisfies(troveTup[2]):
affinityMatches.append((newFlavor, troveTup))
affinityFlavors.append(troveTup[2])
found = True
if not found and not affinityMatches:
if installFlavor.satisfies(troveTup[2]):
otherMatches.append((installFlavor, troveTup))
otherFlavors.append(troveTup[2])
else:
otherMatches = [ (None, x) for x in troveTups ]
otherFlavors = [x[2] for x in troveTups]
if affinityMatches:
allFlavors = affinityFlavors
flavoredList = affinityMatches
else:
allFlavors = otherFlavors
flavoredList = otherMatches
# Now filter by flavor preferences.
newFlavors = []
if self.flavorPreferences:
for flavor in self.flavorPreferences:
for trvFlavor in allFlavors:
if trvFlavor.stronglySatisfies(flavor):
newFlavors.append(trvFlavor)
if newFlavors:
break
if newFlavors:
flavoredList = [ x for x in flavoredList if x[1][2] in newFlavors ]
return self._selectMatchingResolutionTrove(requiredBy, dep,
depClass, flavoredList)
def _selectMatchingResolutionTrove(self, requiredBy, dep, depClass,
flavoredList):
# this function should be an exact match of
# resolvemethod._selectMatchingResolutionTrove from conary 1.2 and
# later.
# finally, filter by latest then score.
trovesByNL = {}
for installFlavor, (n,v,f) in flavoredList:
l = v.trailingLabel()
myTimeStamp = v.timeStamps()[-1]
if installFlavor is None:
myScore = 0
else:
# FIXME: we should cache this scoring from before.
myScore = installFlavor.score(f)
if (n,l) in trovesByNL:
curScore, curTimeStamp, curTup = trovesByNL[n,l]
if curTimeStamp > myTimeStamp:
continue
if curTimeStamp == myTimeStamp:
if myScore < curScore:
continue
trovesByNL[n,l] = (myScore, myTimeStamp, (n,v,f))
scoredList = sorted(trovesByNL.itervalues())
if not scoredList:
return None
else:
# highest score, then latest timestamp, then name.
return scoredList[-1][-1]
if hasattr(resolve.BasicResolutionMethod,
'_selectMatchingResolutionTrove'):
selectResolutionTrove = resolve.BasicResolutionMethod.selectResolutionTrove
_selectMatchingResolutionTrove = resolve.BasicResolutionMethod._selectMatchingResolutionTrove
class rMakeResolveSource(ResolutionMesh):
"""
Resolve by trove list first and then fall back to the label
path. Also respects intra-trove deps. If foo:runtime
requires foo:lib, it requires exactly the same version of foo:lib.
"""
def __init__(self, cfg, builtTroveSource, resolveTroveSource,
troveLists, repos):
self.removeFileDependencies = False
self.builtTroveSource = builtTroveSource
self.troveLists = troveLists
self.resolveTroveSource = resolveTroveSource
self.repos = repos
self.cfg = cfg
self.repos = repos
self.flavor = cfg.flavor
sources = []
builtResolveSource = resolve.BasicResolutionMethod(cfg, None)
builtResolveSource.setTroveSource(builtTroveSource)
sources = []
if troveLists:
troveListSources = [resolve.DepResolutionByTroveList(cfg, None, x)
for x in troveLists]
[ x.setTroveSource(self.repos) for x in troveListSources ]
sources.extend(troveListSources)
mainMethod = resolve.ResolutionStack(*sources)
flavorPreferences = self.repos._flavorPreferences
for source in sources:
source.setFlavorPreferences(flavorPreferences)
ResolutionMesh.__init__(self, cfg, builtResolveSource, mainMethod)
self.setFlavorPreferences(flavorPreferences)
def close(self):
self.builtTroveSource.close()
def setLabelPath(self, labelPath):
if labelPath:
source = resolve.DepResolutionByLabelPath(self.cfg, None, labelPath)
source.setTroveSource(self.repos)
self.mainMethod.addSource(source)
def prepareForResolution(self, depList):
# need to get intratrove deps while we still have the full dependency
# request information - including what trove the dep arises from.
intraDeps = self._getIntraTroveDeps(depList)
self.intraDeps = intraDeps
return ResolutionMesh.prepareForResolution(self, depList)
def _resolveIntraTroveDeps(self, intraDeps):
trovesToGet = []
for depSet, deps in intraDeps.iteritems():
for dep, troveTups in deps.iteritems():
trovesToGet.extend(troveTups)
hasTroves = self.troveSource.hasTroves(trovesToGet)
if isinstance(hasTroves, list):
hasTroves = dict(itertools.izip(trovesToGet, hasTroves))
results = {}
for depSet, deps in intraDeps.iteritems():
d = {}
results[depSet] = d
for dep, troveTups in deps.iteritems():
d[dep] = [ x for x in troveTups if hasTroves[x] ]
return results
def resolveDependencies(self):
sugg = ResolutionMesh.resolveDependencies(self)
intraDepSuggs = self._resolveIntraTroveDeps(self.intraDeps)
for depSet, intraDeps in self.intraDeps.iteritems():
for idx, (depClass, dep) in enumerate(depSet.iterDeps(sort=True)):
if depClass.tag == deps.DEP_CLASS_TROVES:
if (dep in intraDepSuggs[depSet]
and intraDepSuggs[depSet][dep]):
sugg[depSet][idx] = intraDepSuggs[depSet][dep]
return sugg
def _getIntraTroveDeps(self, depList):
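# e.g. if foo:runtime requires trove: foo:lib, record foo:lib at exactly
# foo:runtime's own version and flavor as the candidate resolution (see
# the class docstring above)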
suggsByDep = {}
intraDeps = {}
for troveTup, depSet in depList:
pkgName = troveTup[0].split(':', 1)[0]
for dep in depSet.iterDepsByClass(deps.TroveDependencies):
if (dep.name.startswith(pkgName)
and dep.name.split(':', 1)[0] == pkgName):
troveToGet = (dep.name, troveTup[1], troveTup[2])
l = suggsByDep.setdefault(dep, [])
l.append(troveToGet)
intraDeps.setdefault(depSet, {}).setdefault(dep, l)
return intraDeps
def filterDependencies(self, depList):
if self.removeFileDependencies:
depList = [(x[0], flavorutil.removeFileDeps(x[1]))
for x in depList ]
return [ x for x in depList if not x[1].isEmpty() ]
return depList
def _selectMatchingResolutionTrove(self, requiredBy, dep, depClass,
flavoredList):
# if all packages are the same and only their flavor score or timestamp
# is keeping one from being picked over the other, prefer the
# newly built package.
builtTroves = []
resolveTroves = []
newList = flavoredList
if self.resolveTroveSource:
minResolveIdx = len(self.resolveTroveSource.sources)
ilp = self.cfg.installLabelPath
for installFlavor, troveTup in flavoredList:
if self.extraMethod.troveSource.hasTrove(*troveTup):
branch = troveTup[1].branch()
if branch.hasParentBranch():
label = branch.parentBranch().label()
else:
label = branch.label()
list = builtTroves
elif (self.resolveTroveSource
and self.resolveTroveSource.hasTrove(*troveTup)):
# if a package is both in the resolveTroves list
# and found via ILP, it might be in this list even
# though it was not found via resolveTroves. So we
# limit results to ones found as early as possible
# in the resolveTroves list
for resolveIdx, source in enumerate(self.resolveTroveSource.sources):
if source.hasTrove(*troveTup):
if resolveIdx < minResolveIdx:
resolveTroves = []
minResolveIdx = resolveIdx
break
if resolveIdx > minResolveIdx:
continue
list = resolveTroves
label = troveTup[1].trailingLabel()
else:
continue
if label in ilp:
index = ilp.index(label)
else:
index = len(ilp)
list.append((index, (installFlavor, troveTup)))
if builtTroves:
minIndex = sorted(builtTroves, key=lambda x: x[0])[0][0]
newList = [ x[1] for x in builtTroves if x[0] == minIndex ]
elif resolveTroves:
minIndex = sorted(resolveTroves, key=lambda x: x[0])[0][0]
newList = [ x[1] for x in resolveTroves if x[0] == minIndex ]
return ResolutionMesh._selectMatchingResolutionTrove(self, requiredBy,
dep,
depClass, newList)
|
apache-2.0
| -3,809,415,997,018,213,000
| 41.872727
| 119
| 0.560057
| false
| 4.353025
| false
| false
| false
|
songyi199111/sentry
|
src/sentry/event_manager.py
|
2
|
19299
|
"""
sentry.event_manager
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import math
import six
from datetime import datetime, timedelta
from django.conf import settings
from django.db import IntegrityError, transaction
from django.utils import timezone
from hashlib import md5
from raven.utils.encoding import to_string
from uuid import uuid4
from sentry.app import buffer, tsdb
from sentry.constants import (
CLIENT_RESERVED_ATTRS, LOG_LEVELS, DEFAULT_LOGGER_NAME, MAX_CULPRIT_LENGTH,
MAX_TAG_VALUE_LENGTH
)
from sentry.interfaces.base import get_interface
from sentry.models import (
Activity, Event, EventMapping, Group, GroupHash, GroupStatus, Project,
Release, UserReport
)
from sentry.plugins import plugins
from sentry.signals import regression_signal
from sentry.utils.logging import suppress_exceptions
from sentry.tasks.index import index_event
from sentry.tasks.merge import merge_group
from sentry.tasks.post_process import post_process_group
from sentry.utils.db import get_db_engine
from sentry.utils.safe import safe_execute, trim, trim_dict
def count_limit(count):
# TODO: could we do something like num_to_store = max(math.sqrt(100*count)+59, 200) ?
# ~ 150 * ((log(n) - 1.5) ^ 2 - 0.25)
for amount, sample_rate in settings.SENTRY_SAMPLE_RATES:
if count <= amount:
return sample_rate
return settings.SENTRY_MAX_SAMPLE_RATE
def time_limit(silence): # ~ 3600 per hour
for amount, sample_rate in settings.SENTRY_SAMPLE_TIMES:
if silence >= amount:
return sample_rate
return settings.SENTRY_MAX_SAMPLE_TIME
def md5_from_hash(hash_bits):
result = md5()
for bit in hash_bits:
result.update(to_string(bit))
return result.hexdigest()
def get_hashes_for_event(event):
interfaces = event.get_interfaces()
for interface in interfaces.itervalues():
result = interface.compute_hashes(event.platform)
if not result:
continue
return result
return [[event.message]]
def get_hashes_from_fingerprint(event, fingerprint):
default_values = set(['{{ default }}', '{{default}}'])
if any(d in fingerprint for d in default_values):
default_hashes = get_hashes_for_event(event)
hash_count = len(default_hashes)
else:
hash_count = 1
hashes = []
for idx in xrange(hash_count):
result = []
for bit in fingerprint:
if bit in default_values:
result.extend(default_hashes[idx])
else:
result.append(bit)
hashes.append(result)
return hashes
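# Illustrative sketch of the fingerprint path used by save() below: when
# the client-supplied fingerprint contains no '{{ default }}' placeholder
# the event itself is never consulted, so None stands in for it here and
# the fingerprint values are made up.
def _fingerprint_hash_example():
    fingerprint = ['my-app', 'DatabaseError', 'timeout']
    hash_bits_list = get_hashes_from_fingerprint(None, fingerprint)
    # save() turns each list of bits into an md5 digest, which is later
    # looked up or created as a GroupHash row
    return [md5_from_hash(bits) for bits in hash_bits_list]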
if not settings.SENTRY_SAMPLE_DATA:
def should_sample(current_datetime, last_seen, times_seen):
return False
else:
def should_sample(current_datetime, last_seen, times_seen):
silence_timedelta = current_datetime - last_seen
silence = silence_timedelta.days * 86400 + silence_timedelta.seconds
if times_seen % count_limit(times_seen) == 0:
return False
if times_seen % time_limit(silence) == 0:
return False
return True
def plugin_is_regression(group, event):
project = event.project
for plugin in plugins.for_project(project):
result = safe_execute(plugin.is_regression, group, event,
version=1, _with_transaction=False)
if result is not None:
return result
return True
class ScoreClause(object):
def __init__(self, group):
self.group = group
def __int__(self):
# Calculate the score manually when coercing to an int.
# This is used within create_or_update and friends
return self.group.get_score()
def prepare_database_save(self, unused):
return self
def prepare(self, evaluator, query, allow_joins):
return
def evaluate(self, node, qn, connection):
engine = get_db_engine(getattr(connection, 'alias', 'default'))
if engine.startswith('postgresql'):
sql = 'log(times_seen) * 600 + last_seen::abstime::int'
elif engine.startswith('mysql'):
sql = 'log(times_seen) * 600 + unix_timestamp(last_seen)'
else:
# XXX: if we can't do it atomically let's do it the best we can
sql = int(self)
return (sql, [])
@classmethod
def calculate(self, times_seen, last_seen):
return math.log(times_seen) * 600 + float(last_seen.strftime('%s'))
class EventManager(object):
logger = logging.getLogger('sentry.events')
def __init__(self, data, version='5'):
self.data = data
self.version = version
def normalize(self):
# TODO(dcramer): store http.env.REMOTE_ADDR as user.ip
# First we pull out our top-level (non-data attr) kwargs
data = self.data
if not isinstance(data.get('level'), (six.string_types, int)):
data['level'] = logging.ERROR
elif data['level'] not in LOG_LEVELS:
data['level'] = logging.ERROR
if not data.get('logger'):
data['logger'] = DEFAULT_LOGGER_NAME
else:
data['logger'] = trim(data['logger'], 64)
if data.get('platform'):
data['platform'] = trim(data['platform'], 64)
timestamp = data.get('timestamp')
if not timestamp:
timestamp = timezone.now()
if isinstance(timestamp, datetime):
# We must convert date to local time so Django doesn't mess it up
# based on TIME_ZONE
if settings.TIME_ZONE:
if not timezone.is_aware(timestamp):
timestamp = timestamp.replace(tzinfo=timezone.utc)
elif timezone.is_aware(timestamp):
timestamp = timestamp.replace(tzinfo=None)
timestamp = float(timestamp.strftime('%s'))
data['timestamp'] = timestamp
if not data.get('event_id'):
data['event_id'] = uuid4().hex
data.setdefault('message', None)
data.setdefault('culprit', None)
data.setdefault('time_spent', None)
data.setdefault('server_name', None)
data.setdefault('site', None)
data.setdefault('checksum', None)
data.setdefault('fingerprint', None)
data.setdefault('platform', None)
data.setdefault('extra', {})
data.setdefault('errors', [])
tags = data.get('tags')
if not tags:
tags = []
# full support for dict syntax
elif isinstance(tags, dict):
tags = tags.items()
# prevent [tag, tag, tag] (invalid) syntax
elif not all(len(t) == 2 for t in tags):
tags = []
else:
tags = list(tags)
data['tags'] = []
for key, value in tags:
key = six.text_type(key).strip()
value = six.text_type(value).strip()
if not (key and value):
continue
if len(value) > MAX_TAG_VALUE_LENGTH:
continue
data['tags'].append((key, value))
if not isinstance(data['extra'], dict):
# throw it away
data['extra'] = {}
trim_dict(
data['extra'], max_size=settings.SENTRY_MAX_EXTRA_VARIABLE_SIZE)
# TODO(dcramer): more of validate data needs stuffed into the manager
for key in data.keys():
if key in CLIENT_RESERVED_ATTRS:
continue
value = data.pop(key)
try:
interface = get_interface(key)()
except ValueError:
continue
try:
inst = interface.to_python(value)
data[inst.get_path()] = inst.to_json()
except Exception:
pass
data['version'] = self.version
# TODO(dcramer): find a better place for this logic
exception = data.get('sentry.interfaces.Exception')
stacktrace = data.get('sentry.interfaces.Stacktrace')
if exception and len(exception['values']) == 1 and stacktrace:
exception['values'][0]['stacktrace'] = stacktrace
del data['sentry.interfaces.Stacktrace']
if 'sentry.interfaces.Http' in data:
# default the culprit to the url
if not data['culprit']:
data['culprit'] = data['sentry.interfaces.Http']['url']
if data['time_spent']:
data['time_spent'] = int(data['time_spent'])
if data['culprit']:
data['culprit'] = trim(data['culprit'], MAX_CULPRIT_LENGTH)
if data['message']:
data['message'] = trim(
data['message'], settings.SENTRY_MAX_MESSAGE_LENGTH)
return data
@suppress_exceptions
def save(self, project, raw=False):
# TODO: culprit should default to "most recent" frame in stacktraces when
# it's not provided.
project = Project.objects.get_from_cache(id=project)
data = self.data.copy()
# First we pull out our top-level (non-data attr) kwargs
event_id = data.pop('event_id')
message = data.pop('message')
level = data.pop('level')
culprit = data.pop('culprit', None) or ''
time_spent = data.pop('time_spent', None)
logger_name = data.pop('logger', None)
server_name = data.pop('server_name', None)
site = data.pop('site', None)
checksum = data.pop('checksum', None)
fingerprint = data.pop('fingerprint', None)
platform = data.pop('platform', None)
release = data.pop('release', None)
date = datetime.fromtimestamp(data.pop('timestamp'))
date = date.replace(tzinfo=timezone.utc)
kwargs = {
'message': message,
'platform': platform,
}
event = Event(
project=project,
event_id=event_id,
data=data,
time_spent=time_spent,
datetime=date,
**kwargs
)
tags = data.get('tags') or []
tags.append(('level', LOG_LEVELS[level]))
if logger_name:
tags.append(('logger', logger_name))
if server_name:
tags.append(('server_name', server_name))
if site:
tags.append(('site', site))
if release:
# TODO(dcramer): we should ensure we create Release objects
tags.append(('sentry:release', release))
for plugin in plugins.for_project(project, version=None):
added_tags = safe_execute(plugin.get_tags, event,
_with_transaction=False)
if added_tags:
tags.extend(added_tags)
# XXX(dcramer): we're relying on mutation of the data object to ensure
# this propagates into Event
data['tags'] = tags
# prioritize fingerprint over checksum as it's likely the client defaulted
# a checksum whereas the fingerprint was explicit
if fingerprint:
hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
elif checksum:
hashes = [checksum]
else:
hashes = map(md5_from_hash, get_hashes_for_event(event))
group_kwargs = kwargs.copy()
group_kwargs.update({
'culprit': culprit,
'logger': logger_name,
'level': level,
'last_seen': date,
'first_seen': date,
'time_spent_total': time_spent or 0,
'time_spent_count': time_spent and 1 or 0,
})
if release:
release = Release.get_or_create(
project=project,
version=release,
date_added=date,
)
group_kwargs['first_release'] = release
Activity.objects.create(
type=Activity.RELEASE,
project=project,
ident=release,
data={'version': release},
datetime=date,
)
group, is_new, is_regression, is_sample = safe_execute(
self._save_aggregate,
event=event,
hashes=hashes,
**group_kwargs
)
using = group._state.db
event.group = group
event.group_id = group.id
# store a reference to the group id to guarantee validation of isolation
event.data.bind_ref(event)
try:
with transaction.atomic():
EventMapping.objects.create(
project=project, group=group, event_id=event_id)
except IntegrityError:
self.logger.info('Duplicate EventMapping found for event_id=%s', event_id)
return event
UserReport.objects.filter(
project=project, event_id=event_id,
).update(group=group)
# save the event unless it's been sampled
if not is_sample:
try:
with transaction.atomic():
event.save()
except IntegrityError:
self.logger.info('Duplicate Event found for event_id=%s', event_id)
return event
if is_new and release:
buffer.incr(Release, {'new_groups': 1}, {
'id': release.id,
})
safe_execute(Group.objects.add_tags, group, tags,
_with_transaction=False)
if not raw:
post_process_group.delay(
group=group,
event=event,
is_new=is_new,
is_sample=is_sample,
is_regression=is_regression,
)
else:
self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id)
index_event.delay(event)
# TODO: move this to the queue
if is_regression and not raw:
regression_signal.send_robust(sender=Group, instance=group)
return event
def _find_hashes(self, project, hash_list):
matches = []
for hash in hash_list:
ghash, _ = GroupHash.objects.get_or_create(
project=project,
hash=hash,
)
matches.append((ghash.group_id, ghash.hash))
return matches
def _ensure_hashes_merged(self, group, hash_list):
# TODO(dcramer): there is a race condition with selecting/updating
# in that another group could take ownership of the hash
bad_hashes = GroupHash.objects.filter(
project=group.project,
hash__in=hash_list,
).exclude(
group=group,
)
if not bad_hashes:
return
for hash in bad_hashes:
merge_group.delay(
from_group_id=hash.group_id,
to_group_id=group.id,
)
return GroupHash.objects.filter(
project=group.project,
hash__in=bad_hashes,
).update(
group=group,
)
def _save_aggregate(self, event, hashes, **kwargs):
time_spent = event.time_spent
project = event.project
# attempt to find a matching hash
all_hashes = self._find_hashes(project, hashes)
try:
existing_group_id = (h[0] for h in all_hashes if h[0]).next()
except StopIteration:
existing_group_id = None
# XXX(dcramer): this has the opportunity to create duplicate groups
# it should be resolved by the hash merging function later but this
# should be better tested/reviewed
if existing_group_id is None:
kwargs['score'] = ScoreClause.calculate(1, kwargs['last_seen'])
group, group_is_new = Group.objects.create(
project=project,
**kwargs
), True
else:
group = Group.objects.get(id=existing_group_id)
group_is_new = False
# If all hashes are brand new we treat this event as new
is_new = False
new_hashes = [h[1] for h in all_hashes if h[0] is None]
if new_hashes:
affected = GroupHash.objects.filter(
project=project,
hash__in=new_hashes,
group__isnull=True,
).update(
group=group,
)
if affected != len(new_hashes):
self._ensure_hashes_merged(group, new_hashes)
elif group_is_new and len(new_hashes) == len(all_hashes):
is_new = True
# XXX(dcramer): it's important this gets called **before** the aggregate
# is processed as otherwise values like last_seen will get mutated
can_sample = should_sample(event.datetime, group.last_seen, group.times_seen)
if not is_new:
is_regression = self._process_existing_aggregate(group, event, kwargs)
else:
is_regression = False
# Determine if we've sampled enough data to store this event
if is_new or is_regression:
is_sample = False
else:
is_sample = can_sample
tsdb.incr_multi([
(tsdb.models.group, group.id),
(tsdb.models.project, project.id),
])
return group, is_new, is_regression, is_sample
def _process_existing_aggregate(self, group, event, data):
date = max(event.datetime, group.last_seen)
extra = {
'last_seen': date,
'score': ScoreClause(group),
}
if event.message and event.message != group.message:
extra['message'] = event.message
if group.level != data['level']:
extra['level'] = data['level']
if group.culprit != data['culprit']:
extra['culprit'] = data['culprit']
is_regression = False
if group.is_resolved() and plugin_is_regression(group, event):
is_regression = bool(Group.objects.filter(
id=group.id,
# ensure we can't update things if the status has been set to
# muted
status__in=[GroupStatus.RESOLVED, GroupStatus.UNRESOLVED],
).exclude(
# add to the regression window to account for races here
active_at__gte=date - timedelta(seconds=5),
).update(
active_at=date,
# explicitly set last_seen here as ``is_resolved()`` looks
# at the value
last_seen=date,
status=GroupStatus.UNRESOLVED
))
group.active_at = date
group.status = GroupStatus.UNRESOLVED
group.last_seen = extra['last_seen']
update_kwargs = {
'times_seen': 1,
}
if event.time_spent:
update_kwargs.update({
'time_spent_total': event.time_spent,
'time_spent_count': 1,
})
buffer.incr(Group, update_kwargs, {
'id': group.id,
}, extra)
return is_regression
|
bsd-3-clause
| -1,338,582,620,076,449,300
| 31.654822
| 97
| 0.56604
| false
| 4.204575
| false
| false
| false
|
theodoregoetz/wernher
|
sandbox/KRPC Testing.py
|
1
|
3144
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
%run -i 'KRPC.ipynb'
# <codecell>
conn = krpc.connect(name='laptop', address='192.168.1.9')
ksc = conn.space_center
vessel = ksc.active_vessel
obt = vessel.orbit
ap = vessel.auto_pilot
con = vessel.control
vrf = vessel.reference_frame
srfrf = vessel.surface_reference_frame
vobtrf = vessel.orbital_reference_frame
obtrf = obt.body.reference_frame
obtorf = obt.body.orbital_reference_frame
obtnrrf = obt.body.non_rotating_reference_frame
flight = lambda rf: vessel.flight(rf)
# <codecell>
t = ksc.ut
o = KeplerOrbit(obt)
f = flight(obtorf)
print(obt.time_to_apoapsis, obt.time_to_periapsis)
print(f.longitude)
print(o.Ω * 180/π)
print(o.ν * 180/π)
# <codecell>
speed = conn.add_stream(getattr, flight(srfrf), 'speed')
altitude = conn.add_stream(getattr, flight(obtrf), 'mean_altitude')
apoapsis = conn.add_stream(getattr, obt, 'apoapsis_altitude')
# <codecell>
con.throttle = 0.6
ap.set_rotation(90, 90, roll=90)
time.sleep(1)
con.activate_next_stage()
while flight(obtrf).speed < 100.:
time.sleep(0.1)
ap.set_rotation(80, 90, roll=90)
while flight(obtrf).mean_altitude < 5000.:
time.sleep(0.1)
ap.disengage()
ap.sas = True
ap.sas_mode = ksc.SASMode.prograde
while obt.apoapsis_altitude < 80000:
time.sleep(0.1)
ap.sas_mode = ksc.SASMode.stability_assist
ap.sas = False
while abs(obt.eccentricity) > 0.1:
obt.apoapsis
    # ap.set_direction(?, 90, roll=90)  # incomplete in the original sandbox: the first argument (pitch) was never filled in
ap.disengage()
con.throttle = 0.
# <codecell>
ksc.SASMode.prograde
# <codecell>
speed.remove()
altitude.remove()
apoapsis.remove()
# <codecell>
def prelaunch(conn):
ksc = conn.space_center
vessel = ksc.active_vessel
obtbody_rf = vessel.orbit.body.reference_frame
flight = vessel.flight
ap = vessel.auto_pilot
cont = vessel.control
vessel
ut = conn.add_stream(getattr, ksc, 'ut')
mean_altitude = conn.add_stream(getattr, flight(), 'mean_altitude')
#position = conn.add_stream(vessel.position, obtbody_rf)
timestamp = []
altitude = []
t0 = ut()
alt = mean_altitude()
while alt < 80000:
t1 = ut()
alt = mean_altitude()
if abs(t1 - t0) > 0.001:
timestamp.append(t1)
altitude.append(alt)
t0 = t1
time.sleep(1./25.)
# <codecell>
print(ut())
# <codecell>
pyplot.plot(timestamp,altitude)
# <codecell>
print(vessel.name)
print(vessel.met)
print(vessel.mass)
print(vessel.position(vessel.orbit.body.reference_frame))
# <codecell>
def latlon(vessel):
x,y,z = vessel.position(vessel.orbit.body.reference_frame)
r = np.sqrt(x*x + y*y + z*z)
lat = 90. - np.arccos(y / r) * 180. / np.pi
lon = np.arctan2(z, x) * 180. / np.pi
return lat,lon
# <codecell>
data = []
# <codecell>
image = pyplot.imread('/home/goetz/kerbin.jpg')
fig, ax = pyplot.subplots(figsize=(15,7))
im = ax.imshow(image)
ax.set_autoscale_on(False)
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
lat,lon = latlon(vessel)
xmap = ((lon + 180.) / 360.) * (xmax - xmin) + xmin
ymap = ((lat + 90.) / 180.) * (ymax - ymin) + ymin
pt = ax.plot(xmap,ymap, marker='o', color='cyan')
|
gpl-3.0
| 4,061,962,225,214,920,700
| 17.690476
| 67
| 0.660828
| false
| 2.470496
| false
| false
| false
|
saltzm/yadi
|
yadi/datalog2sql/parse2tokens/parser_tests.py
|
1
|
6246
|
from .Parser import Parser
p = Parser()
#Tests to check syntax
print(p.parsesentence("q.")) # Atom, zero arity
print(p.parsesentence("q(x).")) # Atom, one var
print(p.parsesentence("q('3').")) # Atom, string
print(p.parsesentence("q(x,y).")) # Atom, two-arity
print(p.parsesentence("q(_,x).")) # Atom, anonymous variable
print(p.parsesentence("_ab(a).")) # Predicate symbol with underscore
print(p.parsesentence("q2(x,z,b,'a').")) # Predicate symbol with number
print(p.parsesentence("__ab_55(a,b,c).")) # Predicate symbol with number and underscore
print(p.parsesentence("q(x,y) :- k(x,y).")) # Rule with one literal
print(p.parsesentence("q(x,y) :- a(foo_foo).")) # Rule with one literal using constant
print(p.parsesentence("q(x,y) :- k(_ab).")) # Rule with one literal with constant starting with underscore
print(p.parsesentence("q(x,y) :- k(X).")) # Rule with one literal with one variable
print(p.parsesentence("q(x,y) :- k(x,h), _v3(n,k).")) # Rule with two literals
print(p.parsesentence("q(x,y) :- a;b.")) # Rule with disjunction of two zero-arity atoms
print(p.parsesentence("q(x,y) :- a(x);b(x).")) # Rule with disjunction of two 1-arity atoms
print(p.parsesentence("q(x,y) :- a division b.")) # Rule with division of two zero-arity atoms
print(p.parsesentence("q(x,y) :- a(x,y) division b(x,y).")) # Rule with division of two two-arity atoms
print(p.parsesentence("q(x,y,z) :- a(x),a;b.")) # Rule with one-arity atom, disjunction of two zero-arity atoms
print(p.parsesentence("q(x,y) :- a(x), t>5.")) # Rule with one-arity atom, boolean comparison
print(p.parsesentence("q(x,y) :- a(x), t<5.")) # Rule with one-arity atom, boolean comparison
print(p.parsesentence("q(x,y) :- a(x), t>=5.")) # Rule with one-arity atom, boolean comparison
print(p.parsesentence("q(x,y) :- a(x), t<=5.")) # Rule with one-arity atom, boolean comparison
print(p.parsesentence("q(x,y) :- a(x), gd=5.")) # Rule with one-arity atom, boolean comparison
print(p.parsesentence("q(x,y,z) :- a(x), t=4.0.")) # Rule with one-arity atom, comparison using float
print(p.parsesentence("q(x,y,z) :- a(x), t=4.0E6.")) # Rule with one-arity atom, comparison using float+E
print(p.parsesentence("q(x,y,z) :- a(x), t=4.0E+6.")) # Rule with one-arity atom, comparison using float+E+'+'
print(p.parsesentence("q(x,y,z) :- a(x), t=4.0E-6.")) # Rule with one-arity atom, comparison using float+E+'-'
print(p.parsesentence("q(x,y,z) :- a(x), t=4.0, k(x).")) # Rule with one-arity atom, comparison, atom
print(p.parsesentence("q(x) :- x(g), not(a(x,y)).")) # Rule with one-arity atom, negation
print(p.parsesentence("q(x,y). k(x).")) # Two facts in a line.
print(p.parsesentence("q(x,y). q(x,y) :- a(b,c).")) # A fact and a rule in a line.
print(p.parsesentence("q(x,y). q(x,y) :- a(b,c). a(b).")) # A fact, a rule and a fact in a line.
print(p.parsesentence("q(x,y) :- a(b), X=3; Y>5.")) # Rule with one-arity atom, disjunctive comparison.
print(p.parsesentence("q(x,y) :- a(b), X=3, Y>5.")) # Rule with one-arity atom, conjunctive comparison.
print(p.parsesentence("q(x,y) :- a(b), X=3, Y>5, X=3; Y>5.")) # Rule with one-arity atom, two two-term comparisons.
print(p.parsesentence("r(X) :- not(t(Y)), X = Y, s(Y).")) # Rule with a negation in front.
print(p.parsesentence("r(x) :- r(a,X); not(q(X,b)), lj(a,b,x).")) # Rule with a left join
print(p.parsesentence("q(X,Z) :- s(X,Y), not(t(X)), Y=Z."))
print(p.parsesentence("q(X,Z) :- t>5, s(X,Y), not(t(X)), Y=Z."))
print(p.parsesentence("q(X,Y):- s(X).\nq(X,Y):- s(Y).")) # Two statements broken down in two lines.
print(p.parsesentence("q(x,y) :- a(b), X=3, 3>Y, X=3; 5>X.")) # Rule with one-arity atom, two two-term comparisons.
print(p.parsesentence("q(X,Y), s(x).")) # Temporary view
print(p.parsesentence("q(X,Y), not(x(t,y)).")) # Temporary view
print(p.parsesentence("q(X,Y):- s(X).\nq(X,Y):- s(X).\nq(X,Y):- s(X)."))
print(p.parsesentence("q(X,3) :- s(X)."))
#Incorporation of all elements
print(p.parsesentence("a45(x,Y,_343,a) :- x43A(k,5,x), A>=4; t=5, a(q,x);r(x,Y), a division y. q(x,y)."))
#Rules (that actually make sense)
print(p.parsesentence("q(X,Y):- s(X)."))
print(p.parsesentence("q(X):- s(X)."))
print(p.parsesentence("q(X):- s(X), not(t(U))."))
print(p.parsesentence("q(X):- s(X,U), not(t(U))."))
print(p.parsesentence("q(X):- s(X), not(t(U)), U = 2."))
print(p.parsesentence("q(X):- s(X), not(t(U)), U < 2."))
print(p.parsesentence("q(X):- s(X), not(t(U)), U = X."))
print(p.parsesentence("q(X):- s(X), Y < 3."))
print(p.parsesentence("q(X):- s(X,Y), Y < 3."))
print(p.parsesentence("q(X):- s(X), not(t(Y)), X = Y."))
print(p.parsesentence("q(X,Z):- s(X,Y), not(t(A,Z)), Z = Y."))
print(p.parsesentence("q(X):- s(X), X = 2."))
print(p.parsesentence("q(X):- s(X, Y), Y = 2."))
print(p.parsesentence("q(X):- s(X, Y, Z), Y = 2, Z = Y."))
print(p.parsesentence("q(X) :- not(s(Y)), X = 2, X = Y."))
print(p.parsesentence("q(X) :- not(s(Y)), X = Y, X = 2."))
print(p.parsesentence("q(X) :- s(X), X = Y."))
print(p.parsesentence("q(X) :- s(X), P = Y."))
print(p.parsesentence("r(X) :- s(X), 3=X, X>2."))
print(p.parsesentence("r(Y) :- s(X), Y=X, X=2, Y =4."))
print(p.parsesentence("r(X,Y,Z,_,2) :- s(X), Y=X, X=2."))
print(p.parsesentence("q(X,Y) :- s(_,Y), t(X,_), u(_), v(_,_)."))
print(p.parsesentence("q(x,y)."))
print(p.parsesentence("q(X,Y) :- s(_,Y), t(X,_), u(_), v(_,_)."))
#Errors
#print(p.parsesentence("q(x,y,,)."))
#print(p.parsesentence("r(Title1,Title2,Release_date):-movie(Title1,,,Release_date),movie(Title2,,,Release_date)."))
#print(p.parsesentence("r(x):-q(x),s(x,,,,,)."))
#print(p.parsesentence("q(x,)."))
|
bsd-3-clause
| -3,213,480,592,657,898,000
| 72.482353
| 127
| 0.553154
| false
| 2.666951
| false
| true
| false
|
blaisb/cfdemUtilities
|
cylinderPorosity/pythons/getParticlePositionsFOAM.py
|
1
|
2828
|
# This program converts OpenFOAM raw data to a text file containing information on the particles
# in the format that can be read by the porosity code
#
# position (x y z) and radius
# THIS PROGRAM REQUIRES A DIRECTORY particles in the main folder
# In the current form of the software the radius must be fixed by the user
# Author : Bruno Blais
# Last modified : 15-01-2014
#Python imports
#----------------
import os
import sys
import numpy
#----------------
#********************************
# OPTIONS AND USER PARAMETERS
#********************************
#Initial time of simulation, final time and time increment must be specified by user
t0=5
tf=115.0
dT=5
radius = 0.0007485
height=0.05
ri = 0.0064
ro = 0.0238
#====================
# READER
#====================
# This function reads an OpenFOAM raw file and extracts a table of the data
def readf(fname):
infile = open(fname,'r')
if (infile!=0):
#Clear garbage lines
for i in range(0,17):
infile.readline()
#Read number of cell centers
n=int(infile.readline())
#Pre-allocate memory
x=numpy.zeros([n])
y=numpy.zeros([n])
z=numpy.zeros([n])
#Clear garbage line "("
infile.readline()
#read current property "xu"
for i in range(0,n,1):
number_str=infile.readline()
number2_str=number_str.split("(")
number3_str=number2_str[1].split(")")
number4_str=number3_str[0].split()
x[i]=float(number4_str[0])
y[i]=float(number4_str[1])
z[i]=float(number4_str[2])
else:
print "File %s could not be opened" %fname
infile.close();
return n,x,y,z
#======================
# MAIN
#======================
#Name of the files to be considered
inname= ['lagrangian/particleCloud/positions']
os.chdir("./") # go to directory
nt=int((tf-t0)/dT)
t=t0
for i in range(0,nt):
#Current case
print "Post-processing time ", t
#Go to the directory corresponding to the timestep
if (t>0.99999 and t<1.0000001) : os.chdir("1")
elif (t==0) : os.chdir("0")
elif ((numpy.abs(numpy.mod(t,1)))<0.01): os.chdir(str(int(t)))
else :os.chdir(str(t))
#Create output file back in main folder
outname="../particlesInfo/particlesInfo_%s" %str(i)
outfile=open(outname,'w')
#Read each variables to be able to dimensionalise final array
[n,x,y,z] = readf(inname[0])
#Write header
outfile.write("%i\n" %nt)
outfile.write("%5.5e\n" %height)
outfile.write("%5.5e\n" %ri)
outfile.write("%5.5e\n" %ro)
outfile.write("%i\n" %n)
outfile.write("%5.5e\n" %t)
outfile.write("**************************************************\n")
for j in range(0,n):
outfile.write("%5.5e %5.5e %5.5e %5.5e \n" %(x[j],y[j],z[j],radius))
outfile.close()
t += dT
#Go back to CFD directory
os.chdir("..") #
print "Post-processing over"
|
lgpl-3.0
| -5,991,427,169,702,565,000
| 22.966102
| 96
| 0.594413
| false
| 3.050701
| false
| false
| false
|
travistang/late_fyt
|
model.py
|
1
|
10888
|
from keras.models import *
from keras.layers import *
from keras.layers.advanced_activations import *
from keras.callbacks import *
from keras.optimizers import Adam
from keras.initializers import *
import tensorflow as tf
from keras import backend as K  # K.get_session() is used below in guide_v1
from utils import huber_loss
def guide_v1(weight_files=None):  # weight_files added as a parameter; it was referenced below but never defined
S = Input(shape = (64,64,12))
x = Convolution2D(32,8,8,subsample = (4,4),activation = 'relu')(S)
x = BatchNormalization()(x)
x = Convolution2D(32,4,4,subsample = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Convolution2D(64,4,4,subsample = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Flatten()(x)
# z = Dense(128,init = 'uniform',activation = 'relu',name = 'ls_1',trainable = False)(x)
# ls = Dense(29,init = 'uniform',activation = 'relu',name = 'ls_2',trainable = False)(z)
y = Dense(300,activation = 'relu',name = 'act_1')(x)
Steering = Dense(1,activation = 'linear',name = 'act_2')(y)
#Steering = Dense(1,weights = [np.random.uniform(-1e-8,1e-8,(512,1)),np.zeros((1,))], name='Steering')(lrn4)
model = Model(S,Steering)
adam = Adam(lr=0.00000001,decay = 1e-6)
K.get_session().run([adam.beta_1.initializer,adam.beta_2.initializer])
model.compile(loss='mse', optimizer=adam)
if weight_files:
model.load_weights(weight_files)
return model, model.trainable_weights, S
def guide_v2():
S = Input(shape = (64,64,4))
x = Convolution2D(32,8,8,subsample = (4,4),activation = 'relu')(S)
x = BatchNormalization()(x)
x = Convolution2D(32,4,4,subsample = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Convolution2D(32,4,4,subsample = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Flatten()(x)
x = Dense(300,activation = 'relu')(x)
x = Dense(8,activation = 'linear',name = 'act_2')(x)
model = Model(S,x)
adam = Adam(lr = 0.0001,decay = 1e-6)
    model.compile(loss = 'categorical_crossentropy',optimizer = adam)  # 'categorial_accuracy' is not a valid Keras loss; cross-entropy is the usual choice for this 8-way output
return model
def low_guide_v1(lr = 0.0001,num_output = 9):
S = Input(shape = (116,))
x = Dense(300,activation = ELU())(S)
x = Dense(600,activation = ELU())(x)
x = Dense(num_output,activation = 'linear',init=lambda shape: normal(shape, scale=1e-4))(x)
model = Model(S,x)
adam = Adam(lr = lr,decay = 1e-6,clipnorm=0.5)
model.compile(loss = huber_loss(0.5),optimizer = adam)
return model
def low_guide_v2(num_action = 1,num_ob = 1):
# the actor
S = Input(shape = (1,num_ob))
x = Flatten()(S)
x = Dense(300,activation = 'relu')(x)
x = Dense(600,activation = 'relu')(x)
x = Dense(num_action,activation = 'linear')(x)
model = Model(S,x)
# the critic
A = Input(shape = (num_action,))
S = Input(shape = (1,num_ob))
s = Flatten()(S)
x = merge([A,s],mode = 'concat')
x = Dense(300,activation = 'relu')(x)
x = Dense(600,activation = 'relu')(x)
x = Dense(1,activation = 'linear')(x)
critic = Model([A,S],x)
return model,critic
def img_guide_v1(num_action = 1):
S = Input(shape = (1,64,64,3))
x = Reshape((64,64,3))(S)
x = Conv2D(16,(8,8),strides = (4,4),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = Flatten()(x)
x = Dense(600,activation = 'relu')(x)
x = Dense(300,activation = 'relu')(x)
x = Dense(num_action,activation = 'linear')(x)
actor = Model(S,x)
S = Input(shape = (1,64,64,3))
A = Input(shape = (num_action,))
x = Reshape((64,64,3))(S)
x = Conv2D(16,(8,8),strides = (4,4),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = Flatten()(x)
x = merge([A,x],mode = 'concat')
x = Dense(600,activation = 'relu')(x)
x = Dense(300,activation = 'relu')(x)
x = Dense(1,activation = 'linear')(x)
critic = Model([A,S],x)
return actor,critic
def img_guide_v2(num_action = 1,hist_len = 4):
S = Input(shape = (1,64,64,3 * hist_len))
x = Reshape((64,64,3 * hist_len))(S)
x = Conv2D(32,(8,8),strides = (4,4),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = Flatten()(x)
x = Dense(800,activation = 'relu')(x)
x = Dense(300,activation = 'relu')(x)
x = Dense(num_action,activation = 'linear')(x)
actor = Model(S,x)
S = Input(shape = (1,64,64,3 * hist_len))
A = Input(shape = (num_action,))
x = Reshape((64,64,3 * hist_len))(S)
x = Conv2D(32,(8,8),strides = (4,4),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = Flatten()(x)
x = merge([A,x],mode = 'concat')
x = Dense(800,activation = 'relu')(x)
x = Dense(300,activation = 'relu')(x)
x = Dense(1,activation = 'linear')(x)
critic = Model([A,S],x)
return actor,critic
def img_guide_v3(num_action = 1,hist_len = 4):
S = Input(shape = (1,hist_len,64,64,3))
x = Reshape((hist_len,64,64,3))(S)
x = TimeDistributed(Conv2D(32,(8,8),strides = (4,4),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = Flatten()(x)
x = Dense(800,activation = 'relu')(x)
x = Dense(400,activation = 'relu')(x)
x = Dense(num_action,activation = 'linear')(x)
actor = Model(S,x)
S = Input(shape = (1,hist_len,64,64,3))
A = Input(shape = (num_action,))
x = Reshape((hist_len,64,64,3))(S)
x = TimeDistributed(Conv2D(32,(8,8),strides = (4,4),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = Flatten()(x)
x = merge([A,x],mode = 'concat')
x = Dense(800,activation = 'relu')(x)
x = Dense(400,activation = 'relu')(x)
x = Dense(1,activation = 'linear')(x)
critic = Model([A,S],x)
return actor,critic
def stack_model(num_action = 1,hist_len = 4, num_filters = 16):
S = Input(shape = (1,64,64,3 * hist_len))
x = Reshape((64,64,3 * hist_len))(S)
x = Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = Flatten()(x)
x = Dense(600 if num_filters == 16 else 800,activation = 'relu')(x)
x = Dense(300,activation = 'relu')(x)
x = Dense(num_action,activation = 'linear')(x)
actor = Model(S,x)
S = Input(shape = (1,64,64,3 * hist_len))
A = Input(shape = (num_action,))
x = Reshape((64,64,3 * hist_len))(S)
x = Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = BatchNormalization()(x)
x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
x = Flatten()(x)
x = merge([A,x],mode = 'concat')
x = Dense(600 if num_filters == 16 else 800,activation = 'relu')(x)
x = Dense(300,activation = 'relu')(x)
x = Dense(1,activation = 'linear')(x)
critic = Model([A,S],x)
return actor,critic
def fork_model(num_action = 1,hist_len = 4, num_filters = 16):
S = Input(shape = (1,hist_len,64,64,3))
x = Reshape((hist_len,64,64,3))(S)
x = TimeDistributed(Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = Flatten()(x)
x = Dense(600 if num_filters == 16 else 800,activation = 'relu')(x)
x = Dense(400,activation = 'relu')(x)
x = Dense(num_action,activation = 'linear')(x)
actor = Model(S,x)
S = Input(shape = (1,hist_len,64,64,3))
A = Input(shape = (num_action,))
x = Reshape((hist_len,64,64,3))(S)
x = TimeDistributed(Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = Flatten()(x)
x = merge([A,x],mode = 'concat')
x = Dense(800 if num_filters == 16 else 1200,activation = 'relu')(x)
x = Dense(400,activation = 'relu')(x)
x = Dense(1,activation = 'linear')(x)
critic = Model([A,S],x)
return actor,critic
def LSTM_model(num_action = 1,hist_len = 4, num_filters = 16):
S = Input(shape = (1,hist_len,64,64,3))
x = Reshape((hist_len,64,64,3))(S)
x = TimeDistributed(Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = TimeDistributed(Flatten())(x)
x = LSTM(100 if num_filters == 16 else 200,activation = 'relu')(x)
x = Dense(1,activation = 'linear')(x)
actor = Model(S,x)
S = Input(shape = (1,hist_len,64,64,3))
A = Input(shape = (num_action,))
x = Reshape((hist_len,64,64,3))(S)
x = TimeDistributed(Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = TimeDistributed(BatchNormalization())(x)
x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
x = TimeDistributed(Flatten())(x)
x = LSTM(100 if num_filters == 16 else 200,activation = 'relu')(x)
x = merge([A,x],mode = 'concat')
x = Dense(50,activation = 'relu')(x)
x = Dense(1,activation = 'linear')(x)
critic = Model([A,S],x)
return actor,critic
|
mit
| 4,400,644,651,622,553,000
| 40.090566
| 116
| 0.591293
| false
| 2.791795
| false
| false
| false
|
wummel/linkchecker-gui
|
linkcheck_gui/syntax.py
|
1
|
3578
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2011-2016 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from PyQt4 import QtCore, QtGui
def format (color, style=''):
"""Return a QTextCharFormat with the given attributes."""
format = QtGui.QTextCharFormat()
format.setForeground(getattr(QtCore.Qt, color))
if 'bold' in style:
format.setFontWeight(QtGui.QFont.Bold)
if 'italic' in style:
format.setFontItalic(True)
return format
class Highlighter (QtGui.QSyntaxHighlighter):
"""Base class for all highlighters."""
def __init__ (self, document):
"""Initialize rules and styles."""
super(Highlighter, self).__init__(document)
self.rules = []
self.styles = {}
def highlightBlock(self, text):
"""Highlight a text block."""
for expression, format in self.rules:
# get first match
index = expression.indexIn(text)
while index >= 0:
length = expression.matchedLength()
self.setFormat(index, length, format)
# jump to next match
index = expression.indexIn(text, index + length)
self.setCurrentBlockState(0)
def addRule (self, pattern, style):
"""Add a rule pattern with given style."""
self.rules.append((QtCore.QRegExp(pattern), self.styles[style]))
class XmlHighlighter (Highlighter):
"""XML syntax highlighter."""
def __init__(self, document):
"""Set XML syntax rules."""
super(XmlHighlighter, self).__init__(document)
self.styles.update({
'keyword': format('darkBlue'),
'attribute': format('darkGreen'),
'comment': format('darkYellow'),
'string': format('darkMagenta'),
})
# keywords
for reg in ('/>', '>', '<!?[a-zA-Z0-9_]+'):
self.addRule(reg, 'keyword')
# attributes
self.addRule(r"\b[A-Za-z0-9_]+(?=\s*\=)", 'attribute')
# double-quoted string, possibly containing escape sequences
self.addRule(r'"[^"\\]*(\\.[^"\\]*)*"', 'string')
# single-quoted string, possibly containing escape sequences
self.addRule(r"'[^'\\]*(\\.[^'\\]*)*'", 'string')
# comments
self.addRule(r"<!--[^>]*-->", 'comment')
# Treat HTML as XML
HtmlHighlighter = XmlHighlighter
class IniHighlighter (Highlighter):
"""INI syntax highlighter."""
def __init__(self, document):
"""Set INI syntax rules."""
super(IniHighlighter, self).__init__(document)
self.styles.update({
'section': format('darkBlue'),
'property': format('darkGreen'),
'comment': format('darkYellow'),
})
self.addRule(r'\b\[[a-zA-Z0-9_]+\]\b', 'section')
self.addRule(r'\b[a-zA-Z0-9_]+\](?=\s*\=)', 'property')
self.addRule(r'#[^\n]*', 'comment')
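# A hedged illustration (hypothetical highlighter, not shipped with
# LinkChecker GUI): a minimal subclass sketching how Highlighter.addRule()
# and the styles dict are meant to be combined for another config-style
# syntax.  The class name and its rules are assumptions for demonstration only.
class JsonHighlighter (Highlighter):
    """Very small JSON syntax highlighter (illustrative sketch)."""

    def __init__(self, document):
        """Set a few JSON syntax rules."""
        super(JsonHighlighter, self).__init__(document)
        self.styles.update({
            'keyword': format('darkBlue'),
            'number': format('darkGreen'),
            'string': format('darkMagenta'),
        })
        # literal keywords
        self.addRule(r'\b(true|false|null)\b', 'keyword')
        # integer and floating point numbers
        self.addRule(r'-?\b\d+(\.\d+)?([eE][+-]?\d+)?\b', 'number')
        # double-quoted string, possibly containing escape sequences
        self.addRule(r'"[^"\\]*(\\.[^"\\]*)*"', 'string')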
|
gpl-3.0
| 4,945,731,341,848,713,000
| 35.510204
| 73
| 0.604807
| false
| 3.893362
| false
| false
| false
|
boisde/Greed_Island
|
business_logic/order_collector/transwarp/validate.py
|
1
|
2044
|
#!/usr/bin/env python
# coding:utf-8
import logging
RECORD_NORMAL = 0
RECORD_DELETED = 1
RECORD_CHOICE = (
(RECORD_NORMAL, u'正常'),
(RECORD_DELETED, u'已删除'),
)
def is_valid_kw(obj, is_update=False, **kw):
mappings = obj.__mappings__
if is_update and kw.get('deleted', None) == RECORD_DELETED:
raise ValueError("Illegal operation: Try to mark %s as deleted with update api." % obj.__name__)
elif is_update:
pass
    # Check that every required (non-nullable, non-primary-key) argument is present
else:
args = set(kw.keys())
required = {key_name for key_name, orm_val in mappings.iteritems() if orm_val.nullable is False and orm_val.primary_key is False}
required -= {'deleted', 'create_time', 'update_time'}
if not required.issubset(args):
raise ValueError("Not providing required args: %s." % list(required-args))
    # Check argument types against the column DDL
for key_name, kv in kw.iteritems():
if key_name in mappings:
orm_val = mappings[key_name]
if orm_val.ddl.find('int') != -1:
try:
int(kv)
except ValueError:
raise ValueError("[%s]:[%s][%s] should be type of [%s]." % (key_name, unicode(kv), type(kv), orm_val.ddl))
elif orm_val.ddl.find('char') != -1:
char_len = int(orm_val.ddl[orm_val.ddl.find('(') + 1:orm_val.ddl.find(')')])
                if (not kv) and orm_val.nullable is True:  # the field may be empty and the supplied value is empty
continue
elif not isinstance(kv, unicode) and not isinstance(kv, str):
raise ValueError("[%s]:[%s][%s] should be type of str." % (key_name, unicode(kv), type(kv)))
elif kv and len(kv) > char_len:
raise ValueError("[%s]:[%s] should be str of length[%s]." % (key_name, unicode(kv), char_len))
else:
logging.warning("[%s]:[%s] won't be passed since [%s] is not valid." % (key_name, unicode(kv), key_name))
|
mit
| 302,318,775,476,008,200
| 43.568182
| 137
| 0.555612
| false
| 3.294118
| false
| false
| false
|
Rbeuque74/brie-aurore
|
Brie/brie/websetup.py
|
1
|
1479
|
# -*- coding: utf-8 -*-
"""Setup the Brie application"""
import logging
import transaction
from tg import config
from brie.config.environment import load_environment
__all__ = ['setup_app']
log = logging.getLogger(__name__)
def setup_app(command, conf, vars):
"""Place any commands to setup brie here"""
load_environment(conf.global_conf, conf.local_conf)
# Load the models
from brie import model
print "Creating tables"
model.metadata.create_all(bind=config['pylons.app_globals'].sa_engine)
manager = model.User()
manager.user_name = u'manager'
manager.display_name = u'Example manager'
manager.email_address = u'manager@somedomain.com'
manager.password = u'managepass'
model.DBSession.add(manager)
group = model.Group()
group.group_name = u'managers'
group.display_name = u'Managers Group'
group.users.append(manager)
model.DBSession.add(group)
permission = model.Permission()
permission.permission_name = u'manage'
permission.description = u'This permission give an administrative right to the bearer'
permission.groups.append(group)
model.DBSession.add(permission)
editor = model.User()
editor.user_name = u'editor'
editor.display_name = u'Example editor'
editor.email_address = u'editor@somedomain.com'
editor.password = u'editpass'
model.DBSession.add(editor)
model.DBSession.flush()
transaction.commit()
print "Successfully setup"
|
bsd-2-clause
| -5,205,969,752,594,157,000
| 24.947368
| 90
| 0.698445
| false
| 3.616137
| false
| false
| false
|
papallas/baxter_cashier
|
scripts/baxter_cashier_manipulation/src/environment_factory.py
|
1
|
6798
|
#!/usr/bin/env python
"""
Factory for Environments.
This file contains some static classes that represents environments in real
life. If Baxter for example is placed somewhere in a real environment let's
name it "Robotics Lab" then we wish to define obstacles around Baxter in this
specific environment. In this class we achieve exactly this, for each
environment that Baxter can be, we define the obstacles around him and using
the Factory Pattern and Template design pattern we are able to have
extensibility with a very nice way.
If you need to define a new environment here are the steps:
1. Define a similar class with the one listed below: `RoboticsLabEnvironment`
but make sure the obstacles implemented in `RoboticsLabEnvironment` match
you own obstacles in your environment, and make sure you give a sensible
name for the class.
2. In `EnvironmentFactory` class, define a top-level attribute with the name
   of your new class (see the one already there: `_robotics_lab_environment`)
3. Implement your getter, e.g. `def get_robotics_lab_environment():`, and use
   similar logic to return an instance of your new class.
4. In `moveit_controller.py` find the line
   `EnvironmentFactory.get_robotics_lab_environment()` and change it to match
   your new getter method.
A hedged example of such an environment class (steps 1 to 3) is sketched at
the end of this module.
Copyright (C) 2016/2017 The University of Leeds and Rafael Papallas
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import copy
from geometry_msgs.msg import PoseStamped
class EnvironmentFactory:
"""
Environment Factory implementing the design pattern.
In here are defined the getters for the different environments and is the
class used in other scripts to generate the class environments required.
"""
_robotics_lab_environment = None
@staticmethod
def initialize():
"""Initialise each environment."""
EnvironmentFactory._robotics_lab_environment = RoboticsLabEnvironment()
@staticmethod
def get_robotics_lab_environment():
"""Will return the robotics lab environment."""
return EnvironmentFactory._robotics_lab_environment.clone()
class Obstacle:
"""This represent an obstacle in real world."""
def __init__(self, obstalce_name, x, y, z, shape_size):
"""
Will configure the obstacle details and set it's attributes.
- obstalce_name: is the name of the obstacle.
- x, y and z: is the position or pose of the obstacle in the world.
- shape_size: is a triple tuple with height, width and depth of the
object or obstacle.
"""
self.name = obstalce_name
# The pose of where the obstacle is
self.pose = PoseStamped()
self.pose.pose.position.x = x
self.pose.pose.position.y = y
self.pose.pose.position.z = z
# Pose Header Frame ID is None because it needs to be set for the
# specific scene, which is not available at the time the obstacle
# is created.
self.pose.header.frame_id = None
# This is a triple tuple (h, w, z) representing the size of the
# obstacle
self.size = shape_size
def set_frame_id(self, id):
"""
Will set the pose's header frame ID.
It is important, for the obstacle to appear in the MoveIt Rviz to set
this to `robot.get_planning_frame()`, since we don't have this info
in here, we need to set this later. Make sure you have set this
otherwise you will not be able to visualise the obstacle in Rviz.
"""
self.pose.header.frame_id = id
class Environment:
"""This is the template class of the Template design pattern."""
# Obstacles represents a list of obstacles
_obstacles = None
def clone(self):
"""
Clone itself.
Required method to clone itself when Factory is used to get the
instance.
"""
pass
def get_obstacles(self):
"""Will return the list with obstacles."""
return self._obstacles
class RoboticsLabEnvironment(Environment):
"""
This class represent's University of Leeds, Robotic's Laboratory.
The obstacles defiend here are specifically to that environment. This is
a subclass of the environment template of the Template design pattern.
"""
def __init__(self):
"""
Default constructor.
Will initialise the obstacles attribute to empty list and will call the
method to create the obstacles.
"""
self._obstacles = []
self._create_obstalces()
def _create_obstalces(self):
"""
Generate and append the obstacles to the class.
In here are the obstacles relevant to this specific environment.
"""
side_wall = Obstacle(obstalce_name="side_wall",
x=0.6,
y=1,
z=0,
shape_size=(4, 0.2, 3))
self._obstacles.append(side_wall)
back_wall = Obstacle(obstalce_name="back_wall",
x=-1,
y=0,
z=0,
shape_size=(0.2, 4, 3))
self._obstacles.append(back_wall)
table = Obstacle(obstalce_name="table",
x=0.7,
y=-0.1,
z=-0.53,
shape_size=(0.8, 1.2, 0.7))
self._obstacles.append(table)
camera_tripod = Obstacle(obstalce_name="camera_tripod",
x=0.6,
y=-1.2,
z=-0.54,
shape_size=(1, 0.3, 1.8))
self._obstacles.append(camera_tripod)
# width, length, height
def clone(self):
"""Required method for the Template design pattern."""
return copy.copy(self)
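# A hedged illustration of the steps described in the module docstring.  The
# environment below (its name, obstacle and dimensions) is entirely made up
# for demonstration; steps 2 and 3 would additionally add a
# `_kitchen_lab_environment` attribute and a `get_kitchen_lab_environment()`
# getter to EnvironmentFactory, mirroring the robotics-lab ones above.
class KitchenLabEnvironment(Environment):
    """Hypothetical environment with a single table-like obstacle."""

    def __init__(self):
        """Initialise the obstacle list and populate it."""
        self._obstacles = []
        self._create_obstacles()

    def _create_obstacles(self):
        """Generate and append the obstacles of this made-up environment."""
        kitchen_table = Obstacle(obstalce_name="kitchen_table",
                                 x=0.8,
                                 y=0.0,
                                 z=-0.5,
                                 shape_size=(0.9, 1.5, 0.7))
        self._obstacles.append(kitchen_table)

    def clone(self):
        """Required method for the Template design pattern."""
        return copy.copy(self)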
|
gpl-3.0
| 8,566,268,127,867,704,000
| 34.968254
| 79
| 0.604884
| false
| 4.117505
| false
| false
| false
|
fynjah/django-pimp-my-filter
|
filter_manager/views.py
|
1
|
7132
|
import datetime
try:
import simplejson as json
except ImportError:
from django.utils import simplejson as json
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound, HttpResponseForbidden
from django.shortcuts import render_to_response, RequestContext
from django.core.context_processors import csrf
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.generic import GenericRelation
from filter_manager.models import Filter, Condition, LOGICAL_OPERATORS
@login_required
def save_filter(request):
if request.method == "POST" and request.is_ajax():
if 'filter' in request.POST:
new_filter = json.loads(request.POST['filter'])
app_model = '%s.%s' % (new_filter['app'],new_filter['model'])
if settings.PIMP_MY_FILTER['ALLOWED_MODELS']:
if not app_model in settings.PIMP_MY_FILTER['ALLOWED_MODELS']:
return HttpResponseForbidden('[{"error":"Forbidden."}]',
mimetype='application/json; charset=utf8')
else:
return HttpResponseForbidden(
'[{"error":"Forbidden. Check PIMP_MY_FILTER Settings."}]',
mimetype='application/json; charset=utf8',
)
ct = ContentType.objects.get_by_natural_key(new_filter['app'],
new_filter['model'])
if new_filter['quick'] == 'true':
quick = True
else:
quick = False
f = Filter(name=new_filter['name'],
user_id=request.user.id,
quick=quick,
content_type = ct,)
f.save()
for k,c in new_filter['conditions'].iteritems():
data = c['value_data']
if (data['type'] == 'ForeignKey'
or data['type'] == 'ManyToManyField'
or data['type'] == 'OneToOneField'):
value = data['fk_id']
elif (data['type'] == 'BooleanField'
or data['type'] == 'NullBooleanField'
or data['type'] == 'FieldFile'
or data['type'] == 'FileField'
or data['type'] == 'ImageField'):
if c['value'] == 'on':
value = True
else:
value = False
else:
value = c['value']
con = Condition(filter=f,
operator = c['operator'],
field_type = data['type'],
value=value,
field=c['field'],)
con.save()
r = {'filter_id':f.id}
return HttpResponse(json.dumps(r, indent = 4 * ' '),
mimetype='application/json; charset=utf8')
else:
return HttpResponseForbidden('[{"error":"Forbidden. Wrong headers."}]',
mimetype='application/json; charset=utf8')
@login_required
def get_structure(request):
if request.method == "POST" and request.is_ajax():
if 'app' in request.POST and 'model' in request.POST:
fields = {}
ct = ContentType.objects.get_by_natural_key(request.POST['app'],
request.POST['model'])
model = ContentType.model_class(ct)
for i,x in enumerate(model._meta.get_all_field_names()):
obj, m, direct, m2m = model._meta.get_field_by_name(x)
if obj.name == 'id' or not direct or isinstance(obj, GenericRelation):
continue
f = {}
f.update({"type":obj.get_internal_type()})
f.update({"name":obj.name})
fields.update( {i: f} )
r = {}
r.update({'fields':fields})
r.update({'operators':LOGICAL_OPERATORS})
return HttpResponse(json.dumps(r, indent = 4 * ' '),
mimetype='application/json; charset=utf8')
return HttpResponseForbidden('[{"error":"Forbidden"}]',
mimetype='application/json; charset=utf8')
def use_filter_internal(filter_id):
if filter_id:
try:
flt = Filter.objects.only('content_type').get(pk = filter_id)
except Filter.DoesNotExist:
return None
model = ContentType.model_class(flt.content_type)
kwargs = {}
for c in flt.conditions.all():
field = None
lookup = c.operator
field = "%s%s" % (c.field, lookup)
kwargs.update({field:c.value})
return model.objects.filter(**kwargs)
else:
return None
@login_required
def use_filter(request):
if request.is_ajax():
if 'filter_id' in request.GET:
try:
flt = Filter.objects.only('content_type').get(pk = request.GET['filter_id'])
except Filter.DoesNotExist:
return HttpResponseForbidden('[{"error":"Filter Not found."}]',
mimetype='application/json; charset=utf8')
model = ContentType.model_class(flt.content_type)
kwargs = {}
for c in flt.conditions.all():
field = None
lookup = c.operator
field = "%s%s" % (c.field, lookup)
kwargs.update({field:c.value})
qs = model.objects.filter(**kwargs)
response = {}
for i,q in enumerate(qs):
field_list = {}
for f in q._meta.get_all_field_names():
obj, model, direct, m2m = q._meta.get_field_by_name(f)
if not direct or isinstance(obj, GenericRelation):
continue
if m2m:
l = {}
val = obj.value_from_object(q)
for m in obj.value_from_object(q):
l.update({m.pk:m.__unicode__()})
field_list.update({f:l})
elif obj.rel:
val = q.__getattribute__(obj.name)
if val:
l = {val.pk:val.__unicode__()}
field_list.update({obj.name:l})
else:
field_list.update({f:None})
else:
field_list.update({f:obj.value_to_string(q)})
response.update({i:field_list})
r = json.dumps(response, indent = 4 * ' ')
return HttpResponse(r,
mimetype='application/json; charset=utf8')
return HttpResponseForbidden('[{"error":"Forbidden. Wrong headers."}]',
mimetype='application/json; charset=utf8')
@login_required
def get_typeahead(request):
if request.is_ajax() and request.method == "POST":
if ('field' in request.POST and
'app' in request.POST and
'model' in request.POST):
ct = ContentType.objects.get_by_natural_key(request.POST['app'],
request.POST['model'])
instance = ContentType.model_class(ct)
f = dict([(x,x) for x in instance._meta.get_all_field_names() ])
try:
o = f[request.POST['field']]
o = instance._meta.get_field_by_name(o)[0]
except KeyError:
return HttpResponseForbidden('[{"error":"Forbidden"}]',
mimetype='application/json; charset=utf8')
o = o.related.parent_model
obj_list = o.objects.all()
lst = {}
for i,obj in enumerate(obj_list):
l = {}
l.update({"id":obj.id})
l.update({"unicode":obj.__unicode__()})
#not sure about __unicode__, actually
lst.update({i:l})
return HttpResponse(json.dumps(lst, indent = 4 * ' '),
mimetype='application/json; charset=utf8')
else:
return HttpResponseForbidden('[{"error":"Forbidden. Wrong headers."}]',
mimetype='application/json; charset=utf8')
def get_filters_by_user(request):
if request.is_ajax():
user_filters = Filter.objects.filter(Q(user = request.user.id)|Q(for_all = True))
f_list = {}
for i,f in enumerate(user_filters):
f_list.update({i:{'id':f.pk, 'name':f.name, 'quick':f.quick}})
return HttpResponse(json.dumps(f_list, indent = 4 * ' '),
mimetype='application/json; charset=utf8')
return HttpResponseForbidden('[{"error":"Forbidden. Wrong headers."}]',
mimetype='application/json; charset=utf8')
|
bsd-3-clause
| -1,585,255,892,386,726,400
| 32.483568
| 103
| 0.651711
| false
| 3.221319
| false
| false
| false
|
LaurentClaessens/phystricks
|
src/MathStructures.py
|
1
|
3272
|
# -*- coding: utf8 -*-
###########################################################################
# This is part of the module phystricks
#
# phystricks is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# phystricks is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with phystricks.py. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
# copyright (c) Laurent Claessens, 2010,2011,2013-2017
# email: laurent@claessens-donadello.eu
from sage.rings.rational import Rational
from sage.all import latex, var  # var() is used by AxesUnit.place_list below
from Utilities import *
from SmallComputations import MultipleBetween
from AngleMeasure import AngleMeasure
class PolarCoordinates(object):
def __init__(self,r,value_degree=None,value_radian=None):
self.r = r
self.measure=AngleMeasure(value_degree=value_degree,value_radian=value_radian)
self.degree=self.measure.degree
self.radian=self.measure.radian
def __str__(self):
return "PolarCoordinates, r=%s,degree=%s,radian=%s"%(str(self.r),str(self.degree),str(self.radian))
def DegreeAngleMeasure(x):
return AngleMeasure(value_degree=x)
def RadianAngleMeasure(x):
return AngleMeasure(value_radian=x)
class AxesUnit(object):
def __init__(self,numerical_value,latex_symbol=""):
try :
numerical_value=Rational(numerical_value)
except TypeError :
pass
self.numerical_value=numerical_value
self.latex_symbol=latex_symbol
def symbol(self,x):
return latex(x)+self.latex_symbol
def place_list(self,mx,Mx,frac=1,mark_origin=True):
"""
        Return a list of (value, label) pairs:

        1. each value is an integer multiple of <frac>*self.numerical_value
           lying between mx and Mx,
        2. each label is the corresponding multiple of the base unit,
           rendered as a LaTeX string.

        Give <frac> as a literal real. Recall that Python evaluates 1/2 to 0;
        if you pass 0.5, it will be converted back to 1/2 for a nice display.
"""
try :
frac=Rational(frac) # If the user enters "0.5", it is converted to 1/2
except TypeError :
pass
if frac==0:
            raise ValueError, "frac is zero in AxesUnit.place_list(). Maybe you forgot that Python evaluates 1/2 to 0? (write the literal 0.5 instead)\nOr are you trying to push me into an infinite loop?"
l=[]
k=var("TheTag")
for x in MultipleBetween(frac*self.numerical_value,mx,Mx,mark_origin):
if self.latex_symbol == "":
l.append((x,"$"+latex(x)+"$"))
else :
pos=(x/self.numerical_value)*k
text="$"+latex(pos).replace("TheTag",self.latex_symbol)+"$" # This risks to be Sage-version dependent.
l.append((x,text))
return l
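# Hedged usage sketch (illustration only, guarded so that importing the module
# is unchanged): place axis marks every half unit of a plain numerical
# AxesUnit on the interval [-2, 2].
if __name__ == "__main__":
    example_unit = AxesUnit(1)
    for mark_value, mark_label in example_unit.place_list(-2, 2, frac=0.5):
        print mark_value, mark_label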
|
gpl-3.0
| 3,012,877,965,020,561,000
| 40.417722
| 202
| 0.623472
| false
| 3.778291
| false
| false
| false
|
stefanwebb/tensorflow-models
|
tensorflow_models/models/vae_normal_obs.py
|
1
|
5404
|
# MIT License
#
# Copyright (c) 2017, Stefan Webb. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_models as tf_models
def create_placeholders(settings):
x = tf.placeholder(tf.float32, shape=tf_models.batchshape(settings), name='samples')
z = tf.placeholder(tf.float32, shape=tf_models.latentshape(settings), name='codes')
return x, z
def create_prior(settings):
dist_prior = tf_models.standard_normal(tf_models.latentshape(settings))
return tf.identity(dist_prior.sample(), name='p_z/sample')
def create_encoder(settings, reuse=True):
encoder_network = settings['architecture']['encoder']['fn']
x_placeholder = tf_models.samples_placeholder()
assert(not x_placeholder is None)
with tf.variable_scope('encoder', reuse=reuse):
mean_z, diag_stdev_z = encoder_network(settings, x_placeholder, is_training=False)
dist_z_given_x = tf.contrib.distributions.MultivariateNormalDiag(mean_z, diag_stdev_z)
encoder = tf.identity(dist_z_given_x.sample(name='sample'), name='q_z_given_x/sample')
return encoder
def create_decoder(settings, reuse=True):
if 'transformations' in settings and 'rescale' in settings['transformations']:
min_val = settings['transformations']['rescale'][0]
max_val = settings['transformations']['rescale'][1]
else:
min_val = 0.
max_val = 1.
decoder_network = settings['architecture']['decoder']['fn']
z_placeholder = tf_models.codes_placeholder()
assert(not z_placeholder is None)
with tf.variable_scope('decoder', reuse=reuse):
mean_x, diag_stdev_x = decoder_network(settings, z_placeholder, is_training=False)
dist_x_given_z = tf.contrib.distributions.MultivariateNormalDiag(mean_x, diag_stdev_x)
decoder = tf.identity(tf.clip_by_value(dist_x_given_z.sample(), min_val, max_val), name='p_x_given_z/sample')
return decoder
def create_probs(settings, inputs, is_training, reuse=False):
encoder_network = settings['architecture']['encoder']['fn']
decoder_network = settings['architecture']['decoder']['fn']
dist_prior = tf_models.standard_normal(tf_models.latentshape(settings))
# Use recognition network to determine mean and (log) variance of Gaussian distribution in latent space
with tf.variable_scope('encoder', reuse=reuse):
mean_z, diag_stdev_z = encoder_network(settings, inputs, is_training=is_training)
dist_z_given_x = tf.contrib.distributions.MultivariateNormalDiag(mean_z, diag_stdev_z)
# Draw one sample z from Gaussian distribution
eps = tf.random_normal(tf_models.latentshape(settings), 0, 1, dtype=tf.float32)
z_sample = tf.add(mean_z, tf.multiply(diag_stdev_z, eps))
# Use generator to determine mean of Bernoulli distribution of reconstructed input
with tf.variable_scope('decoder', reuse=reuse):
mean_x, diag_stdev_x = decoder_network(settings, z_sample, is_training=is_training)
dist_x_given_z = tf.contrib.distributions.MultivariateNormalDiag(tf_models.flatten(mean_x), tf_models.flatten(diag_stdev_x))
#print('*** Debugging ***')
#print('mean_x.shape', mean_x.shape)
#print('diag_stdev_x.shape', diag_stdev_x.shape)
#print('dist_x_given_z.sample().shape', dist_x_given_z.sample().shape)
#print('dist_x_given_z.log_prob(tf_models.flatten(inputs)).shape', dist_x_given_z.log_prob(tf_models.flatten(inputs)).shape)
lg_p_x_given_z = tf.identity(dist_x_given_z.log_prob(tf_models.flatten(inputs)), name='p_x_given_z/log_prob')
lg_p_z = tf.identity(dist_prior.log_prob(z_sample), name='p_z/log_prob')
lg_q_z_given_x = tf.identity(dist_z_given_x.log_prob(z_sample), name='q_z_given_x/log_prob')
return lg_p_x_given_z, lg_p_z, lg_q_z_given_x
# TODO: Fix this to be normal distribution!
def lg_likelihood(x, z, settings, reuse=True, is_training=False):
decoder_network = settings['architecture']['decoder']['fn']
with tf.variable_scope('model'):
with tf.variable_scope('decoder', reuse=reuse):
logits_x = decoder_network(settings, z, is_training=is_training)
dist_x_given_z = tf.contrib.distributions.Bernoulli(logits=tf_models.flatten(logits_x), dtype=tf.float32)
return tf.reduce_sum(dist_x_given_z.log_prob(tf_models.flatten(x)), 1)
def lg_prior(z, settings, reuse=True, is_training=False):
dist_prior = tf_models.standard_normal(z.shape)
return dist_prior.log_prob(z)
|
mit
| 4,858,192,034,011,259,000
| 45.586207
| 125
| 0.744078
| false
| 3.180695
| false
| false
| false
|
lamestation/packthing
|
packthing/util.py
|
1
|
7019
|
import errno
import os
import platform
import shutil
import string
import subprocess
import sys
import tarfile
import zipfile
from contextlib import contextmanager
def get_platform():
_platform = dict()
_platform["system"] = platform.system().lower()
machine = platform.machine().lower()
if machine == "x86_64":
machine = "amd64"
_platform["machine"] = machine
return _platform
def warning(*args):
print("WARNING:" + " ".join(args))
def error(*objs):
blocks = []
for b in " ".join(objs).split("\n"):
if len(blocks) > 0:
blocks.append(" " + b)
else:
blocks.append(b)
print("\nERROR:" + "\n".join(blocks))
print()
sys.exit(1)
def subtitle(text):
line = (80 - (len(text) + 2)) // 2
print("-" * line, text, "-" * (line + (len(text) % 2)))
def title(text):
line = (80 - (len(text) + 2)) // 2
print("=" * line, text.upper(), "=" * (line + (len(text) % 2)))
def headline(func):
def wrapper(*args, **kwargs):
title(func.__name__)
res = func(*args, **kwargs)
return res
return wrapper
@contextmanager
def pushd(newDir):
previousDir = os.getcwd()
os.chdir(newDir)
yield
os.chdir(previousDir)
def copy(src, dest, verbose=True, permissions=0o644):
destfile = os.path.join(dest, os.path.basename(src))
if verbose:
print("Copy", src, "to dir", dest)
mkdir(dest)
shutil.copy(src, destfile)
os.chmod(destfile, permissions)
def command(args, verbose=True, strict=True, stdinput=None, abort=None):
if abort is None:
abort = True
if verbose:
print("-", " ".join(args))
if not args:
error("Attempting to run empty command.")
try:
process = subprocess.Popen(
args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE
)
except OSError as e:
if abort:
error("Command '" + args[0] + "' not found; exiting.")
return
if stdinput is not None:
stdinput = stdinput.encode()
out, err = process.communicate(input=stdinput)
out = out.decode()
err = err.decode()
if strict:
if process.returncode:
print(err)
raise subprocess.CalledProcessError(process.returncode, args, err)
return out, err
def command_in_dir(args, newdir, verbose=True, strict=True, stdinput=None):
if verbose:
print("DIR:", newdir)
with pushd(newdir):
out, err = command(args, verbose=verbose, strict=strict)
return out, err
def table(path, version, url):
return "%30s %10s %s" % (path, version, url)
def make(path, args):
with pushd(path):
args.insert(0, "make")
for m in ["make", "mingw32-make"]:
args[0] = m
failed = 0
try:
subprocess.check_call(args)
except OSError:
failed = 1
except subprocess.CalledProcessError as e:
error("Failed to build project '" + path + "'")
if not failed:
return
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def mkdir(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def tar_archive(name, files):
shortname = os.path.basename(name)
name += ".tgz"
archive = tarfile.open(name=name, mode="w:gz")
for f in files:
archive.add(name=f, arcname=os.path.join(shortname, f), recursive=False)
archive.close()
def zip_archive(name, files):
shortname = os.path.basename(name)
name += ".zip"
archive = zipfile.ZipFile(name, "w")
for f in files:
archive.write(
filename=f,
arcname=os.path.join(shortname, f),
compress_type=zipfile.ZIP_DEFLATED,
)
archive.close()
def from_scriptroot(filename):
currentpath = os.path.dirname(os.path.abspath(__file__))
return os.path.join(currentpath, filename)
def get_template_text(template):
template = os.path.join("template", template)
template = from_scriptroot(template)
return open(template, "r").read()
def get_template(template):
return string.Template(get_template_text(template))
# python-chroot-builder
# Copyright (C) 2012 Ji-hoon Kim
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------
def ldd(filenames):
libs = []
for x in filenames:
p = subprocess.Popen(["ldd", x], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = p.stdout.readlines()
for x in result:
s = x.split()
s.pop(1)
s.pop()
if len(s) == 2:
libs.append(s)
return libs
# -----------------------------------------
def extract_libs(files, libs):
resultlibs = []
for f in files:
for l in ldd([which(f)]):
for lib in libs:
if l[0].find(lib) == -1:
pass
else:
resultlibs.append(l)
return sorted(list(set(tuple(lib) for lib in resultlibs)))
def write(text, filename):
f = open(filename, "w")
f.seek(0)
f.write(text)
f.close()
def create(text, filename, executable=False):
print("Create", filename)
mkdir(os.path.dirname(filename))
f = open(filename, "w")
f.seek(0)
f.write(text)
f.close()
if executable:
os.chmod(filename, 0o755)
else:
os.chmod(filename, 0o644)
def root():
if os.geteuid() != 0:
error("This configuration requires root privileges!")
def cksum(files):
print("cksum:")
for f in files:
try:
out, err = command(["cksum", f], verbose=False)
except subprocess.CalledProcessError as e:
error("Failed to checksum file:", f)
print("| " + out.replace("\n", ""))
|
gpl-3.0
| 2,858,408,366,746,371,000
| 23.371528
| 88
| 0.579855
| false
| 3.719661
| false
| false
| false
|
tklengyel/patchwork
|
apps/patchwork/views/xmlrpc.py
|
1
|
13846
|
# Patchwork - automated patch tracking system
# Copyright (C) 2008 Jeremy Kerr <jk@ozlabs.org>
#
# This file is part of the Patchwork package.
#
# Patchwork is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Patchwork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Patchwork; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Patchwork XMLRPC interface
#
from SimpleXMLRPCServer import SimpleXMLRPCDispatcher
from django.http import HttpResponse, HttpResponseRedirect, \
HttpResponseServerError
from django.core import urlresolvers
from django.contrib.auth import authenticate
from patchwork.models import Patch, Project, Person, State
from patchwork.views import patch_to_mbox
from django.views.decorators.csrf import csrf_exempt
import sys
import base64
import xmlrpclib
class PatchworkXMLRPCDispatcher(SimpleXMLRPCDispatcher):
def __init__(self):
if sys.version_info[:3] >= (2,5,):
SimpleXMLRPCDispatcher.__init__(self, allow_none=False,
encoding=None)
def _dumps(obj, *args, **kwargs):
kwargs['allow_none'] = self.allow_none
kwargs['encoding'] = self.encoding
return xmlrpclib.dumps(obj, *args, **kwargs)
else:
def _dumps(obj, *args, **kwargs):
return xmlrpclib.dumps(obj, *args, **kwargs)
SimpleXMLRPCDispatcher.__init__(self)
self.dumps = _dumps
# map of name => (auth, func)
self.func_map = {}
def register_function(self, fn, auth_required):
self.func_map[fn.__name__] = (auth_required, fn)
def _user_for_request(self, request):
auth_header = None
if 'HTTP_AUTHORIZATION' in request.META:
auth_header = request.META.get('HTTP_AUTHORIZATION')
elif 'Authorization' in request.META:
auth_header = request.META.get('Authorization')
if auth_header is None or auth_header == '':
raise Exception("No authentication credentials given")
str = auth_header.strip()
if not str.startswith('Basic '):
raise Exception("Authentication scheme not supported")
str = str[len('Basic '):].strip()
try:
decoded = base64.decodestring(str)
username, password = decoded.split(':', 1)
except:
raise Exception("Invalid authentication credentials")
return authenticate(username = username, password = password)
def _dispatch(self, request, method, params):
if method not in self.func_map.keys():
raise Exception('method "%s" is not supported' % method)
auth_required, fn = self.func_map[method]
if auth_required:
user = self._user_for_request(request)
if not user:
raise Exception("Invalid username/password")
params = (user,) + params
return fn(*params)
def _marshaled_dispatch(self, request):
try:
params, method = xmlrpclib.loads(request.body)
response = self._dispatch(request, method, params)
# wrap response in a singleton tuple
response = (response,)
response = self.dumps(response, methodresponse=1)
except xmlrpclib.Fault, fault:
response = self.dumps(fault)
except:
# report exception back to server
response = self.dumps(
xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value)),
)
return response
dispatcher = PatchworkXMLRPCDispatcher()
# XMLRPC view function
@csrf_exempt
def xmlrpc(request):
if request.method != 'POST':
return HttpResponseRedirect(
urlresolvers.reverse('patchwork.views.help',
kwargs = {'path': 'pwclient/'}))
response = HttpResponse()
try:
ret = dispatcher._marshaled_dispatch(request)
response.write(ret)
except Exception:
return HttpResponseServerError()
return response
# decorator for XMLRPC methods. Setting login_required to true will call
# the decorated function with a non-optional user as the first argument.
def xmlrpc_method(login_required = False):
def wrap(f):
dispatcher.register_function(f, login_required)
return f
return wrap
# We allow most of the Django field lookup types for remote queries
LOOKUP_TYPES = ["iexact", "contains", "icontains", "gt", "gte", "lt",
"in", "startswith", "istartswith", "endswith",
"iendswith", "range", "year", "month", "day", "isnull" ]
#######################################################################
# Helper functions
#######################################################################
def project_to_dict(obj):
"""Return a trimmed down dictionary representation of a Project
object which is OK to send to the client."""
return \
{
'id' : obj.id,
'linkname' : obj.linkname,
'name' : obj.name,
}
def person_to_dict(obj):
"""Return a trimmed down dictionary representation of a Person
object which is OK to send to the client."""
# Make sure we don't return None even if the user submitted a patch
# with no real name. XMLRPC can't marshall None.
if obj.name is not None:
name = obj.name
else:
name = obj.email
return \
{
'id' : obj.id,
'email' : obj.email,
'name' : name,
'user' : unicode(obj.user).encode("utf-8"),
}
def patch_to_dict(obj):
"""Return a trimmed down dictionary representation of a Patch
object which is OK to send to the client."""
return \
{
'id' : obj.id,
'date' : unicode(obj.date).encode("utf-8"),
'filename' : obj.filename(),
'msgid' : obj.msgid,
'name' : obj.name,
'project' : unicode(obj.project).encode("utf-8"),
'project_id' : obj.project_id,
'state' : unicode(obj.state).encode("utf-8"),
'state_id' : obj.state_id,
'submitter' : unicode(obj.submitter).encode("utf-8"),
'submitter_id' : obj.submitter_id,
'delegate' : unicode(obj.delegate).encode("utf-8"),
'delegate_id' : max(obj.delegate_id, 0),
'commit_ref' : max(obj.commit_ref, ''),
}
def bundle_to_dict(obj):
"""Return a trimmed down dictionary representation of a Bundle
object which is OK to send to the client."""
return \
{
'id' : obj.id,
'name' : obj.name,
'n_patches' : obj.n_patches(),
'public_url' : obj.public_url(),
}
def state_to_dict(obj):
"""Return a trimmed down dictionary representation of a State
object which is OK to send to the client."""
return \
{
'id' : obj.id,
'name' : obj.name,
}
#######################################################################
# Public XML-RPC methods
#######################################################################
@xmlrpc_method(False)
def pw_rpc_version():
"""Return Patchwork XML-RPC interface version."""
return 1
@xmlrpc_method(False)
def project_list(search_str="", max_count=0):
"""Get a list of projects matching the given filters."""
try:
if len(search_str) > 0:
projects = Project.objects.filter(linkname__icontains = search_str)
else:
projects = Project.objects.all()
if max_count > 0:
return map(project_to_dict, projects)[:max_count]
else:
return map(project_to_dict, projects)
except:
return []
@xmlrpc_method(False)
def project_get(project_id):
"""Return structure for the given project ID."""
try:
project = Project.objects.filter(id = project_id)[0]
return project_to_dict(project)
except:
return {}
@xmlrpc_method(False)
def person_list(search_str="", max_count=0):
"""Get a list of Person objects matching the given filters."""
try:
if len(search_str) > 0:
people = (Person.objects.filter(name__icontains = search_str) |
Person.objects.filter(email__icontains = search_str))
else:
people = Person.objects.all()
if max_count > 0:
return map(person_to_dict, people)[:max_count]
else:
return map(person_to_dict, people)
except:
return []
@xmlrpc_method(False)
def person_get(person_id):
"""Return structure for the given person ID."""
try:
person = Person.objects.filter(id = person_id)[0]
return person_to_dict(person)
except:
return {}
@xmlrpc_method(False)
def patch_list(filter={}):
"""Get a list of patches matching the given filters."""
try:
# We allow access to many of the fields. But, some fields are
# filtered by raw object so we must lookup by ID instead over
# XML-RPC.
ok_fields = [
"id",
"name",
"project_id",
"submitter_id",
"delegate_id",
"state_id",
"date",
"commit_ref",
"hash",
"msgid",
"max_count",
]
dfilter = {}
max_count = 0
for key in filter:
parts = key.split("__")
if parts[0] not in ok_fields:
# Invalid field given
return []
if len(parts) > 1:
if LOOKUP_TYPES.count(parts[1]) == 0:
# Invalid lookup type given
return []
if parts[0] == 'project_id':
dfilter['project'] = Project.objects.filter(id =
filter[key])[0]
elif parts[0] == 'submitter_id':
dfilter['submitter'] = Person.objects.filter(id =
filter[key])[0]
elif parts[0] == 'state_id':
dfilter['state'] = State.objects.filter(id =
filter[key])[0]
elif parts[0] == 'max_count':
max_count = filter[key]
else:
dfilter[key] = filter[key]
patches = Patch.objects.filter(**dfilter)
if max_count > 0:
return map(patch_to_dict, patches[:max_count])
else:
return map(patch_to_dict, patches)
except:
return []
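# Hypothetical client-side sketch (added for illustration, not part of the
# original module): filter keys follow Django's "field__lookuptype" convention
# and are restricted to the ok_fields/LOOKUP_TYPES whitelists above. It reuses
# the xmlrpclib import at the top of this module; the server URL is a
# placeholder and the call naturally requires a live Patchwork instance.
def _example_patch_list_client():
    rpc = xmlrpclib.ServerProxy('http://patchwork.example.com/xmlrpc/')
    return rpc.patch_list({'project_id': 1, 'name__icontains': 'net',
                           'max_count': 10})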
@xmlrpc_method(False)
def patch_get(patch_id):
"""Return structure for the given patch ID."""
try:
patch = Patch.objects.filter(id = patch_id)[0]
return patch_to_dict(patch)
except:
return {}
@xmlrpc_method(False)
def patch_get_by_hash(hash):
"""Return structure for the given patch hash."""
try:
patch = Patch.objects.filter(hash = hash)[0]
return patch_to_dict(patch)
except:
return {}
@xmlrpc_method(False)
def patch_get_by_project_hash(project, hash):
"""Return structure for the given patch hash."""
try:
patch = Patch.objects.filter(project__linkname = project,
hash = hash)[0]
return patch_to_dict(patch)
except:
return {}
@xmlrpc_method(False)
def patch_get_mbox(patch_id):
"""Return mbox string for the given patch ID."""
try:
patch = Patch.objects.filter(id = patch_id)[0]
return patch_to_mbox(patch).as_string()
except:
return ""
@xmlrpc_method(False)
def patch_get_diff(patch_id):
"""Return diff for the given patch ID."""
try:
patch = Patch.objects.filter(id = patch_id)[0]
return patch.content
except:
return ""
@xmlrpc_method(True)
def patch_set(user, patch_id, params):
"""Update a patch with the key,value pairs in params. Only some parameters
can be set"""
try:
ok_params = ['state', 'commit_ref', 'archived']
patch = Patch.objects.get(id = patch_id)
if not patch.is_editable(user):
raise Exception('No permissions to edit this patch')
for (k, v) in params.iteritems():
if k not in ok_params:
continue
if k == 'state':
patch.state = State.objects.get(id = v)
else:
setattr(patch, k, v)
patch.save()
return True
except:
raise
@xmlrpc_method(False)
def state_list(search_str="", max_count=0):
"""Get a list of state structures matching the given search string."""
try:
if len(search_str) > 0:
states = State.objects.filter(name__icontains = search_str)
else:
states = State.objects.all()
if max_count > 0:
return map(state_to_dict, states)[:max_count]
else:
return map(state_to_dict, states)
except:
return []
@xmlrpc_method(False)
def state_get(state_id):
"""Return structure for the given state ID."""
try:
state = State.objects.filter(id = state_id)[0]
return state_to_dict(state)
except:
return {}
|
gpl-2.0
| -7,321,025,644,502,314,000
| 30.114607
| 79
| 0.560162
| false
| 4.12206
| false
| false
| false
|
razisayyed/django-ads
|
ads/conf.py
|
1
|
1926
|
from django.conf import settings
from appconf import AppConf
from django.utils.translation import ugettext_lazy as _
gettext = lambda s: s
class AdsConf(AppConf):
class Meta:
prefix = 'ads'
GOOGLE_ADSENSE_CLIENT = None # 'ca-pub-xxxxxxxxxxxxxxxx'
ZONES = {
'header': {
'name': gettext('Header'),
'ad_size': {
'xs': '720x150',
'sm': '800x90',
'md': '800x90',
'lg': '800x90',
'xl': '800x90'
},
'google_adsense_slot': None, # 'xxxxxxxxx',
'google_adsense_format': None, # 'auto'
},
'content': {
'name': gettext('Content'),
'ad_size': {
'xs': '720x150',
'sm': '800x90',
'md': '800x90',
'lg': '800x90',
'xl': '800x90'
},
'google_adsense_slot': None, # 'xxxxxxxxx',
'google_adsense_format': None, # 'auto'
},
'sidebar': {
'name': gettext('Sidebar'),
'ad_size': {
'xs': '720x150',
'sm': '800x90',
'md': '800x90',
'lg': '800x90',
'xl': '800x90'
}
}
}
DEFAULT_AD_SIZE = '720x150'
DEVICES = (
('xs', _('Extra small devices')),
('sm', _('Small devices')),
('md', _('Medium devices (Tablets)')),
('lg', _('Large devices (Desktops)')),
('xl', _('Extra large devices (Large Desktops)')),
)
VIEWPORTS = {
'xs': 'd-block img-fluid d-sm-none',
'sm': 'd-none img-fluid d-sm-block d-md-none',
'md': 'd-none img-fluid d-md-block d-lg-none',
'lg': 'd-none img-fluid d-lg-block d-xl-none',
'xl': 'd-none img-fluid d-xl-block',
}
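# Illustrative note (added, not part of the original file): with django-appconf,
# the ``prefix = 'ads'`` in Meta exposes each attribute as a prefixed Django
# setting, so a project would typically override e.g. ADS_GOOGLE_ADSENSE_CLIENT
# or ADS_ZONES in its settings module rather than editing this file.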
|
apache-2.0
| 5,576,397,388,752,822,000
| 26.913043
| 61
| 0.419522
| false
| 3.546961
| false
| false
| false
|
lukasmonk/lucaschess
|
Code/GestorTurnOnLights.py
|
1
|
12214
|
import time
from Code import ControlPosicion
from Code import Gestor
from Code import Jugada
from Code import TurnOnLights
from Code.QT import QTUtil
from Code.QT import QTUtil2
from Code.Constantes import *
class GestorTurnOnLights(Gestor.Gestor):
def inicio(self, num_theme, num_block, tol):
if hasattr(self, "reiniciando"):
if self.reiniciando:
return
self.reiniciando = True
self.num_theme = num_theme
self.num_block = num_block
self.tol = tol
self.block = self.tol.get_block(self.num_theme, self.num_block)
self.block.shuffle()
self.calculation_mode = self.tol.is_calculation_mode()
self.penaltyError = self.block.penaltyError(self.calculation_mode)
self.penaltyHelp = self.block.penaltyHelp(self.calculation_mode)
        # self.factorDistancia = self.block.factorDistancia() # Not used, it is less than 1.0
self.av_seconds = self.block.av_seconds()
if self.av_seconds:
cat, ico = self.block.cqualification(self.calculation_mode)
self.lb_previous = "%s - %0.2f\"" % (cat, self.av_seconds)
else:
self.lb_previous = None
self.num_line = 0
self.num_lines = len(self.block)
self.num_moves = 0
self.total_time_used = 0.0
self.ayudas = 0
self.errores = 0
        self.dicFENayudas = {} # the arrow is shown starting from two hints on the same position
self.tipoJuego = kJugEntLight
self.siJuegaHumano = False
self.siTutorActivado = False
self.pantalla.ponActivarTutor(False)
self.ayudasPGN = 0
self.pantalla.activaJuego(True, False, siAyudas=False)
self.pantalla.quitaAyudas(True, True)
self.ponMensajero(self.mueveHumano)
self.mostrarIndicador(True)
self.reiniciando = False
self.next_line_run()
def pon_rotulos(self, next):
r1 = _("Calculation mode") if self.calculation_mode else _("Memory mode")
r1 += "<br>%s" % self.line.label
if self.lb_previous:
r1 += "<br><b>%s</b>" % self.lb_previous
if self.num_line:
av_secs, txt = self.block.calc_current(self.num_line - 1, self.total_time_used, self.errores, self.ayudas, self.calculation_mode)
r1 += "<br><b>%s: %s - %0.2f\"" % (_("Current"), txt, av_secs)
self.ponRotulo1(r1)
if next is not None:
r2 = "<b>%d/%d</b>" % (self.num_line + next, self.num_lines)
else:
r2 = None
self.ponRotulo2(r2)
def next_line(self):
if self.num_line < self.num_lines:
self.line = self.block.line(self.num_line)
self.num_move = -1
self.ini_time = None
cp = ControlPosicion.ControlPosicion()
cp.leeFen(self.line.fen)
self.partida.reset(cp)
siBlancas = cp.siBlancas
self.siJugamosConBlancas = siBlancas
self.siRivalConBlancas = not siBlancas
self.ponPosicion(self.partida.ultPosicion)
self.ponPiezasAbajo(siBlancas)
self.pgnRefresh(True)
self.partida.pendienteApertura = False
self.pon_rotulos(1)
def next_line_run(self):
liOpciones = [k_mainmenu, k_ayuda, k_reiniciar]
self.pantalla.ponToolBar(liOpciones)
self.next_line()
QTUtil.xrefreshGUI()
self.ponPosicionDGT()
self.estado = kJugando
self.siguienteJugada()
def procesarAccion(self, clave):
if clave == k_mainmenu:
self.finPartida()
elif clave == k_ayuda:
self.ayuda()
elif clave == k_reiniciar:
self.reiniciar()
elif clave == k_configurar:
self.configurar(siSonidos=True, siCambioTutor=False)
elif clave == k_utilidades:
self.utilidades()
elif clave == k_siguiente:
self.next_line_run()
def reiniciar(self):
if self.estado == kJugando:
if self.ini_time:
self.total_time_used += time.time() - self.ini_time
if self.total_time_used:
self.block.new_reinit(self.total_time_used, self.errores, self.ayudas)
self.total_time_used = 0.0
TurnOnLights.write_tol(self.tol)
self.inicio(self.num_theme, self.num_block, self.tol)
def siguienteJugada(self):
if self.estado == kFinJuego:
return
self.estado = kJugando
self.siJuegaHumano = False
self.ponVista()
siBlancas = self.partida.ultPosicion.siBlancas
self.ponIndicador(siBlancas)
self.refresh()
siRival = siBlancas == self.siRivalConBlancas
self.num_move += 1
if self.num_move >= self.line.total_moves():
self.finLinea()
return
if siRival:
pv = self.line.get_move(self.num_move)
desde, hasta, coronacion = pv[:2], pv[2:4], pv[4:]
self.mueveRival(desde, hasta, coronacion)
self.siguienteJugada()
else:
self.siJuegaHumano = True
self.base_time = time.time()
            if not (self.calculation_mode and self.ini_time is None): # Started unless it is the beginning of the line
self.ini_time = self.base_time
self.activaColor(siBlancas)
if self.calculation_mode:
self.tablero.setDispatchMove(self.dispatchMove)
def dispatchMove(self):
if self.ini_time is None:
self.ini_time = time.time()
def finLinea(self):
self.num_line += 1
islast_line = self.num_line == self.num_lines
if islast_line:
#Previous
ant_tm = self.block.av_seconds()
ant_done = self.tol.done_level()
ant_cat_level, nada = self.tol.cat_num_level()
ant_cat_global = self.tol.cat_global()
num_moves = self.block.num_moves()
ta = self.total_time_used + self.errores*self.penaltyError + self.ayudas*self.penaltyHelp
tm = ta/num_moves
self.block.new_result(tm, self.total_time_used, self.errores, self.ayudas)
TurnOnLights.write_tol(self.tol)
cat_block, ico = TurnOnLights.qualification(tm, self.calculation_mode)
cat_level, ico = self.tol.cat_num_level()
cat_global = self.tol.cat_global()
txt_more_time = ""
txt_more_cat = ""
txt_more_line = ""
txt_more_global = ""
if ant_tm is None or tm < ant_tm:
txt_more_time = '<span style="color:red">%s</span>' % _("New record")
done = self.tol.done_level()
if done and (not ant_done):
if not self.tol.islast_level():
txt_more_line = "%s<hr>" % _("Open the next level")
if cat_level != ant_cat_level:
txt_more_cat = '<span style="color:red">%s</span>' % _("New")
if cat_global != ant_cat_global:
txt_more_global = '<span style="color:red">%s</span>' % _("New")
cErrores = '<tr><td align=right> %s </td><td> %d (x%d"=%d")</td></tr>' % (_('Errors'), self.errores, self.penaltyError, self.errores*self.penaltyError) if self.errores else ""
cAyudas = '<tr><td align=right> %s </td><td> %d (x%d"=%d")</td></tr>' % (_('Hints'), self.ayudas, self.penaltyHelp, self.ayudas*self.penaltyHelp) if self.ayudas else ""
mens = ('<hr><center><big>'+_('You have finished this block of positions') +
'<hr><table>' +
'<tr><td align=right> %s </td><td> %0.2f"</td></tr>' % (_('Time used'), self.total_time_used) +
cErrores +
cAyudas +
'<tr><td align=right> %s: </td><td> %0.2f" %s</td></tr>' % (_('Time assigned'), ta, txt_more_time) +
'<tr><td align=right> %s: </td><td> %d</td></tr>' % (_('Total moves'), num_moves) +
'<tr><td align=right> %s: </td><td> %0.2f"</td></tr>' % (_('Average time'), tm) +
'<tr><td align=right> %s: </td><td> %s</td></tr>' % (_('Block qualification'), cat_block) +
'<tr><td align=right> %s: </td><td> %s %s</td></tr>' % (_('Level qualification'), cat_level, txt_more_cat) +
'<tr><td align=right> %s: </td><td> %s %s</td></tr>' % (_('Global qualification'), cat_global, txt_more_global) +
'</table></center></big><hr>' +
txt_more_line
)
self.pon_rotulos(None)
QTUtil2.mensaje(self.pantalla, mens, _("Result of training"))
self.total_time_used = 0
else:
if self.tol.go_fast == True or (self.tol.go_fast is None and self.tol.work_level > 0):
self.next_line_run()
return
QTUtil2.mensajeTemporal(self.pantalla, _("This line training is completed."), 1.3)
self.pon_rotulos(0)
self.estado = kFinJuego
self.desactivaTodas()
liOpciones = [k_mainmenu, k_reiniciar, k_configurar, k_utilidades]
if not islast_line:
liOpciones.append(k_siguiente)
self.pantalla.ponToolBar(liOpciones)
def mueveHumano(self, desde, hasta, coronacion=None):
if self.ini_time is None:
self.ini_time = self.base_time
end_time = time.time()
jg = self.checkMueveHumano(desde, hasta, coronacion)
if not jg:
return False
movimiento = jg.movimiento().lower()
if movimiento == self.line.get_move(self.num_move).lower():
self.movimientosPiezas(jg.liMovs)
self.partida.ultPosicion = jg.posicion
self.masJugada(jg, True)
self.error = ""
self.total_time_used += (end_time - self.ini_time)
self.siguienteJugada()
return True
self.errores += 1
self.sigueHumano()
return False
def masJugada(self, jg, siNuestra):
if self.siTerminada():
jg.siJaqueMate = jg.siJaque
jg.siAhogado = not jg.siJaque
self.partida.append_jg(jg)
resp = self.partida.si3repetidas()
if resp:
jg.siTablasRepeticion = True
rotulo = ""
for j in resp:
rotulo += "%d," % (j / 2 + 1,)
rotulo = rotulo.strip(",")
self.rotuloTablasRepeticion = rotulo
if self.partida.ultPosicion.movPeonCap >= 100:
jg.siTablas50 = True
if self.partida.ultPosicion.siFaltaMaterial():
jg.siTablasFaltaMaterial = True
self.ponFlechaSC(jg.desde, jg.hasta)
self.beepExtendido(siNuestra)
self.pgnRefresh(self.partida.ultPosicion.siBlancas)
self.refresh()
self.ponPosicionDGT()
def mueveRival(self, desde, hasta, coronacion):
siBien, mens, jg = Jugada.dameJugada(self.partida.ultPosicion, desde, hasta, coronacion)
self.partida.ultPosicion = jg.posicion
self.masJugada(jg, False)
self.movimientosPiezas(jg.liMovs, True)
self.error = ""
def ayuda(self):
self.ayudas += 1
mov = self.line.get_move(self.num_move).lower()
self.tablero.markPosition(mov[:2])
fen = self.partida.ultPosicion.fen()
if fen not in self.dicFENayudas:
self.dicFENayudas[fen] = 1
else:
self.dicFENayudas[fen] += 1
if self.dicFENayudas[fen] > 2:
self.ponFlechaSC(mov[:2], mov[2:4])
def finPartida(self):
self.procesador.inicio()
self.procesador.showTurnOnLigths(self.tol.name)
def finalX(self):
self.procesador.inicio()
return False
def actualPGN(self):
resp = '[Event "%s"]\n' % _("Turn on the lights")
resp += '[Site "%s"]\n' % self.line.label.replace("<br>", " ").strip()
        resp += '[FEN "%s"]\n' % self.partida.iniPosicion.fen()
resp += "\n" + self.partida.pgnBase()
return resp
|
gpl-2.0
| 7,487,070,209,496,803,000
| 34.923529
| 187
| 0.55682
| false
| 3.16261
| false
| false
| false
|
skosukhin/spack
|
var/spack/repos/builtin/packages/r-mzid/package.py
|
1
|
2260
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RMzid(RPackage):
"""A parser for mzIdentML files implemented using the XML package. The
parser tries to be general and able to handle all types of mzIdentML
files with the drawback of having less 'pretty' output than a vendor
specific parser. Please contact the maintainer with any problems and
supply an mzIdentML file so the problems can be fixed quickly."""
homepage = "https://www.bioconductor.org/packages/mzID/"
url = "https://git.bioconductor.org/packages/mzID"
version('1.14.0', git='https://git.bioconductor.org/packages/mzID', commit='1c53aa6523ae61d3ebb13381381fc119d6cc6115')
depends_on('r-xml', type=('build', 'run'))
depends_on('r-plyr', type=('build', 'run'))
depends_on('r-doparallel', type=('build', 'run'))
depends_on('r-foreach', type=('build', 'run'))
depends_on('r-iterators', type=('build', 'run'))
depends_on('r-protgenerics', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@1.14.0')
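# Illustrative note (added): as a Spack package recipe this would typically be
# built with `spack install r-mzid`; the `depends_on('r@3.4.0:3.4.9',
# when='@1.14.0')` line pins the 1.14.0 release to R 3.4.x.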
|
lgpl-2.1
| -8,712,464,723,545,602,000
| 48.130435
| 122
| 0.670796
| false
| 3.779264
| false
| false
| false
|
sg-/project_generator
|
project_generator/builders/gccarm.py
|
1
|
1789
|
# Copyright 2014 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import logging
from .builder import Builder
from os.path import dirname
class MakefileGccArmBuilder(Builder):
# http://www.gnu.org/software/make/manual/html_node/Running.html
ERRORLEVEL = {
0: 'success (0 warnings, 0 errors)',
1: 'targets not already up to date',
2: 'errors'
}
SUCCESSVALUE = 0
def build_project(self, project_name, project_files, env_settings):
# cwd: relpath(join(project_path, ("gcc_arm" + project)))
# > make all
path = dirname(project_files[0])
logging.debug("Building GCC ARM project: %s" % path)
args = ['make', 'all']
try:
ret_code = None
ret_code = subprocess.call(args, cwd=path)
except:
logging.error("Error whilst calling make. Is it in your PATH?")
else:
if ret_code != self.SUCCESSVALUE:
# Seems like something went wrong.
logging.error("Build failed with the status: %s" %
self.ERRORLEVEL[ret_code])
else:
logging.info("Build succeeded with the status: %s" %
self.ERRORLEVEL[ret_code])
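# Hypothetical usage sketch (added for illustration, not part of the original
# module). It assumes the Builder base class needs no constructor arguments and
# that a Makefile was already generated at the placeholder path below; the
# project name and path are made up for the example.
def _example_build():
    builder = MakefileGccArmBuilder()
    builder.build_project('blinky',
                          ['generated_projects/gcc_arm_blinky/Makefile'],
                          env_settings=None)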
|
apache-2.0
| 6,599,894,252,396,413,000
| 34.078431
| 75
| 0.628284
| false
| 4.075171
| false
| false
| false
|
pu239ppy/authentic2
|
authentic2/migrations/0011_auto__add_authenticationevent.py
|
1
|
4418
|
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
from authentic2.compat import user_model_label
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'AuthenticationEvent'
db.create_table(u'authentic2_authenticationevent', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('when', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('who', self.gf('django.db.models.fields.CharField')(max_length=80)),
('how', self.gf('django.db.models.fields.CharField')(max_length=10)),
('nonce', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'authentic2', ['AuthenticationEvent'])
def backwards(self, orm):
# Deleting model 'AuthenticationEvent'
db.delete_table(u'authentic2_authenticationevent')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': user_model_label.split('.')[-1]},
},
u'authentic2.authenticationevent': {
'Meta': {'object_name': 'AuthenticationEvent'},
'how': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nonce': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
u'authentic2.deleteduser': {
'Meta': {'object_name': 'DeletedUser'},
'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % user_model_label})
},
u'authentic2.userexternalid': {
'Meta': {'object_name': 'UserExternalId'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '256'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % user_model_label})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['authentic2']
|
agpl-3.0
| -636,708,911,938,553,300
| 57.906667
| 187
| 0.559982
| false
| 3.642209
| false
| false
| false
|
bgris/ODL_bgris
|
odl/trafos/util/ft_utils.py
|
1
|
23184
|
# Copyright 2014, 2015 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Utility functions for Fourier transforms on regularly sampled data."""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
import numpy as np
from odl.discr import (
uniform_grid, DiscreteLp, uniform_partition_fromgrid,
uniform_discr_frompartition)
from odl.set import RealNumbers
from odl.util import (
fast_1d_tensor_mult,
is_real_dtype, is_scalar_dtype, is_real_floating_dtype,
is_complex_floating_dtype, complex_dtype, dtype_repr,
conj_exponent,
normalized_scalar_param_list, normalized_axes_tuple)
__all__ = ('reciprocal_grid', 'realspace_grid',
'reciprocal_space',
'dft_preprocess_data', 'dft_postprocess_data')
def reciprocal_grid(grid, shift=True, axes=None, halfcomplex=False):
"""Return the reciprocal of the given regular grid.
This function calculates the reciprocal (Fourier/frequency space)
grid for a given regular grid defined by the nodes::
x[k] = x[0] + k * s,
where ``k = (k[0], ..., k[d-1])`` is a ``d``-dimensional index in
the range ``0 <= k < N`` (component-wise). The multi-index
``N`` is the shape of the input grid.
This grid's reciprocal is then given by the nodes::
xi[j] = xi[0] + j * sigma,
with the reciprocal grid stride ``sigma = 2*pi / (s * N)``.
The minimum frequency ``xi[0]`` can in principle be chosen
    freely, but usually it is chosen in such a way that the reciprocal
grid is centered around zero. For this, there are two possibilities:
1. Make the grid point-symmetric around 0.
2. Make the grid "almost" point-symmetric around zero by shifting
it to the left by half a reciprocal stride.
In the first case, the minimum frequency (per axis) is given as::
xi_1[0] = -pi/s + pi/(s*n) = -pi/s + sigma/2.
For the second case, it is::
xi_1[0] = -pi / s.
Note that the zero frequency is contained in case 1 for an odd
number of points, while for an even size, the second option
guarantees that 0 is contained.
If a real-to-complex (half-complex) transform is to be computed,
the reciprocal grid has the shape ``M[i] = floor(N[i]/2) + 1``
in the last transform axis ``i``.
Parameters
----------
grid : uniform `RectGrid`
        Original sampling grid.
shift : bool or sequence of bools, optional
If ``True``, the grid is shifted by half a stride in the negative
direction. With a sequence, this option is applied separately on
each axis.
axes : int or sequence of ints, optional
Dimensions in which to calculate the reciprocal. The sequence
must have the same length as ``shift`` if the latter is given
as a sequence. ``None`` means all axes in ``grid``.
halfcomplex : bool, optional
If ``True``, return the half of the grid with last coordinate
less than zero. This is related to the fact that for real-valued
functions, the other half is the mirrored complex conjugate of
the given half and therefore needs not be stored.
Returns
-------
reciprocal_grid : uniform `RectGrid`
The reciprocal grid.
"""
if axes is None:
axes = list(range(grid.ndim))
else:
try:
axes = [int(axes)]
except TypeError:
axes = list(axes)
# List indicating shift or not per "active" axis, same length as axes
shift_list = normalized_scalar_param_list(shift, length=len(axes),
param_conv=bool)
# Full-length vectors
stride = grid.stride
shape = np.array(grid.shape)
rmin = grid.min_pt.copy()
rmax = grid.max_pt.copy()
rshape = list(shape)
# Shifted axes (full length to avoid ugly double indexing)
shifted = np.zeros(grid.ndim, dtype=bool)
shifted[axes] = shift_list
rmin[shifted] = -np.pi / stride[shifted]
# Length min->max increases by double the shift, so we
# have to compensate by a full stride
rmax[shifted] = (-rmin[shifted] -
2 * np.pi / (stride[shifted] * shape[shifted]))
# Non-shifted axes
not_shifted = np.zeros(grid.ndim, dtype=bool)
not_shifted[axes] = np.logical_not(shift_list)
rmin[not_shifted] = ((-1.0 + 1.0 / shape[not_shifted]) *
np.pi / stride[not_shifted])
rmax[not_shifted] = -rmin[not_shifted]
# Change last axis shape and max if halfcomplex
if halfcomplex:
rshape[axes[-1]] = shape[axes[-1]] // 2 + 1
# - Odd and shifted: - stride / 2
# - Even and not shifted: + stride / 2
# - Otherwise: 0
last_odd = shape[axes[-1]] % 2 == 1
last_shifted = shift_list[-1]
half_rstride = np.pi / (shape[axes[-1]] * stride[axes[-1]])
if last_odd and last_shifted:
rmax[axes[-1]] = -half_rstride
elif not last_odd and not last_shifted:
rmax[axes[-1]] = half_rstride
else:
rmax[axes[-1]] = 0
return uniform_grid(rmin, rmax, rshape)
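# Illustrative sketch (added, not part of the original module): a minimal check
# of the stride relation ``sigma = 2*pi / (s * N)`` documented above. It assumes
# the ``uniform_grid`` factory imported at the top of this module accepts
# list-valued bounds and a shape tuple.
def _demo_reciprocal_grid_stride():
    x = uniform_grid([0.0], [1.0], (5,))  # s = 0.25, N = 5
    xi = reciprocal_grid(x, shift=True)
    expected_stride = 2 * np.pi / (x.stride[0] * x.shape[0])
    assert np.isclose(xi.stride[0], expected_stride)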
def realspace_grid(recip_grid, x0, axes=None, halfcomplex=False,
halfcx_parity='even'):
"""Return the real space grid from the given reciprocal grid.
Given a reciprocal grid::
xi[j] = xi[0] + j * sigma,
with a multi-index ``j = (j[0], ..., j[d-1])`` in the range
``0 <= j < M``, this function calculates the original grid::
x[k] = x[0] + k * s
by using a provided ``x[0]`` and calculating the stride ``s``.
If the reciprocal grid is interpreted as coming from a usual
complex-to-complex FFT, it is ``N == M``, and the stride is::
s = 2*pi / (sigma * N)
For a reciprocal grid from a real-to-complex (half-complex) FFT,
it is ``M[i] = floor(N[i]/2) + 1`` in the last transform axis ``i``.
    To resolve the ambiguity regarding the parity of ``N[i]``, it must be
    specified whether the output shape should be even or odd,
resulting in::
odd : N[i] = 2 * M[i] - 1
even: N[i] = 2 * M[i] - 2
The output stride is calculated with this ``N`` as above in this
case.
Parameters
----------
recip_grid : uniform `RectGrid`
Sampling grid in reciprocal space.
x0 : `array-like`
Desired minimum point of the real space grid.
axes : int or sequence of ints, optional
Dimensions in which to calculate the real space grid. The sequence
must have the same length as ``shift`` if the latter is given
as a sequence. ``None`` means "all axes".
halfcomplex : bool, optional
If ``True``, interpret the given grid as the reciprocal as used
in a half-complex FFT (see above). Otherwise, the grid is
regarded as being used in a complex-to-complex transform.
halfcx_parity : {'even', 'odd'}
Use this parity for the shape of the returned grid in the
last axis of ``axes`` in the case ``halfcomplex=True``
Returns
-------
irecip : uniform `RectGrid`
The inverse reciprocal grid.
"""
if axes is None:
axes = list(range(recip_grid.ndim))
else:
try:
axes = [int(axes)]
except TypeError:
axes = list(axes)
rstride = recip_grid.stride
rshape = recip_grid.shape
# Calculate shape of the output grid by adjusting in axes[-1]
irshape = list(rshape)
if halfcomplex:
if str(halfcx_parity).lower() == 'even':
irshape[axes[-1]] = 2 * rshape[axes[-1]] - 2
elif str(halfcx_parity).lower() == 'odd':
irshape[axes[-1]] = 2 * rshape[axes[-1]] - 1
else:
raise ValueError("`halfcomplex` parity '{}' not understood"
"".format(halfcx_parity))
irmin = np.asarray(x0)
irshape = np.asarray(irshape)
irstride = np.copy(rstride)
irstride[axes] = 2 * np.pi / (irshape[axes] * rstride[axes])
irmax = irmin + (irshape - 1) * irstride
return uniform_grid(irmin, irmax, irshape)
def dft_preprocess_data(arr, shift=True, axes=None, sign='-', out=None):
"""Pre-process the real-space data before DFT.
This function multiplies the given data with the separable
function::
p(x) = exp(+- 1j * dot(x - x[0], xi[0]))
where ``x[0]`` and ``xi[0]`` are the minimum coodinates of
the real-space and reciprocal grids, respectively. The sign of
the exponent depends on the choice of ``sign``. In discretized
form, this function becomes an array::
p[k] = exp(+- 1j * k * s * xi[0])
If the reciprocal grid is not shifted, i.e. symmetric around 0,
it is ``xi[0] = pi/s * (-1 + 1/N)``, hence::
p[k] = exp(-+ 1j * pi * k * (1 - 1/N))
    For a shifted grid, we have ``xi[0] = -pi/s``, thus the
array is given by::
p[k] = (-1)**k
Parameters
----------
arr : `array-like`
Array to be pre-processed. If its data type is a real
non-floating type, it is converted to 'float64'.
    shift : bool or sequence of bools, optional
If ``True``, the grid is shifted by half a stride in the negative
direction. With a sequence, this option is applied separately on
each axis.
axes : int or sequence of ints, optional
Dimensions in which to calculate the reciprocal. The sequence
must have the same length as ``shift`` if the latter is given
as a sequence.
Default: all axes.
sign : {'-', '+'}, optional
Sign of the complex exponent.
out : `numpy.ndarray`, optional
Array in which the result is stored. If ``out is arr``,
an in-place modification is performed. For real data type,
this is only possible for ``shift=True`` since the factors are
complex otherwise.
Returns
-------
out : `numpy.ndarray`
Result of the pre-processing. If ``out`` was given, the returned
object is a reference to it.
Notes
-----
If ``out`` is not specified, the data type of the returned array
is the same as that of ``arr`` except when ``arr`` has real data
type and ``shift`` is not ``True``. In this case, the return type
is the complex counterpart of ``arr.dtype``.
"""
arr = np.asarray(arr)
if not is_scalar_dtype(arr.dtype):
raise ValueError('array has non-scalar data type {}'
''.format(dtype_repr(arr.dtype)))
elif is_real_dtype(arr.dtype) and not is_real_floating_dtype(arr.dtype):
arr = arr.astype('float64')
if axes is None:
axes = list(range(arr.ndim))
else:
try:
axes = [int(axes)]
except TypeError:
axes = list(axes)
shape = arr.shape
shift_list = normalized_scalar_param_list(shift, length=len(axes),
param_conv=bool)
# Make a copy of arr with correct data type if necessary, or copy values.
if out is None:
if is_real_dtype(arr.dtype) and not all(shift_list):
out = np.array(arr, dtype=complex_dtype(arr.dtype), copy=True)
else:
out = arr.copy()
else:
out[:] = arr
if is_real_dtype(out.dtype) and not shift:
raise ValueError('cannot pre-process real input in-place without '
'shift')
if sign == '-':
imag = -1j
elif sign == '+':
imag = 1j
else:
raise ValueError("`sign` '{}' not understood".format(sign))
def _onedim_arr(length, shift):
if shift:
# (-1)^indices
factor = np.ones(length, dtype=out.dtype)
factor[1::2] = -1
else:
factor = np.arange(length, dtype=out.dtype)
factor *= -imag * np.pi * (1 - 1.0 / length)
np.exp(factor, out=factor)
return factor.astype(out.dtype, copy=False)
onedim_arrs = []
for axis, shift in zip(axes, shift_list):
length = shape[axis]
onedim_arrs.append(_onedim_arr(length, shift))
fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out)
return out
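# Illustrative sketch (added, not part of the original module): for a fully
# shifted grid the pre-processing factor reduces to ``(-1)**k``, so a constant
# real input simply alternates in sign along the processed axis.
def _demo_dft_preprocess_shifted():
    out = dft_preprocess_data(np.ones(4), shift=True, axes=[0], sign='-')
    assert np.allclose(out, [1.0, -1.0, 1.0, -1.0])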
def _interp_kernel_ft(norm_freqs, interp):
"""Scaled FT of a one-dimensional interpolation kernel.
For normalized frequencies ``-1/2 <= xi <= 1/2``, this
function returns::
sinc(pi * xi)**k / sqrt(2 * pi)
where ``k=1`` for 'nearest' and ``k=2`` for 'linear' interpolation.
Parameters
----------
norm_freqs : `numpy.ndarray`
Normalized frequencies between -1/2 and 1/2
interp : {'nearest', 'linear'}
Type of interpolation kernel
Returns
-------
ker_ft : `numpy.ndarray`
Values of the kernel FT at the given frequencies
"""
# Numpy's sinc(x) is equal to the 'math' sinc(pi * x)
ker_ft = np.sinc(norm_freqs)
interp_ = str(interp).lower()
if interp_ == 'nearest':
pass
elif interp_ == 'linear':
ker_ft **= 2
else:
raise ValueError("`interp` '{}' not understood".format(interp))
ker_ft /= np.sqrt(2 * np.pi)
return ker_ft
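# Illustrative sketch (added, not part of the original module): at zero
# frequency the scaled kernel FT equals ``1/sqrt(2*pi)``, and the 'linear'
# kernel is the square of the 'nearest' one up to that same scaling.
def _demo_interp_kernel_ft():
    freqs = np.array([0.0, 0.25])
    near = _interp_kernel_ft(freqs, 'nearest')
    lin = _interp_kernel_ft(freqs, 'linear')
    assert np.isclose(near[0], 1.0 / np.sqrt(2 * np.pi))
    assert np.allclose(lin, near ** 2 * np.sqrt(2 * np.pi))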
def dft_postprocess_data(arr, real_grid, recip_grid, shift, axes,
interp, sign='-', op='multiply', out=None):
"""Post-process the Fourier-space data after DFT.
This function multiplies the given data with the separable
function::
q(xi) = exp(+- 1j * dot(x[0], xi)) * s * phi_hat(xi_bar)
where ``x[0]`` and ``s`` are the minimum point and the stride of
the real-space grid, respectively, and ``phi_hat(xi_bar)`` is the FT
of the interpolation kernel. The sign of the exponent depends on the
choice of ``sign``. Note that for ``op='divide'`` the
multiplication with ``s * phi_hat(xi_bar)`` is replaced by a
division with the same array.
In discretized form on the reciprocal grid, the exponential part
of this function becomes an array::
q[k] = exp(+- 1j * dot(x[0], xi[k]))
and the arguments ``xi_bar`` to the interpolation kernel
are the normalized frequencies::
for 'shift=True' : xi_bar[k] = -pi + pi * (2*k) / N
for 'shift=False' : xi_bar[k] = -pi + pi * (2*k+1) / N
See [Pre+2007]_, Section 13.9 "Computing Fourier Integrals Using
the FFT" for a similar approach.
Parameters
----------
arr : `array-like`
Array to be pre-processed. An array with real data type is
converted to its complex counterpart.
real_grid : uniform `RectGrid`
Real space grid in the transform.
recip_grid : uniform `RectGrid`
Reciprocal grid in the transform
shift : bool or sequence of bools
If ``True``, the grid is shifted by half a stride in the negative
direction in the corresponding axes. The sequence must have the
same length as ``axes``.
axes : int or sequence of ints
Dimensions along which to take the transform. The sequence must
have the same length as ``shifts``.
interp : string or sequence of strings
Interpolation scheme used in the real-space.
sign : {'-', '+'}, optional
Sign of the complex exponent.
op : {'multiply', 'divide'}, optional
Operation to perform with the stride times the interpolation
kernel FT
out : `numpy.ndarray`, optional
Array in which the result is stored. If ``out is arr``, an
in-place modification is performed.
Returns
-------
out : `numpy.ndarray`
Result of the post-processing. If ``out`` was given, the returned
object is a reference to it.
"""
arr = np.asarray(arr)
if is_real_floating_dtype(arr.dtype):
arr = arr.astype(complex_dtype(arr.dtype))
elif not is_complex_floating_dtype(arr.dtype):
raise ValueError('array data type {} is not a complex floating point '
'data type'.format(dtype_repr(arr.dtype)))
if out is None:
out = arr.copy()
elif out is not arr:
out[:] = arr
if axes is None:
axes = list(range(arr.ndim))
else:
try:
axes = [int(axes)]
except TypeError:
axes = list(axes)
shift_list = normalized_scalar_param_list(shift, length=len(axes),
param_conv=bool)
if sign == '-':
imag = -1j
elif sign == '+':
imag = 1j
else:
raise ValueError("`sign` '{}' not understood".format(sign))
op, op_in = str(op).lower(), op
if op not in ('multiply', 'divide'):
raise ValueError("kernel `op` '{}' not understood".format(op_in))
# Make a list from interp if that's not the case already
try:
# Duck-typed string check
interp + ''
except TypeError:
pass
else:
interp = [str(interp).lower()] * arr.ndim
onedim_arrs = []
for ax, shift, intp in zip(axes, shift_list, interp):
x = real_grid.min_pt[ax]
xi = recip_grid.coord_vectors[ax]
# First part: exponential array
onedim_arr = np.exp(imag * x * xi)
# Second part: interpolation kernel
len_dft = recip_grid.shape[ax]
len_orig = real_grid.shape[ax]
halfcomplex = (len_dft < len_orig)
odd = len_orig % 2
fmin = -0.5 if shift else -0.5 + 1.0 / (2 * len_orig)
if halfcomplex:
# maximum lies around 0, possibly half a cell left or right of it
if shift and odd:
fmax = - 1.0 / (2 * len_orig)
elif not shift and not odd:
fmax = 1.0 / (2 * len_orig)
else:
fmax = 0.0
else: # not halfcomplex
# maximum lies close to 0.5, half or full cell left of it
if shift:
# -0.5 + (N-1)/N = 0.5 - 1/N
fmax = 0.5 - 1.0 / len_orig
else:
# -0.5 + 1/(2*N) + (N-1)/N = 0.5 - 1/(2*N)
fmax = 0.5 - 1.0 / (2 * len_orig)
freqs = np.linspace(fmin, fmax, num=len_dft)
stride = real_grid.stride[ax]
if op == 'multiply':
onedim_arr *= stride * _interp_kernel_ft(freqs, intp)
else:
onedim_arr /= stride * _interp_kernel_ft(freqs, intp)
onedim_arrs.append(onedim_arr.astype(out.dtype, copy=False))
fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out)
return out
def reciprocal_space(space, axes=None, halfcomplex=False, shift=True,
**kwargs):
"""Return the range of the Fourier transform on ``space``.
Parameters
----------
space : `DiscreteLp`
Real space whose reciprocal is calculated. It must be
uniformly discretized.
axes : sequence of ints, optional
Dimensions along which the Fourier transform is taken.
Default: all axes
halfcomplex : bool, optional
If ``True``, take only the negative frequency part along the last
        axis. For ``False``, use the full frequency space.
This option can only be used if ``space`` is a space of
real-valued functions.
shift : bool or sequence of bools, optional
If ``True``, the reciprocal grid is shifted by half a stride in
the negative direction. With a boolean sequence, this option
is applied separately to each axis.
If a sequence is provided, it must have the same length as
``axes`` if supplied. Note that this must be set to ``True``
in the halved axis in half-complex transforms.
Default: ``True``
impl : string, optional
Implementation back-end for the created space.
Default: ``'numpy'``
exponent : float, optional
Create a space with this exponent. By default, the conjugate
exponent ``q = p / (p - 1)`` of the exponent of ``space`` is
used, where ``q = inf`` for ``p = 1`` and vice versa.
dtype : optional
Complex data type of the created space. By default, the
complex counterpart of ``space.dtype`` is used.
Returns
-------
rspace : `DiscreteLp`
Reciprocal of the input ``space``. If ``halfcomplex=True``, the
upper end of the domain (where the half space ends) is chosen to
coincide with the grid node.
"""
if not isinstance(space, DiscreteLp):
raise TypeError('`space` {!r} is not a `DiscreteLp` instance'
''.format(space))
if not space.is_uniform:
raise ValueError('`space` is not uniformly discretized')
if axes is None:
axes = tuple(range(space.ndim))
axes = normalized_axes_tuple(axes, space.ndim)
if halfcomplex and space.field != RealNumbers():
raise ValueError('`halfcomplex` option can only be used with real '
'spaces')
exponent = kwargs.pop('exponent', None)
if exponent is None:
exponent = conj_exponent(space.exponent)
dtype = kwargs.pop('dtype', None)
if dtype is None:
dtype = complex_dtype(space.dtype)
else:
if not is_complex_floating_dtype(dtype):
raise ValueError('{} is not a complex data type'
''.format(dtype_repr(dtype)))
impl = kwargs.pop('impl', 'numpy')
# Calculate range
recip_grid = reciprocal_grid(space.grid, shift=shift,
halfcomplex=halfcomplex, axes=axes)
# Make a partition with nodes on the boundary in the last transform axis
# if `halfcomplex == True`, otherwise a standard partition.
if halfcomplex:
max_pt = {axes[-1]: recip_grid.max_pt[axes[-1]]}
part = uniform_partition_fromgrid(recip_grid, max_pt=max_pt)
else:
part = uniform_partition_fromgrid(recip_grid)
    # Use convention of adding a hat to represent the Fourier transform of a variable
axis_labels = list(space.axis_labels)
for i in axes:
# Avoid double math
label = axis_labels[i].replace('$', '')
axis_labels[i] = '$\^{{{}}}$'.format(label)
recip_spc = uniform_discr_frompartition(part, exponent=exponent,
dtype=dtype, impl=impl,
axis_labels=axis_labels)
return recip_spc
if __name__ == '__main__':
from doctest import testmod, NORMALIZE_WHITESPACE
testmod(optionflags=NORMALIZE_WHITESPACE)
|
gpl-3.0
| -1,737,695,355,543,527,700
| 34.180577
| 79
| 0.598516
| false
| 3.773438
| false
| false
| false
|
allenai/document-qa
|
docqa/text_preprocessor.py
|
1
|
7061
|
from collections import Counter
from typing import List, Optional, Tuple
import numpy as np
from tqdm import tqdm
from docqa.utils import flatten_iterable
from docqa.data_processing.document_splitter import ExtractedParagraphWithAnswers, MergeParagraphs, ExtractedParagraph
from docqa.data_processing.multi_paragraph_qa import ParagraphWithAnswers
from docqa.configurable import Configurable
from docqa.squad.squad_data import SquadCorpus
from docqa.triviaqa.build_span_corpus import TriviaQaWebDataset
class TextPreprocessor(Configurable):
""" Preprocess text input, must be deterministic. Only used thus far adding special indicator tokens """
def encode_extracted_paragraph(self, question: List[str], paragraph: ExtractedParagraphWithAnswers):
text, answers, _ = self.encode_paragraph(question, paragraph.text,
paragraph.start == 0, paragraph.answer_spans)
return ParagraphWithAnswers(text, answers)
def encode_text(self, question: List[str], paragraph: ExtractedParagraph):
text, _, _ = self.encode_paragraph(question, paragraph.text, paragraph.start == 0,
np.zeros((0, 2), dtype=np.int32))
return text
def encode_paragraph(self, question: List[str], paragraphs: List[List[str]],
is_first, answer_spans: np.ndarray,
token_spans=None) -> Tuple[List[str], np.ndarray, Optional[np.ndarray]]:
"""
Returns updated (and flattened) text, answer_spans, and token_spans
"""
raise NotImplementedError()
def special_tokens(self) -> List[str]:
return []
class WithIndicators(TextPreprocessor):
"""
Adds a document or group start token before the text, and a paragraph token between each
between in each paragraph.
"""
PARAGRAPH_TOKEN = "%%PARAGRAPH%%"
DOCUMENT_START_TOKEN = "%%DOCUMENT%%"
PARAGRAPH_GROUP = "%%PARAGRAPH_GROUP%%"
def __init__(self, remove_cross_answer: bool=True, para_tokens: bool=True, doc_start_token: bool=True):
self.remove_cross_answer = remove_cross_answer
self.doc_start_token = doc_start_token
self.para_tokens = para_tokens
def special_tokens(self) -> List[str]:
tokens = [self.PARAGRAPH_GROUP]
if self.doc_start_token:
tokens.append(self.DOCUMENT_START_TOKEN)
if self.para_tokens:
tokens.append(self.PARAGRAPH_TOKEN)
return tokens
def encode_paragraph(self, question: List[str], paragraphs: List[List[str]], is_first, answer_spans: np.ndarray, inver=None):
out = []
offset = 0
if self.doc_start_token and is_first:
out.append(self.DOCUMENT_START_TOKEN)
else:
out.append(self.PARAGRAPH_GROUP)
if inver is not None:
inv_out = [np.zeros((1, 2), dtype=np.int32)]
else:
inv_out = None
offset += 1
spans = answer_spans + offset
out += paragraphs[0]
offset += len(paragraphs[0])
on_ix = len(paragraphs[0])
if inv_out is not None:
inv_out.append(inver[:len(paragraphs[0])])
for sent in paragraphs[1:]:
if self.remove_cross_answer:
remove = np.logical_and(spans[:, 0] < offset, spans[:, 1] >= offset)
spans = spans[np.logical_not(remove)]
if self.para_tokens:
spans[spans[:, 0] >= offset, 0] += 1
spans[spans[:, 1] >= offset, 1] += 1
out.append(self.PARAGRAPH_TOKEN)
if inv_out is not None:
if len(inv_out) == 0 or len(inv_out[-1]) == 0:
inv_out.append(np.zeros((1, 2), dtype=np.int32))
else:
inv_out.append(np.full((1, 2), inv_out[-1][-1][1], dtype=np.int32))
offset += 1
out += sent
offset += len(sent)
if inv_out is not None:
inv_out.append(inver[on_ix:on_ix+len(sent)])
on_ix += len(sent)
return out, spans, None if inv_out is None else np.concatenate(inv_out)
def __setstate__(self, state):
if "state" in state:
state["state"]["doc_start_token"] = True
state["state"]["para_tokens"] = True
else:
if "doc_start_token" not in state:
state["doc_start_token"] = True
if "para_tokens" not in state:
state["para_tokens"] = True
super().__setstate__(state)
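# Illustrative sketch (added, not part of the original file): for a paragraph
# group that does not start the document, a group marker is prepended, a
# paragraph marker is inserted between paragraphs, and the answer spans are
# shifted accordingly.
def _demo_with_indicators():
    pre = WithIndicators()
    text, spans, _ = pre.encode_paragraph(
        ["who", "?"], [["a", "b"], ["c"]], is_first=False,
        answer_spans=np.array([[0, 1]], dtype=np.int32))
    assert text == [WithIndicators.PARAGRAPH_GROUP, "a", "b",
                    WithIndicators.PARAGRAPH_TOKEN, "c"]
    assert spans.tolist() == [[1, 2]]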
def check_preprocess():
data = TriviaQaWebDataset()
merge = MergeParagraphs(400)
questions = data.get_dev()
pre = WithIndicators(False)
remove_cross = WithIndicators(True)
rng = np.random.RandomState(0)
rng.shuffle(questions)
for q in tqdm(questions[:1000]):
doc = rng.choice(q.all_docs, 1)[0]
text = data.evidence.get_document(doc.doc_id, n_tokens=800)
paras = merge.split_annotated(text, doc.answer_spans)
para = paras[np.random.randint(0, len(paras))]
built = pre.encode_extracted_paragraph(q.question, para)
expected_text = flatten_iterable(para.text)
if expected_text != [x for x in built.text if x not in pre.special_tokens()]:
raise ValueError()
expected = [expected_text[s:e+1] for s, e in para.answer_spans]
expected = Counter([tuple(x) for x in expected])
actual = [tuple(built.text[s:e+1]) for s,e in built.answer_spans]
actual_cleaned = Counter(tuple(z for z in x if z not in pre.special_tokens()) for x in actual)
if actual_cleaned != expected:
raise ValueError()
r_built = remove_cross.encode_extracted_paragraph(q.question, para)
rc = Counter(tuple(r_built.text[s:e + 1]) for s, e in r_built.answer_spans)
removed = Counter()
for w in actual:
if all(x not in pre.special_tokens() for x in w):
removed[w] += 1
if rc != removed:
raise ValueError()
def check_preprocess_squad():
data = SquadCorpus().get_train()
remove_cross = WithIndicators(True)
for doc in tqdm(data):
for para in doc.paragraphs:
q = para.questions[np.random.randint(0, len(para.questions))]
text, ans, inv = remove_cross.encode_paragraph(q.words, para.text, para.paragraph_num == 0,
q.answer.answer_spans, para.spans)
if len(inv) != len(text):
raise ValueError()
for i in range(len(inv)-1):
if inv[i, 0] > inv[i+1, 0]:
raise ValueError()
for (s1, e1), (s2, e2) in zip(ans, q.answer.answer_spans):
if tuple(inv[s1]) != tuple(para.spans[s2]):
raise ValueError()
if tuple(inv[e1]) != tuple(para.spans[e2]):
raise ValueError()
if __name__ == "__main__":
check_preprocess_squad()
|
apache-2.0
| 8,086,274,971,685,163,000
| 37.172973
| 129
| 0.584195
| false
| 3.712408
| false
| false
| false
|
hylje/tekis
|
tekis/flatpages/migrations/0003_auto_20160221_0250.py
|
1
|
1533
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-21 00:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('flatpages', '0002_auto_20160221_0006'),
]
operations = [
migrations.CreateModel(
name='Sponsor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('url', models.URLField()),
('logo', models.ImageField(upload_to='sponsors/')),
('titletext', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
],
options={
'ordering': ('name',),
},
),
migrations.AlterField(
model_name='flatpage',
name='menu_index',
field=models.IntegerField(default=0, help_text='Menus are sorted ascending by this value. The first menu item in a category is the category link itself. <strong>Note:</strong> The first menu item in the top level category should be the front page.'),
),
migrations.AlterField(
model_name='flatpage',
name='published',
field=models.BooleanField(default=False, help_text='Published pages show up on the menu. Unpublished pages can be reached over direct link.'),
),
]
|
bsd-3-clause
| -2,971,475,573,143,553,500
| 38.307692
| 262
| 0.580561
| false
| 4.430636
| false
| false
| false
|
kowey/attelo
|
attelo/harness/parse.py
|
1
|
5021
|
'''
Control over attelo parsers as might be needed for a test harness
'''
from __future__ import print_function
from os import path as fp
import os
import sys
from joblib import (delayed)
from ..io import (write_predictions_output)
from attelo.decoding.util import (prediction_to_triples)
from attelo.fold import (select_training,
select_testing)
from attelo.harness.util import (makedirs)
def _eval_banner(econf, hconf, fold):
"""
Which combo of eval parameters are we running now?
"""
msg = ("Reassembling "
"fold {fnum} [{dset}]\t"
"parser: {parser}")
return msg.format(fnum=fold,
dset=hconf.dataset,
parser=econf.parser.key)
def _tmp_output_filename(path, suffix):
"""
Temporary filename for output file segment
"""
return fp.join(fp.dirname(path),
'_' + fp.basename(path) + '.' + suffix)
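# Illustrative note (added): on POSIX-style paths this yields, for example,
#   _tmp_output_filename('out/decoded', 'wsj_02') == 'out/_decoded.wsj_02'
# so per-document segments sit next to the final combined output file.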
def concatenate_outputs(mpack, output_path):
"""
(For use after :py:func:`delayed_main_for_harness`)
Concatenate temporary per-group outputs into a single
combined output
"""
tmpfiles = [_tmp_output_filename(output_path, d)
for d in sorted(mpack.keys())]
with open(output_path, 'wb') as file_out:
for tfile in tmpfiles:
with open(tfile, 'rb') as file_in:
file_out.write(file_in.read())
for tmpfile in tmpfiles:
os.remove(tmpfile)
def _parse_group(dpack, parser, output_path):
'''
parse a single group and write its output
score the predictions if we have
:rtype Count or None
'''
dpack = parser.transform(dpack)
# we trust the parser to select what it thinks is its best prediction
prediction = prediction_to_triples(dpack)
write_predictions_output(dpack, prediction, output_path)
def jobs(mpack, parser, output_path):
"""
Return a list of delayed decoding jobs for the various
documents in this group
"""
res = []
tmpfiles = [_tmp_output_filename(output_path, d)
for d in mpack.keys()]
for tmpfile in tmpfiles:
if fp.exists(tmpfile):
os.remove(tmpfile)
for onedoc, dpack in mpack.items():
tmp_output_path = _tmp_output_filename(output_path, onedoc)
res.append(delayed(_parse_group)(dpack, parser, tmp_output_path))
return res
def learn(hconf, econf, dconf, fold):
"""
Run the learners for the given configuration
"""
if fold is None:
subpacks = dconf.pack
parent_dir = hconf.combined_dir_path()
else:
subpacks = select_training(dconf.pack, dconf.folds, fold)
parent_dir = hconf.fold_dir_path(fold)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
cache = hconf.model_paths(econf.learner, fold)
print('learning ', econf.key, '...', file=sys.stderr)
dpacks = subpacks.values()
targets = [d.target for d in dpacks]
econf.parser.payload.fit(dpacks, targets, cache=cache)
def delayed_decode(hconf, dconf, econf, fold):
"""
Return possible futures for decoding groups within
this model/decoder combo for the given fold
"""
if fold is None and hconf.test_evaluation is None:
return []
if _say_if_decoded(hconf, econf, fold, stage='decoding'):
return []
output_path = hconf.decode_output_path(econf, fold)
makedirs(fp.dirname(output_path))
if fold is None:
subpack = dconf.pack
else:
subpack = select_testing(dconf.pack, dconf.folds, fold)
parser = econf.parser.payload
return jobs(subpack, parser, output_path)
def decode_on_the_fly(hconf, dconf, fold):
"""
Learn each parser, returning decoder jobs as each is learned.
Return a decoder job generator that should hopefully allow us
to effectively learn and decode in parallel.
"""
for econf in hconf.evaluations:
learn(hconf, econf, dconf, fold)
for job in delayed_decode(hconf, dconf, econf, fold):
yield job
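def _run_fold_sketch(hconf, dconf, fold, n_jobs=-1):
    """Illustrative sketch only (not part of the original module): one way a
    harness could consume the generator above, executing the delayed decoding
    jobs with joblib while the models are learned configuration by
    configuration. The `hconf`/`dconf` arguments are assumed to be the same
    harness and data configurations passed to the other functions in this file.
    """
    from joblib import Parallel
    # decode_on_the_fly yields joblib `delayed` jobs as soon as each parser
    # is trained; Parallel executes them as they arrive
    Parallel(n_jobs=n_jobs)(decode_on_the_fly(hconf, dconf, fold))
    # reassemble the per-group outputs once decoding is done
    for econf in hconf.evaluations:
        post_decode(hconf, dconf, econf, fold)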
def _say_if_decoded(hconf, econf, fold, stage='decoding'):
"""
If we have already done the decoding for a given config
and fold, say so and return True
"""
if fp.exists(hconf.decode_output_path(econf, fold)):
print(("skipping {stage} {parser} "
"(already done)").format(stage=stage,
parser=econf.parser.key),
file=sys.stderr)
return True
else:
return False
def post_decode(hconf, dconf, econf, fold):
"""
Join together output files from this model/decoder combo
"""
if _say_if_decoded(hconf, econf, fold, stage='reassembly'):
return
print(_eval_banner(econf, hconf, fold), file=sys.stderr)
if fold is None:
subpack = dconf.pack
else:
subpack = select_testing(dconf.pack, dconf.folds, fold)
concatenate_outputs(subpack,
hconf.decode_output_path(econf, fold))
|
gpl-3.0
| 2,330,291,924,736,310,300
| 28.710059
| 73
| 0.626967
| false
| 3.651636
| false
| false
| false
|
DailyActie/Surrogate-Model
|
surrogate/selection/selRoulette.py
|
1
|
2509
|
# MIT License
#
# Copyright (c) 2016 Daily Actie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Quan Pan <quanpan302@hotmail.com>
# License: MIT License
# Create: 2016-12-02
import random
from operator import attrgetter
def selRoulette(individuals, k=1):
"""Select *k* individuals from the input *individuals* using *k*
spins of a roulette. The selection is made by looking only at the first
objective of each individual. The list returned contains references to
the input *individuals*.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:returns: A list of selected individuals.
This function uses the :func:`~random.random` function from the python base
:mod:`random` module.
.. warning::
The roulette selection by definition cannot be used for minimization
or when the fitness can be smaller or equal to 0.
"""
s_inds = sorted(individuals, key=attrgetter("fitness"), reverse=True)
# TODO 20161204 individual property fitness.values[]
# sum_fits = sum(ind.fitness.values[0] for ind in individuals)
sum_fits = sum(ind.fitness for ind in individuals)
chosen = []
for i in xrange(k):
u = random.random() * sum_fits
sum_ = 0
for ind in s_inds:
# sum_ += ind.fitness.values[0]
sum_ += ind.fitness
if sum_ > u:
chosen.append(ind)
break
return chosen
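# Illustrative usage sketch only (not part of the original module). The
# `Individual` class below is a hypothetical stand-in exposing the plain
# numeric `fitness` attribute that the (modified) selection above expects.
if __name__ == '__main__':
    class Individual(object):
        def __init__(self, fitness):
            self.fitness = fitness
        def __repr__(self):
            return 'Individual(fitness=%s)' % self.fitness
    # fitter individuals are proportionally more likely to be chosen
    population = [Individual(f) for f in (1.0, 2.0, 5.0, 10.0)]
    print(selRoulette(population, k=3))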
|
mit
| 1,467,734,263,457,390,600
| 38.203125
| 80
| 0.707852
| false
| 4.099673
| false
| false
| false
|
aroth-arsoft/arsoft-web-crashupload
|
app/crashdump/utils.py
|
1
|
24246
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;
crashdump_use_jinja2 = False
def _(msg):
return msg
def tag_a(name, title=None, href=None, alt=None):
from xml.etree.ElementTree import Element, tostring
a = Element('a')
a.text = name
if href:
a.set('href', href)
if title:
a.set('title', title)
if alt:
a.set('alt', alt)
return tostring(a, encoding="utf8", method='html').decode()
def _hex_format(number, prefix='0x', width=None, bits=None):
if isinstance(number, str):
try:
number = int(number)
except ValueError:
number = None
if number is None:
return '(none)'
if bits is not None:
if bits == 32:
number = number & 0xffffffff
if width is None:
width = 8
elif bits == 64:
number = number & 0xffffffffffffffff
if width is None:
width = 16
if width is None:
if number > 2**48:
width = 16
elif number > 2**40:
width = 12
elif number > 2**32:
width = 10
elif number > 2**24:
width = 8
elif number > 2**16:
width = 6
elif number > 2**8:
width = 4
else:
width = 2
fmt = '%%0%ix' % width
return prefix + fmt % number
def hex_format(number, prefix='0x', width=None, bits=None):
if isinstance(number, list):
nums = []
for n in number:
nums.append(_hex_format(n, prefix, width, bits))
return ','.join(nums)
else:
return _hex_format(number, prefix, width, bits)
def hex_format_bits(number, bits):
return hex_format(number, bits=bits)
def addr_format(number, prefix='0x', bits=64):
if number == 0:
return 'NULL'
elif number < 256:
return hex_format(number, 'NULL+' + prefix, bits=bits)
else:
return hex_format(number, prefix, bits=bits)
def addr_format_64(number, prefix='0x'):
if number == 0:
return 'NULL'
elif number < 256:
return hex_format(number, 'NULL+' + prefix, bits=64)
else:
return hex_format(number, prefix, bits=64)
def addr_format_32(number, prefix='0x'):
if number == 0:
return 'NULL'
elif number < 256:
return hex_format(number, 'NULL+' + prefix, bits=32)
else:
return hex_format(number, prefix, bits=32)
def addr_format_bits(number, bits=64):
return addr_format(number, bits=bits)
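def _hex_format_examples():
    """Illustrative sketch only (not part of the original module): a few
    sample outputs of the formatting helpers above; the values are chosen
    here purely for demonstration.
    """
    assert hex_format(255) == '0xff'               # fits in one byte -> width 2
    assert hex_format(0xdeadbeef) == '0xdeadbeef'  # > 2**24 -> width 8
    assert addr_format(0) == 'NULL'
    assert addr_format(16, bits=32) == 'NULL+0x00000010'
    return True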
def exception_code(platform_type, code, name):
if platform_type is None:
return 'Platform unknown'
elif platform_type == 'Linux':
return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Unix_signal')
elif platform_type == 'Windows NT':
return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Windows_NT')
elif platform_type == 'Windows':
return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Microsoft_Windows')
else:
return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Special:Search/' + str(platform_type))
def format_bool_yesno(val):
    if isinstance(val, str):
        # note: bool('no') would be True, so map common textual values explicitly
        val = val.strip().lower() in ('1', 'true', 'yes', 'on')
if val is None:
return '(none)'
elif val == True:
return _('yes')
elif val == False:
return _('no')
else:
return _('neither')
def format_source_line(source, line, line_offset=None, source_url=None):
if source is None:
return _('unknown')
else:
title = str(source) + ':' + str(line)
if line_offset is not None:
title += '+' + hex_format(line_offset)
if source_url is not None:
href = source_url
else:
href='file:///' + str(source)
return tag_a(title, href=href)
def format_function_plus_offset(function, funcoff=None):
if function is None:
return _('unknown')
else:
if funcoff:
return str(function) + '+' + hex_format(funcoff)
else:
return str(function)
def str_or_unknown(str):
if str is None:
return _('unknown')
else:
return str
def format_cpu_type(cputype):
cputype = cputype.lower()
if cputype == 'amd64':
href='http://en.wikipedia.org/wiki/X86-64'
title = 'x86-64 (also known as x64, x86_64 and AMD64)'
elif cputype == 'x86':
href='http://en.wikipedia.org/wiki/X86'
title = 'x86 (also known as i386)'
elif cputype == 'mips':
href='http://en.wikipedia.org/wiki/MIPS_instruction_set'
title = 'MIPS instruction set'
elif cputype == 'alpha':
href='http://en.wikipedia.org/wiki/DEC_Alpha'
title = 'Alpha, originally known as Alpha AXP'
elif cputype == 'alpha64':
href='http://en.wikipedia.org/wiki/DEC_Alpha'
title = 'Alpha64, originally known as Alpha AXP'
elif cputype == 'powerpc':
href='http://en.wikipedia.org/wiki/PowerPC'
title = 'PowerPC'
elif cputype == 'powerpc64':
href='http://en.wikipedia.org/wiki/Ppc64'
title = 'PowerPC64 or ppc64'
elif cputype == 'arm':
href='http://en.wikipedia.org/wiki/ARM_architecture'
title = 'ARM'
elif cputype == 'arm64':
href='http://en.wikipedia.org/wiki/ARM_architecture#64-bit'
title = 'ARM 64-bit'
elif cputype == 'sparc':
href='http://en.wikipedia.org/wiki/SPARC'
title = 'SPARC ("scalable processor architecture")'
elif cputype == 'ia64':
href='http://en.wikipedia.org/wiki/Itanium'
title = 'Intel Itanium architecture (IA-64)'
elif cputype == 'msil':
href='http://en.wikipedia.org/wiki/Common_Intermediate_Language'
title = 'Microsoft Intermediate Language (MSIL)'
elif cputype == 'x64 wow':
href='http://en.wikipedia.org/wiki/WoW64'
title = 'Microsoft WoW64'
else:
href = 'http://en.wikipedia.org/wiki/Central_processing_unit'
title = 'Unknown:%s' % cputype
return tag_a(title, title=cputype, href=href)
def format_cpu_vendor(vendor):
if vendor == 'AuthenticAMD':
title = 'AMD'
href = 'http://en.wikipedia.org/wiki/Advanced_Micro_Devices'
elif vendor == 'GenuineIntel':
title = 'Intel'
href = 'http://en.wikipedia.org/wiki/Intel'
elif vendor == 'Microsoft Hv':
title = 'Microsoft Hyper-V'
href = 'http://en.wikipedia.org/wiki/Hyper-V'
elif vendor == 'VMwareVMware':
title = 'VMware'
href = 'http://en.wikipedia.org/wiki/VMware'
elif vendor == 'KVMKVMKVMKVM':
title = 'KVM'
href = 'http://en.wikipedia.org/wiki/Kernel-based_Virtual_Machine'
elif vendor == 'XenVMMXenVMM':
title = 'Xen'
href = 'http://en.wikipedia.org/wiki/Xen'
else:
title = vendor
href = 'http://en.wikipedia.org/wiki/List_of_x86_manufacturers'
return tag_a(title, title=vendor, href=href)
def format_cpu_name(vendor, name):
# http://en.wikipedia.org/wiki/CPUID
# http://www.sandpile.org/x86/cpuid.htm
if vendor == 'AuthenticAMD':
if name is None:
title = 'Unknown AMD CPU'
href = 'http://en.wikipedia.org/wiki/Advanced_Micro_Devices'
elif name.startswith('AMD Ryzen'):
href = 'https://en.wikipedia.org/wiki/Ryzen'
title = 'AMD Ryzen'
elif name.startswith('AMD FX'):
href = 'http://en.wikipedia.org/wiki/List_of_AMD_FX_microprocessors'
title = 'AMD FX-series'
elif name.startswith('AMD Phenom'):
href = 'https://en.wikipedia.org/wiki/List_of_AMD_Phenom_microprocessors'
title = 'AMD Phenom family'
elif name.startswith('AMD Opteron'):
href = 'https://en.wikipedia.org/wiki/List_of_AMD_Opteron_microprocessors'
title = 'AMD Opteron family'
elif name.startswith('AMD Sempron'):
href = 'https://en.wikipedia.org/wiki/List_of_AMD_Sempron_microprocessors'
title = 'AMD Sempron family'
elif name.startswith('AMD Turion'):
href = 'https://en.wikipedia.org/wiki/List_of_AMD_Turion_microprocessors'
title = 'AMD Turion family'
elif name.startswith('AMD A'):
href = 'https://en.wikipedia.org/wiki/List_of_AMD_accelerated_processing_unit_microprocessors'
title = 'AMD APU series'
else:
title = 'Unknown AMD CPU'
href = 'http://en.wikipedia.org/wiki/Advanced_Micro_Devices'
title = title + ' (%s)' % name
elif vendor == 'GenuineIntel':
if name is None:
title = 'Unknown Intel CPU'
href = 'https://en.wikipedia.org/wiki/List_of_Intel_microprocessors'
elif name.startswith('Intel(R) Core(TM) i3'):
title = 'Intel Core i3 series'
href = 'http://en.wikipedia.org/wiki/Intel_Core'
elif name.startswith('Intel(R) Core(TM) i5'):
title = 'Intel Core i5 series'
href = 'http://en.wikipedia.org/wiki/Intel_Core'
elif name.startswith('Intel(R) Core(TM) i7'):
title = 'Intel Core i7 series'
href = 'http://en.wikipedia.org/wiki/Intel_Core'
elif name.startswith('Intel(R) Core(TM) i9'):
title = 'Intel Core i9 series'
href = 'http://en.wikipedia.org/wiki/Intel_Core'
elif name.startswith('Intel(R) Core(TM)'):
title = 'Unknown Intel Core series'
href = 'http://en.wikipedia.org/wiki/Intel_Core'
elif name.startswith('Intel(R) Xeon(R)') or name.startswith('Intel(R) Xeon(TM)'):
title = 'Intel Xeon series'
href = 'http://en.wikipedia.org/wiki/Xeon'
else:
title = 'Unknown Intel CPU'
href = 'https://en.wikipedia.org/wiki/List_of_Intel_microprocessors'
title = title + ' (%s)' % name
else:
title = name
href = 'http://en.wikipedia.org/wiki/List_of_x86_manufacturers'
return tag_a(name, title=title, href=href)
def format_distribution_id(distro_id):
if distro_id == 'Debian':
name = 'Debian'
href = 'http://www.debian.org'
elif distro_id == 'Ubuntu':
name = 'Ubuntu'
href = 'http://www.ubuntu.com'
else:
name = distro_id
href = 'http://distrowatch.com/' + distro_id
return tag_a(name, title=distro_id, href=href)
def format_distribution_codename(distro_id, distro_codename):
if distro_id == 'Debian':
name = '%s %s' % (distro_id.capitalize(), distro_codename.capitalize())
href = 'http://www.debian.org/%s%s' % (distro_id.capitalize(), distro_codename.capitalize())
elif distro_id == 'Ubuntu':
name = '%s %s' % (distro_id.capitalize(), distro_codename.capitalize())
href = 'http://ubuntuguide.org/wiki/%s_%s' % (distro_id.capitalize(), distro_codename.capitalize())
else:
name = distro_id
href = 'http://distrowatch.com/' + distro_id
return tag_a(name, title=distro_id, href=href)
def format_seconds(s):
if s is None:
return 'None'
elif s >= 3600:
hr = int(float(s) / 3600.0)
from math import fmod
m = fmod(float(s), 3600.0) / 60.0
return '%ihr %0.1fmin' % (hr, m)
elif s >= 60:
m = float(s) / 60.0
return '%0.1fmin' % m
elif s >= 1:
return '%0.1fs' % s
else:
return '%0.1fms' % ( s * 1000.0 )
def format_milliseconds(ms):
if ms is None:
return 'None'
elif ms > 1000:
s = float(ms) / 1000.0
return format_seconds(s)
else:
return '%ims' % ms
def format_trust_level(tl):
if tl == 0 or tl is None:
return 'Unknown'
elif tl == 1:
return 'Stack scan'
elif tl == 2:
return 'CFI scan'
elif tl == 3:
return 'FP'
elif tl == 4:
return 'CFI'
elif tl == 5:
return 'External'
elif tl == 6:
return 'IP'
else:
return 'unknown(%i)' % tl
_suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def format_size(nbytes):
if isinstance(nbytes, str):
try:
nbytes = int(nbytes)
except ValueError:
nbytes = None
if nbytes == 0: return '0 B'
elif nbytes is None: return 'None'
i = 0
while nbytes >= 1024 and i < len(_suffixes)-1:
nbytes /= 1024.
i += 1
f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
return '%s %s' % (f, _suffixes[i])
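def _format_size_examples():
    """Illustrative sketch only (not part of the original module): sample
    outputs of the duration and size helpers above.
    """
    assert format_seconds(0.5) == '500.0ms'
    assert format_seconds(90) == '1.5min'
    assert format_size(0) == '0 B'
    assert format_size(1536) == '1.5 KB'
    return True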
def format_memory_usagetype(usage):
if usage == 0 or usage is None:
return 'Unknown'
elif usage == 1:
return 'Stack'
elif usage == 2:
return 'TEB'
elif usage == 3:
return 'PEB'
elif usage == 4:
return 'Process Parameters'
elif usage == 5:
return 'Environment'
elif usage == 6:
return 'IP'
elif usage == 7:
return 'Process Heap Handles'
elif usage == 8:
return 'Process Heap'
elif usage == 9:
return 'TLS'
elif usage == 10:
return 'Thread info block'
else:
return 'unknown(%i)' % usage
def format_gl_extension_name(ext):
khronos_extension_base_url = 'https://www.khronos.org/registry/OpenGL/extensions'
unknown_extension_url = 'https://www.khronos.org/opengl/wiki/OpenGL_Extension'
title = ext
name = ext
href = unknown_extension_url
vendor = None
ext_name = None
if ext.startswith('GL_'):
vendor_end = ext.index('_', 3)
if vendor_end > 0:
vendor = ext[3:vendor_end]
ext_name = ext[3:]
elif ext.startswith('GLX_') or ext.startswith('WGL_'):
vendor_end = ext.index('_', 4)
if vendor_end > 0:
vendor = ext[4:vendor_end]
ext_name = ext
if vendor and ext_name:
href = khronos_extension_base_url + '/%s/%s.txt' % (vendor, ext_name)
return tag_a(name, title=title, href=href)
def format_version_number(num):
    if num is not None and not isinstance(num, int):
        try:
            num = int(num)
        except (TypeError, ValueError):
            num = None
if num is None: return 'None'
m, n, o, p = (num >> 48) & 0xffff, (num >> 32) & 0xffff, (num >> 16) & 0xffff, (num >> 0) & 0xffff
return '%i.%i.%i.%i' % (m, n, o, p)
def format_platform_type(platform_type):
if platform_type is None:
return _('Platform unknown')
elif platform_type == 'Linux':
return tag_a('Linux', href='https://en.wikipedia.org/wiki/Linux')
elif platform_type == 'Windows NT':
return tag_a('Windows NT',href='https://en.wikipedia.org/wiki/Windows_NT')
elif platform_type == 'Windows':
return tag_a('Windows', href='https://en.wikipedia.org/wiki/Microsoft_Windows')
else:
return tag_a(platform_type, href='https://en.wikipedia.org/wiki/Special:Search/' + str(platform_type))
def _get_version_from_string(number_str):
elems = number_str.split('.')
major = 0
minor = 0
patch = 0
build = 0
if len(elems) >= 1:
major = int(elems[0])
if len(elems) >= 2:
minor = int(elems[1])
if len(elems) >= 3:
patch = int(elems[2])
if len(elems) >= 4:
build = int(elems[3])
return major, minor, patch, build
def _get_version_from_numbers(os_version_number, os_build_number):
print('_get_version_from_numbers %s, %s' % (os_version_number, os_build_number))
if isinstance(os_version_number, int):
major = os_version_number >> 48 & 0xffff
minor = os_version_number >> 32 & 0xffff
patch = os_version_number >> 16 & 0xffff
build = os_version_number & 0xffff
if build == 0 and os_build_number:
build = int(os_build_number) if os_build_number is not None else 0
else:
major, minor, patch, build = _get_version_from_string(os_version_number)
#print('%x, %s -> %i.%i.%i.%i' % (os_version_number, os_build_number, major, minor, patch, build))
return major, minor, patch, build
def get_os_version_number(platform_type, os_version_number, os_build_number):
if platform_type is None or os_version_number is None:
return 0
if platform_type == 'Linux':
major, minor, patch, build = _get_version_from_string(os_version_number)
elif platform_type == 'Windows NT':
major, minor, patch, build = _get_version_from_string(os_version_number)
if major >= 10:
build = patch
patch = 0
else:
major = 0
minor = 0
patch = 0
build = 0
ret = (major << 48) | (minor << 32) | (patch << 16) | build
print('ver in %s -> %x' % (os_version_number, ret))
return ret
def get_os_build_number(platform_type, os_version_number, os_build_number):
if platform_type is None or os_version_number is None:
return 0
if platform_type == 'Linux':
build = 0
elif platform_type == 'Windows NT':
major, minor, patch, build = _get_version_from_string(os_version_number)
if major >= 10:
build = patch
else:
build = 0
print('build in %s -> %x' % (os_version_number, build))
return build
def os_version_info(platform_type, os_version_number, os_build_number):
ret = {'text': 'unknown' }
if platform_type is None or os_version_number is None:
return ret
major, minor, patch, build = _get_version_from_numbers(os_version_number, os_build_number)
if platform_type == 'Linux':
ret['text'] = 'Linux %i.%i.%i.%i' % (major, minor, patch, build)
ret['href'] = 'https://en.wikipedia.org/wiki/Linux'
elif platform_type == 'Windows NT':
productName = 'Windows %i.%i' % (major, minor)
marketingName = None
if (major < 6):
productName = "Windows XP"
ret['short'] = 'WinXP'
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_XP'
elif (major == 6 and minor == 0):
productName = "Windows Vista"
ret['short'] = 'WinVista'
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_Vista'
elif (major == 6 and minor == 1):
productName = "Windows 7"
ret['short'] = 'Win7'
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_7'
elif (major == 6 and minor == 2):
productName = "Windows 8"
ret['short'] = 'Win8'
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_8'
elif (major == 6 and minor == 3):
productName = "Windows 8.1"
ret['short'] = 'Win8.1'
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_8'
elif (major == 10):
ret['href'] = 'https://en.wikipedia.org/wiki/Windows_10'
# See https://en.wikipedia.org/wiki/Windows_10_version_history
if build <= 10240:
ret['short'] = 'Win10'
productName = "Windows 10"
marketingName = ''
elif(build <= 10586):
ret['short'] = 'Win10/1511'
productName = "Windows 10 Version 1511"
marketingName = "November Update"
elif (build <= 14393):
ret['short'] = 'Win10/1607'
productName = "Windows 10 Version 1607"
marketingName = "Anniversary Update"
elif (build <= 15063):
ret['short'] = 'Win10/1703'
productName = "Windows 10 Version 1703"
marketingName = "Creators Update"
elif (build <= 16299):
ret['short'] = 'Win10/1709'
productName = "Windows 10 Version 1709"
marketingName = "Fall Creators Update"
elif (build <= 17134):
ret['short'] = 'Win10/1803'
productName = "Windows 10 Version 1803"
marketingName = "April 2018 Update"
elif (build <= 18204):
ret['short'] = 'Win10/1809'
productName = "Windows 10 Version 1809"
marketingName = "October 2018 Update"
elif (build <= 18362):
ret['short'] = 'Win10/1903'
productName = "Windows 10 Version 1903"
marketingName = "May 2019 Update"
elif (build <= 18363):
ret['short'] = 'Win10/1909'
productName = "Windows 10 Version 1909"
marketingName = "November 2019 Update"
elif (build <= 19041):
ret['short'] = 'Win10/2004'
productName = "Windows 10 Version 2004"
marketingName = "May 2020 Update"
elif (build <= 19042):
                ret['short'] = 'Win10/20H2'
productName = "Windows 10 Version 20H2"
marketingName = '' # TBA
else:
ret['short'] = 'Win10/TBA'
productName = 'Windows 10 Build %i' % build
if marketingName:
ret['text'] = '%s (%s)' % (productName, marketingName)
else:
ret['text'] = productName
ret['full'] = ret['text'] + ' %i.%i.%i.%i' % (major, minor, patch, build)
elif platform_type == 'Windows':
ret['text'] = 'Windows %i.%i' % (major, minor)
ret['href'] = 'https://en.wikipedia.org/wiki/Microsoft_Windows'
return ret
def format_os_version(platform_type, os_version_number, os_build_number):
info = os_version_info(platform_type, os_version_number, os_build_number)
if 'href' in info:
return tag_a(info.get('text'), href=info.get('href'))
else:
return info.get('text')
def format_os_version_short(platform_type, os_version_number, os_build_number):
info = os_version_info(platform_type, os_version_number, os_build_number)
if 'short' in info:
return info.get('short')
else:
return info.get('text')
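def _os_version_examples():
    """Illustrative sketch only (not part of the original module): sample
    outputs of the Windows version helpers above for a hypothetical
    Windows 10 build 19041 version string.
    """
    info = os_version_info('Windows NT', '10.0.0.19041', None)
    assert info['short'] == 'Win10/2004'
    assert info['text'] == 'Windows 10 Version 2004 (May 2020 Update)'
    assert format_os_version_short('Windows NT', '10.0.0.19041', None) == 'Win10/2004'
    return True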
def language_from_qlocale_language_enum(num):
_codes = {
0: 'Any language',
31: 'English',
42: 'German',
}
if num in _codes:
return _codes[num]
else:
return str(num)
# See https://doc.qt.io/qt-5/qlocale.html#Country-enum
def country_from_qlocale_country_enum(num):
_codes = {
0: 'Any country',
82: 'Germany',
224: 'United Kingdom',
225: 'United States',
}
if num in _codes:
return _codes[num]
else:
return str(num)
# https://doc.qt.io/qt-5/qlocale.html#Script-enum
def script_from_qlocale_script_enum(num):
_codes = {
0: 'Any script',
1: 'Arabic',
2: 'Cyrillic',
16: 'Greek',
7: 'Latin',
}
if num in _codes:
return _codes[num]
else:
return str(num)
def thread_extra_info(thread):
if thread is None:
return _('N/A')
elif thread.main_thread:
return '*@' if thread.exception else '@'
elif thread.rpc_thread:
return '*[RPC]' if thread.exception else '[RPC]'
elif thread.exception:
return '*'
else:
return ''
def format_thread(thread):
if thread is None:
return _('N/A')
else:
if thread.main_thread:
ret = _('Main thread')
elif thread.rpc_thread:
ret = _('RPC thread')
else:
ret = _('Thread')
ret = ret + ' ' + hex_format(thread.id)
if thread.name:
ret = ret + ' ' + thread.name
if thread.exception:
ret = ret + ' ' + _('with exception')
return ret
def format_stack_frame(frame):
if frame is None:
return _('N/A')
else:
if frame.function is None:
offset = frame.addr - frame.module_base
if frame.module:
return frame.module + '+' + hex_format(offset)
else:
return frame.addr
else:
return format_function_plus_offset(frame.function, frame.funcoff)
|
gpl-3.0
| -138,188,937,514,413,520
| 34.39562
| 137
| 0.558443
| false
| 3.501733
| false
| false
| false
|
libicocco/poser-hand-generator
|
createGraspICRA09.py
|
1
|
3429
|
# creatGraspICRA09.py - script for creating a hand poses database
#
# Copyright (c) 2009 Javier Romero
#
# Author: Javier Romero <jrgn@kth.se>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import poser
import linecache
import os
import setCamAZEL
import setTexture
from os.path import join
scene = poser.Scene()
basedir = os.path.dirname(os.path.abspath(__file__))
dir = join(basedir, 'out')
lightdir = join(basedir, 'lights')
taxonomyDir = join(basedir, 'taxonomy')
texture = join(basedir, 'Hand Texture2.TIF')
listpath = join(basedir, 'poses', 'handjointssavinglist.txt')
#lights=["light1.lt2","light2.lt2","light3.lt2","light4.lt2"]
lights = ["light1.lt2"]
nAz = 24
nEl = 12
nRo = 9
nFrames = 6
grasps = ["largeDiameter", "smallDiameter", "mediumWrap", "adductedThumb",
"lightTool", "prismatic4Finger", "prismatic3Finger",
"prismatic2Finger", "palmarPinch", "powerDisk", "powerSphere",
"precisionDisk", "precisionSphere", "tripod", "fixedHook", "lateral",
"indexFingerExtension", "extensionType", "distalType",
"writingTripod", "tripodVariation", "parallelExtension",
"adductionGrip", "tipPinch", "lateralTripod", "sphere4Finger",
"quadpod", "sphere3Finger", "stick", "palmarGrasp",
"ringGrasp", "ventralGrasp", "inferiorPincerGrasp"]
#poser.SetNumRenderThreads(4)
#poser.SetRenderInSeparateProcess(1)
for graspIndex in range(len(grasps)):
outdir = join(dir, '%02d' % (graspIndex+1))
if not os.path.isdir(outdir):
os.mkdir(outdir)
for lightindex in range(len(lights)):
jointFileName0 = join(taxonomyDir, "rest.txt")
jointFileName1 = join(taxonomyDir, grasps[graspIndex] + ".txt")
graspCode = (graspIndex)*(len(lights)) + lightindex + 1
# close and discard changes
poser.CloseDocument(1)
poser.OpenDocument(join(taxonomyDir, grasps[graspIndex] + ".pz3"))
        scene.LoadLibraryLight(join(lightdir, lights[lightindex]))
setTexture.setTexture(texture)
linecache.checkcache(jointFileName0)
linecache.checkcache(jointFileName1)
setCamAZEL.setRenderOptions(scale=0)
gnd = scene.Actor("GROUND")
gnd.SetVisible(0)
gnd.SetVisibleInRender(0)
gnd.SetVisibleInReflections(0)
ffly = scene.CurrentFireFlyOptions()
ffly.SetManual(1)
setCamAZEL.multiViewSeqRender(basedir, nAz, nEl, nRo, outdir,
jointFileName0, jointFileName1,
nFrames, graspCode, listpath=listpath,
fullSphere=True, f=70,
camName="RHand Camera")
|
gpl-2.0
| 4,005,655,278,173,334,000
| 39.313253
| 79
| 0.656168
| false
| 3.384995
| false
| false
| false
|
jeffmacinnes/pyneal
|
pyneal_scanner/utils/Siemens_utils.py
|
1
|
30222
|
""" Set of classes and methods specific to Siemens scanning environments
"""
from __future__ import print_function
from __future__ import division
import os
from os.path import join
import sys
import time
import re
import json
import glob
import logging
from threading import Thread
from queue import Queue
import numpy as np
import pydicom
import nibabel as nib
from nibabel.nicom import dicomreaders
import zmq
# regEx for Siemens style file naming
Siemens_filePattern = re.compile(r'\d{3}_\d{6}_\d{6}\.dcm')
# regEx for pulling the volume field out of the mosaic file name
Siemens_mosaicVolumeNumberField = re.compile(r'(?<=\d{6}_)\d{6}')
Siemens_mosaicSeriesNumberField = re.compile(r'(?<=\d{3}_)\d{6}(?=_\d{6}\.dcm)')
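def _filename_parsing_sketch():
    """Illustrative sketch only (not part of the original module): how the
    regexes above pull the series and volume fields out of a hypothetical
    mosaic file name of the form [session#]_[series#]_[vol#].dcm
    """
    example = '001_000013_000042.dcm'
    assert Siemens_filePattern.match(example) is not None
    assert Siemens_mosaicSeriesNumberField.search(example).group() == '000013'
    assert Siemens_mosaicVolumeNumberField.search(example).group() == '000042'
    return example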
class Siemens_DirStructure():
""" Finding the names and paths of series directories in a Siemens scanning
environment.
In Siemens environments, using the ideacmdtool, the scanner is set up to
export data in real-time to a shared directory that is accessible from a
remote workstation (running Pyneal Scanner). For functional data, Siemens
scanners store reconstructed slices images by taking all of the slices for
a single volume, and placing them side-by-side in a larger "mosaic" dicom
image. A scan will produce one mosaic image per volume.
For anatomical data, dicom images for each 2D slice will be written as
separate files, numbered sequentially, and saved in the `sessionDir`.
All dicom images for all scans across a single session will be stored in
the same directory. We'll call this directory the `sessionDir`.
A single `sessionDir` will hold all of the mosaic files for all of the
series for the current session. The series number is contained in the
filename, which follows the pattern:
[session#]_[series#]_[vol#].dcm
These files will appear in real-time as the scan progresses.
This class contains methods to retrieve the current `sessionDir`, show the
current series that are present, and monitor the `sessionDir` for the
appearance of new series files.
"""
def __init__(self, scannerSettings):
""" Initialize the class
Parameters
----------
scannerSettings : object
class attributes represent all of the settings unique to the
current scanning environment (many of them read from
`scannerConfig.yaml`)
See Also
--------
general_utils.ScannerSettings
"""
# initialize class attributes
if 'scannerSessionDir' in scannerSettings.allSettings:
self.sessionDir = scannerSettings.allSettings['scannerSessionDir']
else:
print('No scannerSessionDir found in scannerConfig file')
sys.exit()
def print_currentSeries(self):
""" Find all of the series present in given sessionDir, and print them
all, along with time since last modification, and directory size
"""
# find the sessionDir, if not already found
if self.sessionDir is None:
self.findSessionDir()
print('Session Dir: ')
print('{}'.format(self.sessionDir))
# find all mosaic files in the sessionDir
self.uniqueSeries = self.getUniqueSeries()
if len(self.uniqueSeries) == 0:
print('No mosaic files found in {}'.format(self.sessionDir))
else:
# print out info on each unique series in sessionDir
currentTime = int(time.time())
print('Unique Series: ')
for series in sorted(self.uniqueSeries):
# get list of all dicoms that match this series number
thisSeriesDicoms = glob.glob(join(self.sessionDir, ('*_' + series + '_*.dcm')))
# get time since last modification for last dicom in list
lastModifiedTime = os.stat(thisSeriesDicoms[-1]).st_mtime
timeElapsed = currentTime - lastModifiedTime
m, s = divmod(timeElapsed, 60)
time_string = '{} min, {} s ago'.format(int(m), int(s))
print(' {}\t{} files \t{}'.format(series, len(thisSeriesDicoms), time_string))
def getUniqueSeries(self):
""" Return a list of unique series numbers from the filenames of the
files found in the sessionDir
"""
uniqueSeries = []
self.allMosaics = [f for f in os.listdir(self.sessionDir) if Siemens_filePattern.match(f)]
if len(self.allMosaics) > 0:
# find unique series numbers among all mosaics
seriesNums = []
for f in self.allMosaics:
seriesNums.append(Siemens_mosaicSeriesNumberField.search(f).group())
uniqueSeries = set(seriesNums)
return uniqueSeries
def waitForNewSeries(self, interval=.1):
""" Listen for the appearance of new series files
Once a scan starts, new series mosaic files will be created in the
`sessionDir`. By the time this function is called, this class should
already have the `sessionDir` defined
Parameters
----------
interval : float, optional
            time, in seconds, to wait between polls for new series files
Returns
-------
newSeries : string
seriesNum of the new series
"""
keepWaiting = True
existingSeries = self.getUniqueSeries()
while keepWaiting:
# get all of the unique series again
currentSeries = self.getUniqueSeries()
# compare against existing series
diff = currentSeries - existingSeries
if len(diff) > 0:
newSeries = diff.pop()
keepWaiting = False
# pause before searching directories again
time.sleep(interval)
# return the found series name
return newSeries
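def _dir_structure_sketch(sessionDir='/tmp/scanner_session'):
    """Illustrative sketch only (not part of the original module): minimal use
    of Siemens_DirStructure with a stand-in settings object. In the real
    pipeline the settings come from general_utils.ScannerSettings (read from
    scannerConfig.yaml); the directory used here is hypothetical.
    """
    os.makedirs(sessionDir, exist_ok=True)
    class _FakeSettings(object):
        allSettings = {'scannerSessionDir': sessionDir}
    scannerDirs = Siemens_DirStructure(_FakeSettings())
    scannerDirs.print_currentSeries()  # lists any series already present
    return scannerDirs.getUniqueSeries()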
class Siemens_BuildNifti():
""" Tools to build a 3D or 4D Nifti image from all of the dicom mosaic
images in a directory.
Input is a path to a series directory containing dicom images (either
mosaic images for functional data, or 2D slice image for anatomical data).
Image parameters, like voxel spacing and dimensions, are obtained
automatically from the info in the dicom tags
End result is a Nifti1 formatted 3D (anat) or 4D (func) file in RAS+
orientation
"""
def __init__(self, seriesDir, seriesNum):
""" Initialize class, and set/obtain basic class attributes like file
paths and scan parameters
Parameters
----------
seriesDir : string
full path to the directory containing the raw dicom mosaic files
for each volume in the series
seriesNum : string
series number of the series that you'd like to build the nifti
image from
"""
# initialize attributes
self.seriesDir = seriesDir
self.seriesNum = seriesNum
self.niftiImage = None
# make a list of the specified raw dicom mosaic files in this dir
rawDicoms = glob.glob(join(self.seriesDir, ('*_' + str(self.seriesNum).zfill(6) + '_*.dcm')))
# figure out what type of image this is, 4d or 3d
self.scanType = self._determineScanType(rawDicoms[0])
# build the nifti image
if self.scanType == 'anat':
self.niftiImage = self.buildAnat(rawDicoms)
elif self.scanType == 'func':
self.niftiImage = self.buildFunc(rawDicoms)
def buildAnat(self, dicomFiles):
""" Build a 3D structural/anatomical image from list of dicom files
Given a list of `dicomFiles`, build a 3D anatomical image from them.
Figure out the image dimensions and affine transformation to map
from voxels to mm from the dicom tags
Parameters
----------
dicomFiles : list
list containing the file names (file names ONLY, no path) of all
dicom slice images to be used in constructing the final nifti image
Returns
-------
anatImage_RAS : Nifti1Image
nifti-1 formated image of the 3D anatomical data, oriented in
RAS+
See Also
--------
nibabel.nifti1.Nifti1Image()
"""
# read the first dicom in the list to get overall image dimensions
dcm = pydicom.dcmread(join(self.seriesDir, dicomFiles[0]), stop_before_pixels=1)
sliceDims = (getattr(dcm, 'Columns'), getattr(dcm, 'Rows'))
self.nSlicesPerVol = len(dicomFiles)
sliceThickness = getattr(dcm, 'SliceThickness')
### Build 3D array of voxel data
# create an empty array to store the slice data
imageMatrix = np.zeros(shape=(
sliceDims[0],
sliceDims[1],
self.nSlicesPerVol), dtype='int16')
# Use the InstanceNumber tag to order the slices. This works for anat
# 3D images only, since the instance numbers do not repeat as they would
# with functional data with multiple volumes
sliceDict = {}
for s in dicomFiles:
dcm = pydicom.dcmread(join(self.seriesDir, s))
sliceDict[dcm.InstanceNumber] = join(self.seriesDir, s)
        # sort by InstanceNumber (the sliceDict keys) and assemble the image
for sliceIdx, ISPN in enumerate(sorted(sliceDict.keys())):
dcm = pydicom.dcmread(sliceDict[ISPN])
# extract the pixel data as a numpy array. Transpose
# so that the axes order go [cols, rows]
pixel_array = dcm.pixel_array.T
# place in the image matrix
imageMatrix[:, :, sliceIdx] = pixel_array
### create the affine transformation to map from vox to mm space
# in order to do this, we need to get some values from the first and
# last slices in the volume.
firstSlice = sliceDict[sorted(sliceDict.keys())[0]]
lastSlice = sliceDict[sorted(sliceDict.keys())[-1]]
dcm_first = pydicom.dcmread(firstSlice)
dcm_last = pydicom.dcmread(lastSlice)
self.pixelSpacing = getattr(dcm_first, 'PixelSpacing')
self.firstSlice_IOP = np.array(getattr(dcm_first, 'ImageOrientationPatient'))
self.firstSlice_IPP = np.array(getattr(dcm_first, 'ImagePositionPatient'))
self.lastSlice_IPP = np.array(getattr(dcm_last, 'ImagePositionPatient'))
# now we can build the affine
affine = self.buildAffine()
### Build a Nifti object, reorder it to RAS+
anatImage = nib.Nifti1Image(imageMatrix, affine=affine)
anatImage_RAS = nib.as_closest_canonical(anatImage) # reoder to RAS+
print('Nifti image dims: {}'.format(anatImage_RAS.shape))
return anatImage_RAS
def buildFunc(self, dicomFiles):
""" Build a 4D functional image from list of dicom files
Given a list of dicomFile paths, build a 4d functional image. For
Siemens scanners, each dicom file is assumed to represent a mosaic
        image comprised of multiple slices. This tool will split apart the
mosaic images, and construct a 4D nifti object. The 4D nifti object
        contains a voxel array ordered like RAS+ as well as the affine
transformation to map between vox and mm space
Parameters
----------
dicomFiles : list
list containing the file names (file names ONLY, no path) of all
dicom mosaic images to be used in constructing the final nifti
image
"""
imageMatrix = None
affine = None
TR = None
# make dicomFiles store the full path
dicomFiles = [join(self.seriesDir, f) for f in dicomFiles]
### Loop over all dicom mosaic files
nVols = len(dicomFiles)
for mosaic_dcm_fname in dicomFiles:
### Parse the mosaic image into a 3D volume
# we use the nibabel mosaic_to_nii() method which does a lot of the
# heavy-lifting of extracting slices, arranging in a 3D array, and
# grabbing the affine
dcm = pydicom.dcmread(mosaic_dcm_fname) # create dicom object
# for mosaic files, the instanceNumber tag will correspond to the
# volume number (using a 1-based indexing, so subtract by 1)
volIdx = dcm.InstanceNumber - 1
# convert the dicom object to nii
thisVol = dicomreaders.mosaic_to_nii(dcm)
# convert to RAS+
thisVol_RAS = nib.as_closest_canonical(thisVol)
if TR is None:
TR = dcm.RepetitionTime / 1000
# construct the imageMatrix if it hasn't been made yet
if imageMatrix is None:
imageMatrix = np.zeros(shape=(thisVol_RAS.shape[0],
thisVol_RAS.shape[1],
thisVol_RAS.shape[2],
nVols), dtype=np.uint16)
# construct the affine if it isn't made yet
if affine is None:
affine = thisVol_RAS.affine
# Add this data to the image matrix
imageMatrix[:, :, :, volIdx] = thisVol_RAS.get_fdata()
### Build a Nifti object
funcImage = nib.Nifti1Image(imageMatrix, affine=affine)
pixDims = np.array(funcImage.header.get_zooms())
pixDims[3] = TR
funcImage.header.set_zooms(pixDims)
return funcImage
def buildAffine(self):
""" Build the affine matrix that will transform the data to RAS+.
This function should only be called once the required data has been
extracted from the dicom tags from the relevant slices. The affine
matrix is constructed by using the information in the
ImageOrientationPatient and ImagePositionPatient tags from the first
and last slices in a volume.
However, note that those tags will tell you how to orient the image to
        DICOM reference coordinate space, which is LPS+. In order to get to
RAS+ we have to invert the first two axes.
Notes
-----
For more info on building this affine, please see the documentation at:
http://nipy.org/nibabel/dicom/dicom_orientation.html
http://nipy.org/nibabel/coordinate_systems.html
"""
### Get the ImageOrientation values from the first slice,
# split the row-axis values (0:3) and col-axis values (3:6)
# and then invert the first and second values of each
rowAxis_orient = self.firstSlice_IOP[0:3] * np.array([-1, -1, 1])
colAxis_orient = self.firstSlice_IOP[3:6] * np.array([-1, -1, 1])
### Get the voxel size along Row and Col axis
voxSize_row = float(self.pixelSpacing[0])
voxSize_col = float(self.pixelSpacing[1])
### Figure out the change along the 3rd axis by subtracting the
# ImagePosition of the last slice from the ImagePosition of the first,
# then dividing by 1/(total number of slices-1), then invert to
# make it go from LPS+ to RAS+
slAxis_orient = (self.firstSlice_IPP - self.lastSlice_IPP) / (1 - self.nSlicesPerVol)
slAxis_orient = slAxis_orient * np.array([-1, -1, 1])
### Invert the first two values of the firstSlice ImagePositionPatient.
# This tag represents the translation needed to take the origin of our 3D voxel
# array to the origin of the LPS+ reference coordinate system. Since we want
# RAS+, need to invert those first two axes
voxTranslations = self.firstSlice_IPP * np.array([-1, -1, 1])
### Assemble the affine matrix
affine = np.matrix([
[rowAxis_orient[0] * voxSize_row, colAxis_orient[0] * voxSize_col, slAxis_orient[0], voxTranslations[0]],
[rowAxis_orient[1] * voxSize_row, colAxis_orient[1] * voxSize_col, slAxis_orient[1], voxTranslations[1]],
[rowAxis_orient[2] * voxSize_row, colAxis_orient[2] * voxSize_col, slAxis_orient[2], voxTranslations[2]],
[0, 0, 0, 1]
])
return affine
def _determineScanType(self, dicomFile):
""" Figure out what type of scan this is, anat or func
This tool will determine the scan type from a given dicom file.
Possible scan types are either single 3D volume (anat), or a 4D dataset
built up of 2D slices (func). The scan type is determined by reading
the `MRAcquisitionType` tag from the dicom file
Parameters
----------
dcmFile : string
file name of dicom file from the current series that you would like
to open to read the imaging parameters from
Returns
-------
scanType : string
either 'anat' or 'func' depending on scan type stored in dicom tag
"""
# read the dicom file
dcm = pydicom.dcmread(join(self.seriesDir, dicomFile), stop_before_pixels=1)
if getattr(dcm, 'MRAcquisitionType') == '3D':
scanType = 'anat'
elif getattr(dcm, 'MRAcquisitionType') == '2D':
scanType = 'func'
else:
print('Cannot determine a scan type from this image!')
sys.exit()
return scanType
def get_scanType(self):
""" Return the scan type """
return self.scanType
def get_niftiImage(self):
""" Return the constructed Nifti Image """
return self.niftiImage
def write_nifti(self, output_path):
""" Write the nifti file to disk
Parameters
----------
outputPath : string
full path, including filename, you want to use to save the nifti
image
"""
nib.save(self.niftiImage, output_path)
print('Image saved at: {}'.format(output_path))
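def _build_nifti_sketch(seriesDir='/tmp/scanner_session', seriesNum=13):
    """Illustrative sketch only (not part of the original module): build and
    save a nifti image for one series. The directory and series number here
    are hypothetical; `seriesDir` must already hold the raw dicom files for
    that series.
    """
    builder = Siemens_BuildNifti(seriesDir, seriesNum)
    print('scan type: {}'.format(builder.get_scanType()))
    out_path = join(seriesDir, 'series{}.nii.gz'.format(seriesNum))
    builder.write_nifti(out_path)
    return out_path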
class Siemens_monitorSessionDir(Thread):
""" Class to monitor for new mosaic images to appear in the sessionDir.
This class will run independently in a separate thread. Each new mosaic
file that appears and matches the current series number will be added to
the Queue for further processing
"""
def __init__(self, sessionDir, seriesNum, dicomQ, interval=.2):
""" Initialize the class, and set basic class attributes
Parameters
----------
sessionDir : string
full path to the session directory where new dicom mosaic files
will appear
seriesNum : string
series number assigned to the new series
dicomQ : object
instance of python queue class to hold new dicom files before they
have been processed. This class will add items to that queue.
interval : float, optional
time, in seconds, to wait before repolling the seriesDir to check
for any new files
"""
        # initialize the parent Thread class
Thread.__init__(self)
# set up logger
self.logger = logging.getLogger(__name__)
# initialize class parameters
self.interval = interval # interval for polling for new files
self.sessionDir = sessionDir # full path to series directory
self.seriesNum = seriesNum # series number of current series
self.dicomQ = dicomQ # queue to store dicom mosaic files
self.alive = True # thread status
self.numMosaicsAdded = 0 # counter to keep track of # mosaics
        self.queued_mosaic_files = set() # empty set to store names of queued mosaic files
def run(self):
# function that runs while the Thread is still alive
while self.alive:
# create a set of all mosaic files with the current series num
#currentMosaics = set(os.listdir(self.seriesDir))
currentMosaics = set(glob.glob(join(self.sessionDir, ('*_' + str(self.seriesNum).zfill(6) + '_*.dcm'))))
# grab only the ones that haven't already been added to the queue
newMosaics = [f for f in currentMosaics if f not in self.queued_mosaic_files]
# loop over each of the new mosaic files, add each to queue
for f in newMosaics:
mosaic_fname = join(self.sessionDir, f)
try:
self.dicomQ.put(mosaic_fname)
except:
self.logger.error('failed on: {}'.format(mosaic_fname))
print(sys.exc_info())
sys.exit()
if len(newMosaics) > 0:
self.logger.debug('Put {} new mosaic file on the queue'.format(len(newMosaics)))
self.numMosaicsAdded += len(newMosaics)
# now update the set of mosaics added to the queue
self.queued_mosaic_files.update(set(newMosaics))
# pause
time.sleep(self.interval)
def get_numMosaicsAdded(self):
""" Return the cumulative number of mosaic files added to the queue thus far """
return self.numMosaicsAdded
def stop(self):
""" Set the `alive` flag to False, stopping thread """
self.alive = False
class Siemens_processMosaic(Thread):
""" Class to process each mosaic file in the queue.
This class will run in a separate thread. While running, it will pull
'tasks' off of the queue and process each one. Processing each task
involves reading the mosaic file, converting it to a 3D Nifti object,
reordering it to RAS+, and then sending the volume out over the
pynealSocket
"""
def __init__(self, dicomQ, pynealSocket, interval=.2):
""" Initialize the class
Parameters
----------
dicomQ : object
            instance of python queue class that will store the dicom mosaic file
names. This class will pull items from that queue.
pynealSocket : object
instance of ZMQ style socket that will be used to communicate with
Pyneal. This class will use this socket to send image data and
headers to Pyneal during the real-time scan.
See also: general_utils.create_pynealSocket()
interval : float, optional
time, in seconds, to wait before repolling the queue to see if
there are any new file names to process
"""
        # initialize the parent Thread class
Thread.__init__(self)
# set up logger
self.logger = logging.getLogger(__name__)
# initialize class parameters
self.dicomQ = dicomQ
self.interval = interval # interval between polling queue for new files
self.alive = True
self.pynealSocket = pynealSocket
self.totalProcessed = 0 # counter for total number of slices processed
def run(self):
self.logger.debug('Siemens_processMosaic started')
# function to run on loop
while self.alive:
# if there are any mosaic files in the queue, process them
if not self.dicomQ.empty():
numMosaicsInQueue = self.dicomQ.qsize()
# loop through all mosaics currently in queue & process
for m in range(numMosaicsInQueue):
# retrieve file name from queue
mosaic_dcm_fname = self.dicomQ.get(True, 2)
# ensure the file has copied completely
file_size = 0
while True:
file_info = os.stat(mosaic_dcm_fname)
if file_info.st_size == 0 or file_info.st_size > file_size:
file_size = file_info.st_size
else:
break
# process this mosaic
self.processMosaicFile(mosaic_dcm_fname)
# complete this task, thereby clearing it from the queue
self.dicomQ.task_done()
# log how many were processed
self.totalProcessed += numMosaicsInQueue
self.logger.debug('Processed {} tasks from the queue ({} total)'.format(numMosaicsInQueue, self.totalProcessed))
# pause for a bit
time.sleep(self.interval)
def processMosaicFile(self, mosaic_dcm_fname):
""" Process a given mosaic dicom file
This method will read the dicom mosaic file. Convert to a nifti object
that will provide the 3D voxel array for this mosaic. Reorder to RAS+,
and then send to the pynealSocket
Parameters
----------
mosaic_dcm_fname : string
full path to the dicom mosaic file that you want to process
"""
### Figure out the volume index for this mosaic by reading
# the field from the file name itself
mosaicFile_root, mosaicFile_name = os.path.split(mosaic_dcm_fname)
volIdx = int(Siemens_mosaicVolumeNumberField.search(mosaicFile_name).group(0)) - 1
self.logger.info('Volume {} processing'.format(volIdx))
### Parse the mosaic image into a 3D volume
# we use the nibabel mosaic_to_nii() method which does a lot of the
# heavy-lifting of extracting slices, arranging in a 3D array, and
# grabbing the affine
dcm = pydicom.dcmread(mosaic_dcm_fname) # create dicom object
thisVol = dicomreaders.mosaic_to_nii(dcm) # convert to nifti
# convert to RAS+
thisVol_RAS = nib.as_closest_canonical(thisVol)
# get the data as a contiguous array (required for ZMQ)
thisVol_RAS_data = np.ascontiguousarray(thisVol_RAS.get_fdata())
### Create a header with metadata info
volHeader = {
'volIdx': volIdx,
'dtype': str(thisVol_RAS_data.dtype),
'shape': thisVol_RAS_data.shape,
'affine': json.dumps(thisVol_RAS.affine.tolist()),
'TR': str(dcm.RepetitionTime / 1000)}
### Send the voxel array and header to the pynealSocket
self.sendVolToPynealSocket(volHeader, thisVol_RAS_data)
def sendVolToPynealSocket(self, volHeader, voxelArray):
""" Send the volume data to Pyneal
Send the image data and header information for the specified volume to
Pyneal via the `pynealSocket`.
Parameters
----------
volHeader : dict
key:value pairs for all of the relevant metadata for this volume
voxelArray : numpy array
3D numpy array of voxel data from the volume, reoriented to RAS+
"""
self.logger.debug('TO pynealSocket: vol {}'.format(volHeader['volIdx']))
### Send data out the socket, listen for response
self.pynealSocket.send_json(volHeader, zmq.SNDMORE) # header as json
self.pynealSocket.send(voxelArray, flags=0, copy=False, track=False)
pynealSocketResponse = self.pynealSocket.recv_string()
# log the success
self.logger.debug('FROM pynealSocket: {}'.format(pynealSocketResponse))
# check if that was the last volume, and if so, stop
if 'STOP' in pynealSocketResponse:
self.stop()
def stop(self):
""" set the `alive` flag to False, stopping the thread """
self.alive = False
def Siemens_launch_rtfMRI(scannerSettings, scannerDirs):
""" Launch a real-time session in a Siemens environment.
This method should be called from pynealScanner.py before starting the
scanner. Once called, this method will take care of:
- monitoring the sessionDir for new series files to appear (and
then returing the new series number)
- set up the socket connection to send volume data over
- creating a Queue to store newly arriving DICOM files
- start a separate thread to monitor the new series appearing
- start a separate thread to process DICOMs that are in the Queue
"""
# Create a reference to the logger. This assumes the logger has already
# been created and customized by pynealScanner.py
logger = logging.getLogger(__name__)
#### SET UP PYNEAL SOCKET (this is what we'll use to
#### send data (e.g. header, volume voxel data) to remote connections)
# figure out host and port number to use
host = scannerSettings.get_pynealSocketHost()
port = scannerSettings.get_pynealSocketPort()
logger.debug('Scanner Socket Host: {}'.format(host))
logger.debug('Scanner Socket Port: {}'.format(port))
# create a socket connection
from .general_utils import create_pynealSocket
pynealSocket = create_pynealSocket(host, port)
logger.debug('Created pynealSocket')
# wait for remote to connect on pynealSocket
logger.info('Connecting to pynealSocket...')
while True:
msg = 'hello from pyneal_scanner '
pynealSocket.send_string(msg)
msgResponse = pynealSocket.recv_string()
if msgResponse == msg:
break
logger.info('pynealSocket connected')
### Wait for a new series directory appear
logger.info('Waiting for new series files to appear...')
seriesNum = scannerDirs.waitForNewSeries()
logger.info('New Series Number: {}'.format(seriesNum))
### Start threads to A) watch for new mosaic files, and B) process
# them as they appear
# initialize the dicom queue to keep store newly arrived
# dicom mosaic images, and keep track of which have been processed
dicomQ = Queue()
# create instance of class that will monitor sessionDir for new mosaic
# images to appear. Pass in a copy of the dicom queue. Start the thread
scanWatcher = Siemens_monitorSessionDir(scannerDirs.sessionDir, seriesNum, dicomQ)
scanWatcher.start()
# create an instance of the class that will grab mosaic dicoms
# from the queue, reformat the data, and pass over the socket
# to pyneal. Start the thread going
mosaicProcessor = Siemens_processMosaic(dicomQ, pynealSocket)
mosaicProcessor.start()
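def _launch_sketch(configFile='../scannerConfig.yaml'):
    """Illustrative sketch only (not part of the original module): roughly how
    pynealScanner.py might drive this module. The ScannerSettings constructor
    argument and config file path below are assumptions; see
    general_utils.ScannerSettings for the real interface.
    """
    from .general_utils import ScannerSettings
    scannerSettings = ScannerSettings(configFile)
    scannerDirs = Siemens_DirStructure(scannerSettings)
    Siemens_launch_rtfMRI(scannerSettings, scannerDirs)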
|
mit
| 7,839,139,264,983,837,000
| 38.713535
| 128
| 0.623552
| false
| 4.091795
| false
| false
| false
|
rnelson/adventofcode
|
advent2015/day08.py
|
1
|
2659
|
#!/usr/bin/env python
"""
http://adventofcode.com/day/8
Part 1
------
Space on the sleigh is limited this year, and so Santa will be
bringing his list as a digital copy. He needs to know how much
space it will take up when stored.
It is common in many programming languages to provide a way to
escape special characters in strings. For example, C, JavaScript,
Perl, Python, and even PHP handle special characters in very
similar ways.
However, it is important to realize the difference between the
number of characters in the code representation of the string
literal and the number of characters in the in-memory string
itself.
(examples removed because the interpreter was complaining
about the escaping - ha)
Disregarding the whitespace in the file, what is the number of
characters of code for string literals minus the number of characters
in memory for the values of the strings in total for the entire file?
For example, given the four strings above, the total number of
characters of string code (2 + 5 + 10 + 6 = 23) minus the total
number of characters in memory for string values (0 + 3 + 7 +
1 = 11) is 23 - 11 = 12.
Part 2
------
Now, let's go the other way. In addition to finding the number of
characters of code, you should now encode each code representation
as a new string and find the number of characters of the new encoded
representation, including the surrounding double quotes.
(examples removed because the interpreter was complaining
about the escaping - ha)
Your task is to find the total number of characters to represent
the newly encoded strings minus the number of characters of code in
each original string literal. For example, for the strings above,
the total encoded length (6 + 9 + 16 + 11 = 42) minus the characters
in the original code representation (23, just like in the first
part of this puzzle) is 42 - 23 = 19.
"""
from __future__ import print_function
import os
import re
import sys
INFILE = 'inputs/input08.txt'
def main():
total_length = 0
unescaped_length = 0
escaped_length = 0
with open(INFILE) as f:
# Part 1
for line in f:
input = line.strip()
total_length += len(input)
unescaped = input[1:-1].decode('string_escape')
unescaped_length += len(unescaped)
escaped = '"{}"'.format(re.escape(input))
escaped_length += len(escaped)
msg = '[Python] Puzzle 8-1: {}'
print(msg.format(total_length - unescaped_length))
# Part 2
msg = '[Python] Puzzle 8-2: {}'
print(msg.format(escaped_length - total_length))
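def _length_demo():
    """Illustrative sketch only (not part of the original solution): the three
    lengths for the literal "abc" -- 5 characters of code, 3 in memory and 9
    once re-encoded -- consistent with the 23/11/42 totals quoted above.
    """
    code = '"abc"'
    in_memory = code[1:-1]  # no escape sequences in this simple case
    encoded = '"' + code.replace('\\', '\\\\').replace('"', '\\"') + '"'
    return len(code), len(in_memory), len(encoded)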
if __name__ == '__main__':
main()
|
mit
| 7,221,743,905,681,149,000
| 29.918605
| 69
| 0.703272
| false
| 3.921829
| false
| false
| false
|
bfollinprm/Nquintessence
|
cosmoslik/cosmoslik_plugins/likelihoods/wmap/wmap.py
|
1
|
2594
|
from numpy import zeros
from cosmoslik import Likelihood, SubprocessExtension
import os
class wmap(Likelihood):
"""
===============
WMAP Likelihood
===============
- Written by WMAP team (see `<http://lambda.gsfc.nasa.gov/>`_)
- CosmoSlik module by Marius Millea
- Updated July 1, 2012
Description
===========
This module wraps the official WMAP likelihood code.
Some minor modifications were made to allow:
- Choosing the WMAP data directory at runtime
- Choosing the lmin/lmax at runtime
Install Notes
=============
    To build this module, run::
./cosmoslik.py --build likelihoods.wmap
The Makefile for this module reads the following flags from ``Makefile.inc``:
- ``$(CFITSIO)``
- ``$(LAPACK)``
- ``$(F2PYFLAGS)``
Models
======
The WMAP module requires a `Model` which provides the following:
- ``cl_TT``
- ``cl_TE``
- ``cl_EE``
- ``cl_BB``
Extra-galactic foregrounds are ignored.
Parameters
==========
This module reads the following parameters from the ini file:
[wmap].data_dir
---------------
The path to the wmap/data directory.
[wmap].use
----------
A subset of ``['TT','TE','EE','BB']`` corresponding to
which likelihood terms to use.
[wmap].TT.lrange
----------------
The TT range in ell to use in the likelihood
[wmap].TE.lrange
----------------
The TE range in ell to use in the likelihood
"""
def __init__(self,
datadir,
use=['TT','TE','EE','BB'],
ttmin=2,ttmax=1200,
temin=2,temax=800):
self.use = use
if not os.path.exists(datadir): raise Exception("The WMAP data directory you specified does not exist: '%s'"%datadir)
self.pywmap = SubprocessExtension('pywmap',globals())
self.pywmap.wmapinit(ttmin,ttmax,temin,temax,os.path.normpath(datadir)+'/')
def __call__(self, cmb):
cltt, clte, clee, clbb = [zeros(1202) for _ in range(4)]
for cl,x in zip([cltt,clte,clee,clbb],['TT','TE','EE','BB']):
if x in self.use:
m = cmb['cl_%s'%x]
s = slice(0,min(len(m),len(cl)))
cl[s] = m[s]
liketerms = self.pywmap.wmaplnlike(cltt=cltt[2:],clte=clte[2:],clee=clee[2:],clbb=clbb[2:])
return sum(liketerms)
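# Minimal usage sketch (not part of the original plugin). The data directory is
# hypothetical and the cl_* arrays are placeholders, not real spectra; it also
# assumes the pywmap extension has been built as described in the docstring.
#
#     wmap_like = wmap(datadir='wmap_data/',      # hypothetical location of the WMAP data
#                      use=['TT', 'TE'],
#                      ttmin=2, ttmax=1200,
#                      temin=2, temax=800)
#     fake_cmb = {'cl_%s' % x: some_cl_array for x in ['TT', 'TE', 'EE', 'BB']}  # arrays indexed by ell
#     lnlike = wmap_like(fake_cmb)                # sum of the likelihood terms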
|
mit
| -6,767,131,946,119,144,000
| 24.683168
| 125
| 0.515806
| false
| 3.786861
| false
| false
| false
|
yaricom/brainhash
|
src/experiment_cA7_dt_th_al_ah_bl_bh_gl.py
|
1
|
2069
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The experiment with 10 Hz/5Hz, wisp, attention, 70, cA 7, delta, theta, alpha low, alpha high, beta low, beta high, gamma low, batch size = 5 and
a balanced data set
@author: yaric
"""
import experiment as ex
import config
from time import time
n_hidden = 7
batch_size = 5
experiment_name = 'cA_%d_dt-th-a_l-a_h-b_l-b_h-g_l' % (n_hidden) # will be used as parent dir for analyzer results
# The sample records identifiers
signal_ids = ['IO_10_2', 'IO_TXT', 'IO_SKY', 'KS_10_2', 'RO_10_2']
noise_ids = ['noise']
# Setup analyzer configuration
analyzer_config = ex.defaultAnalyzerConfig()
analyzer_config['batch_size'] = batch_size
analyzer_config['learning_rate'] = 0.1
analyzer_config['n_hidden'] = n_hidden
analyzer_config['training_epochs'] = 50000
analyzer_config['encoder'] = 'cA'
analyzer_config['bands'] = 'delta,theta,alpha_l,alpha_h,beta_l,beta_h,gamma_l'
start = time()
#
# Run analyzer
#
print("\nStart analysis with parameters:\n%s\n" % analyzer_config)
print("Start analysis for signal records: %s" % signal_ids)
ex.runEEGAnalyzerWithIDs(ids_list=signal_ids,
experiment_name=experiment_name,
a_config=analyzer_config)
print("Start analysis for noise records: %s" % noise_ids)
ex.runEEGAnalyzerWithIDs(ids_list=noise_ids,
experiment_name=experiment_name,
a_config=analyzer_config)
#
# Run classifiers
#
signal_dir = "%s/%s" % (config.analyzer_out_dir, experiment_name)
noise_dir = "%s/%s/%s" % (config.analyzer_out_dir, experiment_name, noise_ids[0])
out_suffix = experiment_name
print("Run classifiers over analyzed records. \nSignal dir: %s\nNoise dir: %s"
% (signal_dir, noise_dir))
ex.runClassifier(signal_dir=signal_dir,
signal_records=signal_ids,
noise_dir=noise_dir,
out_suffix=out_suffix)
print("\n\nExperiment %s took %.2f seconds.\n"
% (experiment_name, time() - start))
|
gpl-3.0
| 5,035,609,485,294,253,000
| 31.328125
| 145
| 0.640889
| false
| 3.134848
| true
| false
| false
|
pbanaszkiewicz/amy
|
amy/extrequests/filters.py
|
1
|
7324
|
import re
from django.db.models import Q
from django.forms import widgets
import django_filters
from extrequests.models import SelfOrganisedSubmission, WorkshopInquiryRequest
from workshops.fields import Select2Widget
from workshops.filters import (
AllCountriesFilter,
AMYFilterSet,
ContinentFilter,
ForeignKeyAllValuesFilter,
NamesOrderingFilter,
StateFilterSet,
)
from workshops.models import Curriculum, Person, TrainingRequest, WorkshopRequest
# ------------------------------------------------------------
# TrainingRequest related filter and filter methods
# ------------------------------------------------------------
class TrainingRequestFilter(AMYFilterSet):
search = django_filters.CharFilter(
label="Name or Email",
method="filter_by_person",
)
group_name = django_filters.CharFilter(
field_name="group_name", lookup_expr="icontains", label="Group"
)
state = django_filters.ChoiceFilter(
label="State",
choices=(("no_d", "Pending or accepted"),) + TrainingRequest.STATE_CHOICES,
method="filter_training_requests_by_state",
)
matched = django_filters.ChoiceFilter(
label="Is Matched?",
choices=(
("", "Unknown"),
("u", "Unmatched"),
("p", "Matched trainee, unmatched training"),
("t", "Matched trainee and training"),
),
method="filter_matched",
)
nonnull_manual_score = django_filters.BooleanFilter(
label="Manual score applied",
method="filter_non_null_manual_score",
widget=widgets.CheckboxInput,
)
affiliation = django_filters.CharFilter(
method="filter_affiliation",
)
location = django_filters.CharFilter(lookup_expr="icontains")
order_by = NamesOrderingFilter(
fields=(
"created_at",
"score_total",
),
)
class Meta:
model = TrainingRequest
fields = [
"search",
"group_name",
"state",
"matched",
"affiliation",
"location",
]
def filter_matched(self, queryset, name, choice):
if choice == "":
return queryset
elif choice == "u": # unmatched
return queryset.filter(person=None)
elif choice == "p": # matched trainee, unmatched training
return (
queryset.filter(person__isnull=False)
.exclude(
person__task__role__name="learner",
person__task__event__tags__name="TTT",
)
.distinct()
)
else: # choice == 't' <==> matched trainee and training
return queryset.filter(
person__task__role__name="learner",
person__task__event__tags__name="TTT",
).distinct()
def filter_by_person(self, queryset, name, value):
if value == "":
return queryset
else:
# 'Harry Potter' -> ['Harry', 'Potter']
tokens = re.split(r"\s+", value)
# Each token must match email address or github username or
# personal, or family name.
for token in tokens:
queryset = queryset.filter(
Q(personal__icontains=token)
| Q(middle__icontains=token)
| Q(family__icontains=token)
| Q(email__icontains=token)
| Q(person__personal__icontains=token)
| Q(person__middle__icontains=token)
| Q(person__family__icontains=token)
| Q(person__email__icontains=token)
)
return queryset
def filter_affiliation(self, queryset, name, affiliation):
if affiliation == "":
return queryset
else:
q = Q(affiliation__icontains=affiliation) | Q(
person__affiliation__icontains=affiliation
)
return queryset.filter(q).distinct()
def filter_training_requests_by_state(self, queryset, name, choice):
if choice == "no_d":
return queryset.exclude(state="d")
else:
return queryset.filter(state=choice)
def filter_non_null_manual_score(self, queryset, name, manual_score):
if manual_score:
return queryset.filter(score_manual__isnull=False)
return queryset
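# Illustrative sketch (not part of the original module): how TrainingRequestFilter
# is typically applied in a view; the request data shown here is hypothetical.
#
#     filterset = TrainingRequestFilter(
#         data={'search': 'Harry Potter', 'state': 'no_d'},
#         queryset=TrainingRequest.objects.all(),
#     )
#     pending_requests = filterset.qs  # filtered queryset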
# ------------------------------------------------------------
# WorkshopRequest related filter and filter methods
# ------------------------------------------------------------
class WorkshopRequestFilter(AMYFilterSet, StateFilterSet):
assigned_to = ForeignKeyAllValuesFilter(Person, widget=Select2Widget)
country = AllCountriesFilter(widget=Select2Widget)
continent = ContinentFilter(widget=Select2Widget, label="Continent")
requested_workshop_types = django_filters.ModelMultipleChoiceFilter(
label="Requested workshop types",
queryset=Curriculum.objects.all(),
widget=widgets.CheckboxSelectMultiple(),
)
order_by = django_filters.OrderingFilter(
fields=("created_at",),
)
class Meta:
model = WorkshopRequest
fields = [
"state",
"assigned_to",
"requested_workshop_types",
"country",
]
# ------------------------------------------------------------
# WorkshopInquiryRequest related filter and filter methods
# ------------------------------------------------------------
class WorkshopInquiryFilter(AMYFilterSet, StateFilterSet):
assigned_to = ForeignKeyAllValuesFilter(Person, widget=Select2Widget)
country = AllCountriesFilter(widget=Select2Widget)
continent = ContinentFilter(widget=Select2Widget, label="Continent")
requested_workshop_types = django_filters.ModelMultipleChoiceFilter(
label="Requested workshop types",
queryset=Curriculum.objects.all(),
widget=widgets.CheckboxSelectMultiple(),
)
order_by = django_filters.OrderingFilter(
fields=("created_at",),
)
class Meta:
model = WorkshopInquiryRequest
fields = [
"state",
"assigned_to",
"requested_workshop_types",
"country",
]
# ------------------------------------------------------------
# SelfOrganisedSubmission related filter and filter methods
# ------------------------------------------------------------
class SelfOrganisedSubmissionFilter(AMYFilterSet, StateFilterSet):
assigned_to = ForeignKeyAllValuesFilter(Person, widget=Select2Widget)
country = AllCountriesFilter(widget=Select2Widget)
continent = ContinentFilter(widget=Select2Widget, label="Continent")
workshop_types = django_filters.ModelMultipleChoiceFilter(
label="Requested workshop types",
queryset=Curriculum.objects.all(),
widget=widgets.CheckboxSelectMultiple(),
)
order_by = django_filters.OrderingFilter(
fields=("created_at",),
)
class Meta:
model = SelfOrganisedSubmission
fields = [
"state",
"assigned_to",
"workshop_types",
"workshop_format",
]
|
mit
| -316,208,959,969,467,600
| 31.264317
| 83
| 0.560213
| false
| 4.5775
| false
| false
| false
|
jmcanterafonseca/fiware-cygnus
|
test/acceptance/tools/ckan_utils.py
|
1
|
14123
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of fiware-cygnus (FI-WARE project).
#
# fiware-cygnus is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
# later version.
# fiware-cygnus is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with fiware-cygnus. If not, see
# http://www.gnu.org/licenses/.
#
# For those usages not covered by the GNU Affero General Public License please contact:
# iot_support at tid.es
#
__author__ = 'Iván Arias León (ivan.ariasleon at telefonica dot com)'
# general constants
from tools import general_utils, http_utils
EMPTY = u''
WITHOUT = u'without'
# url, headers and payload constants
HEADER_AUTHORIZATION = u'authorization'
HEADER_CONTENT_TYPE = u'Content-Type'
HEADER_APPLICATION = u'application/json'
VERSION = u'ckan_version'
VERSION_VALUE_DEFAULT = u'2.0'
HOST = u'host'
HOST_VALUE_DEFAULT = u'127.0.0.1'
PORT = u'port'
PORT_VALUE_DEFAULT = u'80'
AUTHORIZATION = u'authorization'
VERIFY_VERSION = u'verify_version'
FALSE_VALUE = u'false'
ORION_URL = u'orion_url'
ORION_URL_DEFAULT = u'http://localhost:1026'
SSL = u'ssl'
RETRIES_DATASET_SEARCH = u'retries_dataset_search'
DELAY_TO_RETRY = u'delay_to_retry'
PATH_VERSION_CKAN = u'api/util/status'
PATH_API_CREATE = u'api/3/action'
PATH_PACKAGE_SHOW = u'package_show?id='
PATH_DSTORE_SEARCH_SQL = u'datastore_search_sql?sql='
ORGANIZATION_LIST = u'organization_list'
ORGANIZATION_CREATE = u'organization_create'
PACKAGE_CREATE = u'package_create'
RESOURCE_CREATE = u'resource_create'
DATASTORE_CREATE = u'datastore_create'
PACKAGE_SHOW = u'package_show'
DATASTORE_SEARCH_SQL = u'datastore_search_sql'
RESULT = u'result'
RECORDS = u'records'
NAME = u'name'
OWNER_ORG = u'owner_org'
ID = u'id'
TYPE = u'type'
RESOURCES = u'resources'
URL_EXAMPLE = u'http://foo.bar/newresource'
URL = u'url'
PACKAGE_ID = u'package_id'
RESOURCE_ID = u'resource_id'
FIELD = u'fields'
FORCE = u'force'
RECVTIME = u'recvTime'
TIMESTAMP = u'timestamp'
TRUE = u'true'
class Ckan:
def __init__(self, **kwargs):
"""
constructor
:param ckan_version: ckan version (OPTIONAL)
:param ckan_verify_version: determine whether the version is verified or not (True or False). (OPTIONAL)
:param authorization: API KEY (authorization) used in ckan requests (OPTIONAL)
:param host: ckan host (MANDATORY)
:param port: ckan port (MANDATORY)
:param orion_url: Orion URL used to compose the resource URL with the convenience operation URL to query it (OPTIONAL)
:param ssl: enable SSL for secure Http transportation; 'true' or 'false' (OPTIONAL)
:param capacity: capacity of the channel (OPTIONAL)
:param channel_transaction_capacity: amount of bytes that can be sent per transaction (OPTIONAL)
:param retries_number: number of retries when get values (OPTIONAL)
:param delay_to_retry: time to delay each retry (OPTIONAL)
endpoint_url: endpoint url used in ckan requests
"""
self.version = kwargs.get(VERSION, VERSION_VALUE_DEFAULT)
self.ckan_verify_version = kwargs.get(VERIFY_VERSION, FALSE_VALUE)
self.authorization = kwargs.get(AUTHORIZATION, EMPTY)
self.host = kwargs.get(HOST, HOST_VALUE_DEFAULT)
self.port = kwargs.get(PORT, PORT_VALUE_DEFAULT)
self.orion_url = kwargs.get(ORION_URL, ORION_URL_DEFAULT)
self.ssl = kwargs.get(SSL, FALSE_VALUE)
self.capacity = kwargs.get("capacity", "1000")
self.transaction_capacity= kwargs.get("transaction_capacity", "100")
self.retries_number = kwargs.get(RETRIES_DATASET_SEARCH, 15)
self.retry_delay = kwargs.get(DELAY_TO_RETRY, 10)
if self.ssl.lower() == "true":
self.endpoint = "https://"
if self.ssl.lower() == "false":
self.endpoint = "http://"
self.endpoint = self.endpoint + self.host+":"+self.port
def __create_url(self, operation, element=EMPTY):
"""
create the url for different operations
:param operation: operation type (dataset, etc)
:return: request url
"""
if operation == VERSION:
value = "%s/%s" % (self.endpoint, PATH_VERSION_CKAN)
if operation == ORGANIZATION_CREATE or operation == PACKAGE_CREATE or operation == RESOURCE_CREATE or operation == DATASTORE_CREATE or operation == ORGANIZATION_LIST:
value = "%s/%s/%s" % (self.endpoint, PATH_API_CREATE, operation) # organization Name
if operation == PACKAGE_SHOW:
value = "%s/%s/%s%s" % (self.endpoint, PATH_API_CREATE, PATH_PACKAGE_SHOW, element) # datasetName
if operation == DATASTORE_SEARCH_SQL:
value = "%s/%s/%s%s" % (self.endpoint, PATH_API_CREATE, PATH_DSTORE_SEARCH_SQL, element) # sql
return value
def __create_headers(self):
"""
create headers for different requests
:return header dict
"""
return {HEADER_AUTHORIZATION: self.authorization, HEADER_CONTENT_TYPE: HEADER_APPLICATION}
def __create_datastore_in_resource (self, resource_id, fields):
"""
create a datastore in a resource
:param resource_id: resource id
:param fields: field in datastore
"""
payload = general_utils.convert_dict_to_str({RESOURCE_ID: resource_id,
FIELD:fields,
FORCE: TRUE}, general_utils.JSON)
resp = http_utils.request(http_utils.POST, url=self.__create_url(DATASTORE_CREATE), headers=self.__create_headers(), data=payload)
http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp, "ERROR - Creating datastore in resource id: %s" % (resource_id))
# ------------------------------ public methods ----------------------------------------
def verify_version (self):
"""
        Verify that CKAN is installed and that its version matches the expected one (default version is 2.0)
"""
if self.ckan_verify_version.lower() == "true":
resp= http_utils.request(http_utils.GET, url=self.__create_url(VERSION), headers=self.__create_headers())
body_dict = general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
assert self.version == str(body_dict[VERSION]), \
"Wrong ckan version verified: %s. Expected: %s. \n\nBody content: %s" % (str(body_dict[VERSION]), str(self.version), str(resp.text))
return True
def verify_if_organization_exist(self, name):
"""
        Verify if the organization exists
        :param name: organization name
        :return: True if the organization exists, False otherwise
"""
resp = http_utils.request(http_utils.GET, url=self.__create_url(ORGANIZATION_LIST, name), headers=self.__create_headers())
http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp, "ERROR - list of the names of the site's organizations...")
body_dict = general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
for i in range(len(body_dict[RESULT])):
if body_dict[RESULT][i] == name: return True
return False
def create_organization (self, name):
"""
Create a new organization if it does not exist
:param name: organization name
"""
self.organization = name
if not(self.verify_if_organization_exist(name)):
payload = general_utils.convert_dict_to_str({NAME: name}, general_utils.JSON)
resp= http_utils.request(http_utils.POST, url=self.__create_url(ORGANIZATION_CREATE), headers=self.__create_headers(), data=payload)
http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp, "ERROR - creating organization: %s ..." % (name))
return True
return False
def get_organization (self):
"""
get organization name
:return: organization name
"""
return self.organization
def verify_if_dataset_exist(self, name):
"""
        Verify if the dataset exists
        :param name: dataset name
        :return: the dataset id if the dataset exists, False otherwise
"""
resp = http_utils.request(http_utils.GET, url=self.__create_url(PACKAGE_SHOW, name), headers=self.__create_headers())
if resp.status_code == http_utils.status_codes[http_utils.OK]:
bodyDict= general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
self.dataset_id = bodyDict[RESULT][ID]
return self.dataset_id
return False
def create_dataset (self, name):
"""
Create a new dataset if it does not exist
:param name: dataset name
"""
self.dataset = name
if not(self.verify_if_dataset_exist( name)):
payload = general_utils.convert_dict_to_str({NAME: self.dataset,
OWNER_ORG: self.organization}, general_utils.JSON)
resp= http_utils.request(http_utils.POST, url=self.__create_url(PACKAGE_CREATE), headers=self.__create_headers(), data=payload)
http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp, "ERROR - creating dataset: %s ..." % (name))
bodyDict= general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
self.dataset_id = bodyDict[RESULT][ID]
return bodyDict[RESULT][ID]
return False
def get_dataset (self):
"""
get dataset name and dataset id
:return: dataset name and dataset id
"""
return self.dataset, self.dataset_id
def verify_if_resource_exist(self, name, dataset_name):
"""
        Verify if the resource exists in a dataset
        :param name: resource name
        :param dataset_name: dataset name
        :return: the resource id if the resource exists, False otherwise
"""
resp = http_utils.request(http_utils.GET, url=self.__create_url(PACKAGE_SHOW, dataset_name), headers=self.__create_headers())
if resp.status_code == http_utils.status_codes[http_utils.OK]:
body_dict = general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
for i in range(len(body_dict[RESULT][RESOURCES])):
if body_dict[RESULT][RESOURCES][i][NAME] == name:
self.resource_id = body_dict[RESULT][RESOURCES][i][ID]
return self.resource_id
return False
def generate_field_datastore_to_resource (self, attributes_number, attributes_name, attribute_type, metadata_type):
"""
        generate the list of fields for a datastore request
:return: fields list
"""
field = []
field.append({ID:RECVTIME, TYPE: TIMESTAMP})
for i in range(0, int(attributes_number)):
if attribute_type != WITHOUT: field.append({ID:attributes_name+"_"+str(i), TYPE: attribute_type})
if metadata_type != WITHOUT:field.append({ID:attributes_name+"_"+str(i)+"_md", TYPE: metadata_type})
return field
def create_resource(self, name, dataset_name, fields=[]):
self.resource = name
if not(self.verify_if_resource_exist(name, dataset_name)):
payload = general_utils.convert_dict_to_str({NAME: self.resource,
URL: URL_EXAMPLE,
PACKAGE_ID: self.dataset_id}, general_utils.JSON)
resp= http_utils.request(http_utils.POST, url=self.__create_url(RESOURCE_CREATE), headers=self.__create_headers(), data=payload)
http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp, "ERROR - creating resource: %s ..." % (name))
bodyDict= general_utils.convert_str_to_dict(resp.text, general_utils.JSON)
self.resource_id = bodyDict[RESULT][ID]
self.__create_datastore_in_resource (self.resource_id, fields)
return self.resource_id
return False
def get_resource (self):
"""
get resource name and resource id
:return: resource name and resource id
"""
return self.resource, self.resource_id
def datastore_search_last_sql (self, rows, resource_name, dataset_name):
"""
        get the last records of a resource
        :param rows: number of rows to retrieve
        :param resource_name: resource name
        :param dataset_name: dataset name
        :return: the http response with the records, or False if the resource does not exist
"""
resource_id = self.verify_if_resource_exist(resource_name, dataset_name)
if resource_id != False:
            sql = 'SELECT * from "' + resource_id + '" ORDER BY 1 DESC LIMIT ' + str(rows)
resp= http_utils.request(http_utils.POST, url=self.__create_url(DATASTORE_SEARCH_SQL, sql), headers=self.__create_headers(), data=EMPTY)
http_utils.assert_status_code(http_utils.status_codes[http_utils.OK], resp, "ERROR - creating resource: %s ..." % (resource_name))
return resp
return resource_id
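# Minimal usage sketch (not part of the original helper); the endpoint, API key
# and names below are hypothetical.
#
#     ckan = Ckan(host='localhost', port='80', authorization='my-api-key', ssl='false')
#     ckan.verify_version()
#     ckan.create_organization('my_org')
#     ckan.create_dataset('my_dataset')
#     fields = ckan.generate_field_datastore_to_resource(2, 'temperature', 'text', 'json')
#     ckan.create_resource('my_resource', 'my_dataset', fields)
#     resp = ckan.datastore_search_last_sql(10, 'my_resource', 'my_dataset')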
|
agpl-3.0
| -1,284,196,080,977,096,200
| 46.864407
| 174
| 0.610552
| false
| 3.881253
| false
| false
| false
|
alphagov/notifications-delivery
|
tests/clients/test_aws_ses.py
|
1
|
1062
|
from moto import mock_ses
from notifications_delivery.clients.email.aws_ses import (AwsSesClient, AwsSesClientException)
@mock_ses
def test_send_email(ses_client):
aws_ses_client = AwsSesClient(region='eu-west-1')
source = "source@notify.gov.uk"
to_address = "random@random.com"
subject = "Email subject"
body = "Email body"
# All source email addresses have to be verified before you
# can send on behalf of them.
ses_client.verify_email_identity(EmailAddress=source)
message_id = aws_ses_client.send_email(source, to_address, subject, body)
assert message_id
@mock_ses
def test_send_email_not_verified(ses_client):
aws_ses_client = AwsSesClient(region='eu-west-1')
source = "source@notify.gov.uk"
to_address = "random@random.com"
subject = "Email subject"
body = "Email body"
    try:
        aws_ses_client.send_email(source, to_address, subject, body)
    except AwsSesClientException as e:
        assert 'Did not have authority to send from email source@notify.gov.uk' in str(e)
    else:
        raise AssertionError('Expected AwsSesClientException for an unverified source address')
|
mit
| 3,366,847,243,641,234,400
| 35.62069
| 94
| 0.704331
| false
| 3.267692
| false
| false
| false
|
cocrawler/cocrawler
|
cocrawler/fetcher.py
|
1
|
10249
|
'''
async fetching of urls.
Assumes robots checks have already been done.
Success returns response object and response bytes (which were already
read in order to shake out all potential network-related exceptions.)
Failure returns enough details for the caller to do something smart:
503, other 5xx, DNS fail, connect timeout, error between connect and
full response, proxy failure. Plus an errorstring good enough for logging.
'''
import time
import traceback
from collections import namedtuple
import ssl
import urllib
import asyncio
import logging
import aiohttp
from . import stats
from . import config
from . import content
from .urls import URL
LOGGER = logging.getLogger(__name__)
# these errors get printed deep in aiohttp but they also bubble up
aiohttp_errors = {
'SSL handshake failed',
'SSL error errno:1 reason: CERTIFICATE_VERIFY_FAILED',
'SSL handshake failed on verifying the certificate',
'Fatal error on transport TCPTransport',
'Fatal error on SSL transport',
'SSL error errno:1 reason: UNKNOWN_PROTOCOL',
'Future exception was never retrieved',
'Unclosed connection',
'SSL error errno:1 reason: TLSV1_UNRECOGNIZED_NAME',
'SSL error errno:1 reason: SSLV3_ALERT_HANDSHAKE_FAILURE',
'SSL error errno:1 reason: TLSV1_ALERT_INTERNAL_ERROR',
}
class AsyncioSSLFilter(logging.Filter):
def filter(self, record):
stats.stats_sum('filter examined a {} {} log line'.format(record.name, record.levelname), 1)
if record.name == 'asyncio' and record.levelname == 'ERROR':
msg = record.getMessage()
for ae in aiohttp_errors:
if msg.startswith(ae):
stats.stats_sum('filter suppressed a asyncio ERROR log line', 1)
return False
return True
def establish_filters():
f = AsyncioSSLFilter()
logging.getLogger('asyncio').addFilter(f)
# XXX should be a policy plugin
# XXX cookie handling -- can be per-get -- make per-domain jar
def apply_url_policies(url, crawler):
headers = {}
headers['User-Agent'] = crawler.ua
if crawler.prevent_compression:
headers['Accept-Encoding'] = 'identity'
else:
headers['Accept-Encoding'] = content.get_accept_encoding()
if crawler.upgrade_insecure_requests:
headers['Upgrade-Insecure-Requests'] = '1'
proxy, prefetch_dns = global_policies()
get_kwargs = {'headers': headers, 'proxy': proxy}
return prefetch_dns, get_kwargs
def global_policies():
proxy = config.read('Fetcher', 'ProxyAll')
prefetch_dns = not proxy or config.read('GeoIP', 'ProxyGeoIP')
return proxy, prefetch_dns
FetcherResponse = namedtuple('FetcherResponse', ['response', 'body_bytes', 'ip', 'req_headers',
't_first_byte', 't_last_byte', 'is_truncated',
'last_exception'])
async def fetch(url, session,
allow_redirects=None, max_redirects=None,
stats_prefix='', max_page_size=-1, get_kwargs={}):
last_exception = None
is_truncated = False
response = None
try:
t0 = time.time()
last_exception = None
body_bytes = b''
blocks = []
left = max_page_size
ip = None
with stats.coroutine_state(stats_prefix+'fetcher fetching'):
with stats.record_latency(stats_prefix+'fetcher fetching', url=url.url):
response = await session.get(url.url,
allow_redirects=allow_redirects,
max_redirects=max_redirects,
**get_kwargs)
t_first_byte = '{:.3f}'.format(time.time() - t0)
if 'proxy' not in get_kwargs and response.connection:
# this is racy, often the connection is already None unless the crawler is busy
addr = response.connection.transport.get_extra_info('peername')
if addr:
stats.stats_sum(stats_prefix+'fetch ip from connection', 1)
ip = [addr[0]] # ipv4 or ipv6
while left > 0:
# reading stream directly to dodge decompression and limit size.
# this means that aiohttp tracing on_response_chunk_receive doesn't work
block = await response.content.read(left)
if not block:
body_bytes = b''.join(blocks)
break
blocks.append(block)
left -= len(block)
else:
body_bytes = b''.join(blocks)
if not response.content.at_eof():
stats.stats_sum(stats_prefix+'fetch truncated length', 1)
response.close() # this does interrupt the network transfer
is_truncated = 'length'
t_last_byte = '{:.3f}'.format(time.time() - t0)
except asyncio.TimeoutError:
stats.stats_sum(stats_prefix+'fetch timeout', 1)
last_exception = 'TimeoutError'
body_bytes = b''.join(blocks)
if len(body_bytes):
# these body_bytes are currently dropped because last_exception is set
is_truncated = 'time'
stats.stats_sum(stats_prefix+'fetch timeout body bytes found', 1)
stats.stats_sum(stats_prefix+'fetch timeout body bytes found bytes', len(body_bytes))
except (aiohttp.ClientError) as e:
# ClientError is a catchall for a bunch of things
# e.g. DNS errors, '400' errors for http parser errors
# ClientConnectorCertificateError for an SSL cert that doesn't match hostname
# ClientConnectorSSLError see https://bugs.python.org/issue27970 for python not handling missing intermediate certs
# ClientConnectorError(None, None) caused by robots redir to DNS fail
# ServerDisconnectedError(None,) caused by servers that return 0 bytes for robots.txt fetches
# TooManyRedirects("0, message=''",) caused by too many robots.txt redirs
stats.stats_sum(stats_prefix+'fetch ClientError', 1)
detailed_name = str(type(e).__name__)
last_exception = 'ClientError: ' + detailed_name + ': ' + str(e)
body_bytes = b''.join(blocks)
if len(body_bytes):
# these body_bytes are currently dropped because last_exception is set
is_truncated = 'disconnect'
stats.stats_sum(stats_prefix+'fetch ClientError body bytes found', 1)
stats.stats_sum(stats_prefix+'fetch ClientError body bytes found bytes', len(body_bytes))
except ssl.CertificateError as e:
# many ssl errors raise and have tracebacks printed deep in python, fixed in 3.8
stats.stats_sum(stats_prefix+'fetch SSL CertificateError', 1)
last_exception = 'CertificateError: ' + str(e)
except ValueError as e:
# no A records found -- raised by our dns code
# aiohttp raises:
# ValueError Location: https:/// 'Host could not be detected' -- robots fetch
# ValueError Location: http:// /URL should be absolute/ -- robots fetch
# ValueError 'Can redirect only to http or https' -- robots fetch -- looked OK to curl!
stats.stats_sum(stats_prefix+'fetch other error - ValueError', 1)
        last_exception = 'ValueError: ' + str(e)
except AttributeError as e:
stats.stats_sum(stats_prefix+'fetch other error - AttributeError', 1)
last_exception = 'AttributeError: ' + str(e)
except RuntimeError as e:
stats.stats_sum(stats_prefix+'fetch other error - RuntimeError', 1)
last_exception = 'RuntimeError: ' + str(e)
except asyncio.CancelledError:
raise
except Exception as e:
last_exception = 'Exception: ' + str(e)
stats.stats_sum(stats_prefix+'fetch surprising error', 1)
LOGGER.info('Saw surprising exception in fetcher working on %s:\n%s', url.url, last_exception)
traceback.print_exc()
# if redirs are allowed the url must be set to the final url
if response and str(response.url) != url.url:
if allow_redirects:
url = URL(str(response.url))
else:
# TODO: this fires for quoting: {{%20data.src%20}} comes out %7B%7B%20data.src%20%7D%7D
LOGGER.error('Surprised that I fetched %s and got %s', url.url, str(response.url))
if last_exception is not None:
if body_bytes:
LOGGER.info('we failed working on %s, the last exception is %s, dropped %d body bytes', url.url, last_exception, len(body_bytes))
else:
LOGGER.info('we failed working on %s, the last exception is %s', url.url, last_exception)
return FetcherResponse(None, None, None, None, None, None, False, last_exception)
fr = FetcherResponse(response, body_bytes, ip, response.request_info.headers,
t_first_byte, t_last_byte, is_truncated, None)
if response.status >= 500:
LOGGER.debug('server returned http status %d', response.status)
stats.stats_sum(stats_prefix+'fetch bytes', len(body_bytes) + len(response.raw_headers))
stats.stats_sum(stats_prefix+'fetch URLs', 1)
stats.stats_sum(stats_prefix+'fetch http code=' + str(response.status), 1)
# checks after fetch:
# hsts header?
# if ssl, check strict-transport-security header, remember max-age=foo part., other stuff like includeSubDomains
# did we receive cookies? was the security bit set?
return fr
def upgrade_scheme(url):
'''
Upgrade crawled scheme to https, if reasonable. This helps to reduce MITM attacks against the crawler.
https://chromium.googlesource.com/chromium/src/net/+/master/http/transport_security_state_static.json
    Alternatively, the return headers from a site might have strict-transport-security set ... a bit more
    dangerous as we'd have to respect the timeout to avoid permanently learning something that's broken
    TODO: use HTTPS Everywhere? would have to have a fallback if https failed, which it occasionally will
'''
return url
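# Minimal usage sketch (not part of the original module); the URL and sizes are
# hypothetical, and it assumes config/stats have been initialised the way the
# crawler normally does before fetching.
#
#     async def example(session):
#         fr = await fetch(URL('http://example.com/'), session,
#                          allow_redirects=True, max_redirects=5,
#                          max_page_size=1024 * 1024)
#         if fr.last_exception is None:
#             print(fr.response.status, len(fr.body_bytes), fr.t_last_byte)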
|
apache-2.0
| -2,570,987,281,301,282,000
| 40.662602
| 141
| 0.630891
| false
| 4.181559
| false
| false
| false
|
lukaszkoczwara/presence-analyzer-lkoczwara
|
src/presence_analyzer/helpers.py
|
1
|
1777
|
# -*- coding: utf-8 -*-
"""
Helper functions used in views.
"""
from json import dumps
from functools import wraps
from flask import Response
def jsonify(function):
"""
Creates a response with the JSON representation of wrapped function result.
"""
@wraps(function)
def inner(*args, **kwargs):
return Response(dumps(function(*args, **kwargs)),
mimetype='application/json')
return inner
def group_by_weekday(items):
"""
Groups presence entries by weekday.
"""
result = {i: [] for i in range(7)}
for date in items:
start = items[date]['start']
end = items[date]['end']
result[date.weekday()].append(interval(start, end))
return result
def seconds_since_midnight(time):
"""
Calculates amount of seconds since midnight.
"""
return time.hour * 3600 + time.minute * 60 + time.second
def interval(start, end):
"""
    Calculates the interval in seconds between two datetime.time objects.
"""
return seconds_since_midnight(end) - seconds_since_midnight(start)
def mean(items):
"""
Calculates arithmetic mean. Returns zero for empty lists.
"""
return float(sum(items)) / len(items) if len(items) > 0 else 0
def group_start_end_times_by_weekday(items):
"""
Groups start and end times in sec. by weekday.
"""
result = {i: {'start': [], 'end': []} for i in range(7)}
for date, start_end in items.iteritems():
start = start_end['start']
end = start_end['end']
result[date.weekday()]['start'].append(seconds_since_midnight(start))
result[date.weekday()]['end'].append(seconds_since_midnight(end))
return result
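# Minimal usage sketch (not part of the original helpers), using made-up times.
if __name__ == '__main__':
    from datetime import date, time
    sample = {date(2013, 9, 10): {'start': time(9, 0, 0), 'end': time(17, 30, 0)}}
    print(interval(time(9, 0, 0), time(17, 30, 0)))  # 30600 seconds of presence
    print(mean([30600, 28800]))                      # arithmetic mean in seconds
    print(group_by_weekday(sample))                  # {..., 1: [30600], ...} (Tuesday)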
|
mit
| -492,233,173,606,348,740
| 24.924242
| 79
| 0.601013
| false
| 3.993258
| false
| false
| false
|
CompSci17/Survey-System
|
survey_system_files/results.py
|
1
|
11810
|
from .models import Answers, RadioResults, SelectResults, ImportanceOrderResults, CheckboxResults
from chartit import DataPool, Chart
class Results( ):
def render_results( self, questions, survey ):
"""
Sorts out logic behind how we present our answers.
@param questions QuerySet Questions we're working with
@param survey Object The survey we're rendering results for
        @return A list of tuples, one per question, to be utilised in the view.
        Text/Textarea entries are of the form: ( input_type, list_of_answers, question_pk )
        Every other input is of the form: ( input_type, chart_object, question_pk ),
        except order-of-importance questions, which return ( input_type, counter_dict, question_pk )
"""
# A list to hold our output tuples
output = []
for question in questions:
# For every question in the QuerySet, we're going to check and process
# it dependent on input type
if question.input_type == 'text':
# get question's results
results = self.get_results( question )
combined_results = []
for result in results:
# For every answer we have, put it inside a list
combined_results.append( str( result.text ) )
# Add our input type, list and primary key to our output list
output.append( ( "text", combined_results, question.pk ) )
elif question.input_type == 'textarea':
# get question's results
results = self.get_results( question )
combined_results = []
for result in results:
# For every answer we have, put it inside a list
combined_results.append( str( result.text ) )
# Add our input type, list and primary key to our output list
output.append( ( "textarea", combined_results, question.pk ) )
elif question.input_type == 'radio':
# Get all the options offered by the question
options = self.get_choices( question.choices )
# Dictionary for counting the occurrences of a selection
counter = {}
# Get our question's results
answers = self.get_results( question )
for option in options:
# For every option, add it to our dictionary; starting with 0
counter.update( { option.strip().replace( ",", "" ) : 0 } )
for answer in answers:
# For every answer, increment the answer in the dictionary
counter[ str( answer.text ).strip().replace( ",", "" ) ] += 1
for option in options:
# Check if the count for this question already exists
existence_check = RadioResults.objects.filter(
survey__exact = survey,
question__exact = question,
answer__exact = option.strip().replace( ",", "" )
)
if existence_check.exists( ):
# If it exists, pass in the primary key
result = RadioResults(
pk = existence_check[0].pk,
survey = survey,
question = question,
answer = option.strip().replace( ",", "" ),
answer_count = counter[ str( option ).strip().replace( ",", "" ) ]
)
else:
# If it doesn't exist, leave out the primary key
result = RadioResults(
survey = survey,
question = question,
answer = option.strip().replace( ",", "" ),
answer_count = counter[ str( option ).strip().replace( ",", "" ) ]
)
# Save our set of results
result.save()
# Get our chart object for the list
piechart = self.radio_pie_chart( question )
# Add our input type, chart object and primary key to our output list
output.append( ( "radio", piechart, question.pk ) )
elif question.input_type == 'select':
# Get all the options offered by the question
options = self.get_choices( question.choices )
# Dictionary for counting the occurrences of a selection
counter = {}
# Get our question's results
answers = self.get_results( question )
for option in options:
# For every option, add it to our dictionary; starting with 0
counter.update( { option.strip().replace( ",", "" ) : 0 } )
for answer in answers:
# For every answer, increment the answer in the dictionary
counter[ str( answer.text ).strip().replace( ",", "" ) ] += 1
for option in options:
# Check if the count for this question already exists
existence_check = SelectResults.objects.filter(
survey__exact = survey,
question__exact = question,
answer__exact = option.strip().replace( ",", "" )
)
if existence_check.exists( ):
# If it exists, pass in the primary key
result = SelectResults(
pk = existence_check[0].pk,
survey = survey,
question = question,
answer = option.strip().replace( ",", "" ),
answer_count = counter[ str( option ).strip().replace( ",", "" ) ]
)
else:
# If it doesn't exist, leave out the primary key
result = SelectResults(
survey = survey,
question = question,
answer = option.strip().replace( ",", "" ),
answer_count = counter[ str( option ).strip().replace( ",", "" ) ]
)
# Save our set of results
result.save()
# Get our chart object for the list
piechart = self.select_pie_chart( question )
# Add our input type, chart object and primary key to our output list
output.append( ( "select", piechart, question.pk ) )
elif question.input_type == 'checkbox':
# Get all the question's answers
answers = self.get_results( question )
# We'll use this to keep track of the answer count
counter = {}
# Get all the question's options/choices
options = self.get_choices( question.choices )
for option in options:
# initialise each option in the counter with 0
counter.update( { option.strip() : 0 } )
for answer in answers:
# Get a list of all the answers
delimited_answers = answer.text.split( "," )
for indiv_answer in delimited_answers:
# For every answer, increment it in the counter
counter[ indiv_answer.strip() ] += 1
for option in counter:
# Check if the question already has a count going in the database
existence_check = CheckboxResults.objects.filter(
survey__exact = survey,
question__exact = question,
answer__exact = option.strip()
)
if existence_check.exists():
# If it exists, just update it
result = CheckboxResults(
pk = existence_check[0].pk,
survey = survey,
question = question,
answer = option,
answer_count = counter[ option.strip() ]
)
else:
# If it doesn't exist, create it
result = CheckboxResults(
survey = survey,
question = question,
answer = option,
answer_count = counter[ option.strip() ]
)
# Save the result in the model
result.save()
# Create new bar chart
bar_chart = self.checkbox_bar_chart( question )
# Append the checkbox details to the returned output
output.append( ( "checkbox", bar_chart, question.pk ) )
elif question.input_type == 'order':
# Get all the question's options
options = self.get_choices( question.choices )
# Get the number of options
number_of_options = len( options )
# We'll use this to keep track of the answer count
counter = {}
for integer_counter in range( 1, number_of_options + 1 ):
# Initialise dict using integers with their own dictionaries
counter.update( { integer_counter: { } } )
for option in options:
# For every option, initialise the above integer's dicts with the option's counter at 0
counter[ integer_counter ].update( { str( option ).strip().replace( ",", "" ) : 0 } )
# Get the question's answers
answers = self.get_results( question )
for answer in answers:
# For every answer, split it at every comma
split_answers = answer.text.split( "," )
for i, result in enumerate( split_answers ):
# Increment the choice's counter by 1
counter[ i + 1 ][ result.strip().replace( ",", "" ) ] += 1
for position in counter:
for option in counter[ position ]:
existence_check = ImportanceOrderResults.objects.filter(
survey__exact = survey,
question__exact = question,
answer__exact = option.strip().replace( ",", "" ),
answer_position__exact = position
)
if existence_check.exists():
result = ImportanceOrderResults(
pk = existence_check[0].pk,
survey = survey,
question = question,
answer = option.strip().replace( ",", "" ),
answer_position = position,
answer_count = counter[ position ][ str( option ).strip().replace( ",", "" ) ]
)
else:
result = ImportanceOrderResults(
survey = survey,
question = question,
answer = option.strip().replace( ",", "" ),
answer_position = position,
answer_count = counter[ position ][ str( option ).strip().replace( ",", "" ) ]
)
result.save()
output.append( ( "order_of_importance", counter, str( question.pk ) ) )
return output
def get_choices( self, choices ):
"""
        Get all the choices/options for a question, delimiting them
by comma.
@param choices String String of choices from the question model
@return A list of choices/options
"""
CHOICES=[]
# Delimit our choices
choices_delimited = choices.split( ',' )
for choice in choices_delimited:
# For every choice, append the value to a list
CHOICES.append( str( choice ) )
# Return a list of choices/options
return CHOICES
def get_results( self, question ):
"""
Get all the answers for a question
@return QuerySet with all the answers for a question
"""
answers = Answers.objects.filter( question__exact = question )
return answers
def radio_pie_chart( request, question ):
"""
@return Piechart object for radio results
"""
ds = DataPool(
series=
[{'options': {
'source': RadioResults.objects.filter( question__exact = question )},
'terms': [
'answer',
'answer_count']}
])
chart = Chart(
datasource = ds,
series_options =
[{'options':{
'type': 'pie',
'stacking': False},
'terms':{
'answer': [
'answer_count']
}}],
chart_options =
{
'title': {
'text': question.text
}
}
)
return chart
def select_pie_chart( request, question ):
"""
@return Piechart object for select results
"""
ds = DataPool(
series=
[{'options': {
'source': SelectResults.objects.filter( question__exact = question )},
'terms': [
'answer',
'answer_count']}
])
chart = Chart(
datasource = ds,
series_options =
[{'options':{
'type': 'pie',
'stacking': False},
'terms':{
'answer': [
'answer_count']
}}],
chart_options =
{
'title': {
'text': question.text
}
}
)
return chart
def checkbox_bar_chart( request, question ):
"""
@return Barchart for checkbox results
"""
ds = DataPool(
series=
[{'options': {
'source': CheckboxResults.objects.filter( question__exact = question ) },
'terms': [
'answer',
'answer_count']}
])
chart = Chart(
datasource = ds,
series_options =
[{'options':{
'type': 'column',
'stacking': True},
'terms':{
'answer': [
'answer_count']
}}],
chart_options =
{'title': {
'text': question.text },
'xAxis': {
'title': {
'text': 'Answers'}}})
return chart
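# Illustrative note (not part of the original class): the shape of the list
# returned by render_results(), using hypothetical questions.
#
#     output = Results().render_results(questions, survey)
#     # e.g. [('text', ['first answer', 'second answer'], 3),
#     #       ('radio', <chartit Chart object>, 4),
#     #       ('order_of_importance', {1: {'Speed': 2, 'Price': 1}, 2: {...}}, '5')]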
|
mit
| -1,745,348,226,818,002,700
| 27.807317
| 97
| 0.590686
| false
| 3.710336
| false
| false
| false
|
luckielordie/conan
|
conans/model/info.py
|
1
|
13408
|
import os
from conans.client.build.cppstd_flags import cppstd_default
from conans.errors import ConanException
from conans.model.env_info import EnvValues
from conans.model.options import OptionsValues
from conans.model.ref import PackageReference
from conans.model.values import Values
from conans.paths import CONANINFO
from conans.util.config_parser import ConfigParser
from conans.util.files import load
from conans.util.sha import sha1
class RequirementInfo(object):
def __init__(self, value_str, indirect=False):
""" parse the input into fields name, version...
"""
ref = PackageReference.loads(value_str)
self.package = ref
self.full_name = ref.conan.name
self.full_version = ref.conan.version
self.full_user = ref.conan.user
self.full_channel = ref.conan.channel
self.full_package_id = ref.package_id
# sha values
if indirect:
self.unrelated_mode()
else:
self.semver()
def dumps(self):
if not self.name:
return ""
result = ["%s/%s" % (self.name, self.version)]
if self.user or self.channel:
result.append("@%s/%s" % (self.user, self.channel))
if self.package_id:
result.append(":%s" % self.package_id)
return "".join(result)
@property
def sha(self):
return "/".join([str(n) for n in [self.name, self.version, self.user, self.channel,
self.package_id]])
def unrelated_mode(self):
self.name = self.version = self.user = self.channel = self.package_id = None
def semver_mode(self):
self.name = self.full_name
self.version = self.full_version.stable()
self.user = self.channel = self.package_id = None
semver = semver_mode
def full_version_mode(self):
self.name = self.full_name
self.version = self.full_version
self.user = self.channel = self.package_id = None
def patch_mode(self):
self.name = self.full_name
self.version = self.full_version.patch()
self.user = self.channel = self.package_id = None
def base_mode(self):
self.name = self.full_name
self.version = self.full_version.base
self.user = self.channel = self.package_id = None
def minor_mode(self):
self.name = self.full_name
self.version = self.full_version.minor()
self.user = self.channel = self.package_id = None
def major_mode(self):
self.name = self.full_name
self.version = self.full_version.major()
self.user = self.channel = self.package_id = None
def full_recipe_mode(self):
self.name = self.full_name
self.version = self.full_version
self.user = self.full_user
self.channel = self.full_channel
self.package_id = None
def full_package_mode(self):
self.name = self.full_name
self.version = self.full_version
self.user = self.full_user
self.channel = self.full_channel
self.package_id = self.full_package_id
class RequirementsInfo(object):
def __init__(self, requires):
# {PackageReference: RequirementInfo}
self._data = {r: RequirementInfo(str(r)) for r in requires}
def copy(self):
return RequirementsInfo(self._data.keys())
def clear(self):
self._data = {}
def remove(self, *args):
for name in args:
del self._data[self._get_key(name)]
def add(self, indirect_reqs):
""" necessary to propagate from upstream the real
package requirements
"""
for r in indirect_reqs:
self._data[r] = RequirementInfo(str(r), indirect=True)
def refs(self):
""" used for updating downstream requirements with this
"""
return list(self._data.keys())
def _get_key(self, item):
for reference in self._data:
if reference.conan.name == item:
return reference
raise ConanException("No requirement matching for %s" % (item))
def __getitem__(self, item):
"""get by package name
Necessary to access from conaninfo
self.requires["Boost"].version = "2.X"
"""
return self._data[self._get_key(item)]
@property
def pkg_names(self):
return [r.conan.name for r in self._data.keys()]
@property
def sha(self):
result = []
# Remove requirements without a name, i.e. indirect transitive requirements
data = {k: v for k, v in self._data.items() if v.name}
for key in sorted(data):
result.append(data[key].sha)
return sha1('\n'.join(result).encode())
def dumps(self):
result = []
for ref in sorted(self._data):
dumped = self._data[ref].dumps()
if dumped:
result.append(dumped)
return "\n".join(result)
def unrelated_mode(self):
self.clear()
def semver_mode(self):
for r in self._data.values():
r.semver_mode()
def patch_mode(self):
for r in self._data.values():
r.patch_mode()
def minor_mode(self):
for r in self._data.values():
r.minor_mode()
def major_mode(self):
for r in self._data.values():
r.major_mode()
def base_mode(self):
for r in self._data.values():
r.base_mode()
def full_version_mode(self):
for r in self._data.values():
r.full_version_mode()
def full_recipe_mode(self):
for r in self._data.values():
r.full_recipe_mode()
def full_package_mode(self):
for r in self._data.values():
r.full_package_mode()
class RequirementsList(list):
@staticmethod
def loads(text):
return RequirementsList.deserialize(text.splitlines())
def dumps(self):
return "\n".join(self.serialize())
def serialize(self):
return [str(r) for r in sorted(self)]
@staticmethod
def deserialize(data):
return RequirementsList([PackageReference.loads(line) for line in data])
class ConanInfo(object):
def copy(self):
""" Useful for build_id implementation
"""
result = ConanInfo()
result.settings = self.settings.copy()
result.options = self.options.copy()
result.requires = self.requires.copy()
return result
@staticmethod
def create(settings, options, requires, indirect_requires):
result = ConanInfo()
result.full_settings = settings
result.settings = settings.copy()
result.full_options = options
result.options = options.copy()
result.options.clear_indirect()
result.full_requires = RequirementsList(requires)
result.requires = RequirementsInfo(requires)
result.requires.add(indirect_requires)
result.full_requires.extend(indirect_requires)
result.recipe_hash = None
result.env_values = EnvValues()
result.vs_toolset_compatible()
result.discard_build_settings()
result.default_std_matching()
return result
@staticmethod
def loads(text):
parser = ConfigParser(text, ["settings", "full_settings", "options", "full_options",
"requires", "full_requires", "scope", "recipe_hash",
"env"], raise_unexpected_field=False)
result = ConanInfo()
result.settings = Values.loads(parser.settings)
result.full_settings = Values.loads(parser.full_settings)
result.options = OptionsValues.loads(parser.options)
result.full_options = OptionsValues.loads(parser.full_options)
result.full_requires = RequirementsList.loads(parser.full_requires)
result.requires = RequirementsInfo(result.full_requires)
result.recipe_hash = parser.recipe_hash or None
        # TODO: Missing handling of parsing of requires, but not necessary now
result.env_values = EnvValues.loads(parser.env)
return result
def dumps(self):
def indent(text):
if not text:
return ""
return '\n'.join(" " + line for line in text.splitlines())
result = list()
result.append("[settings]")
result.append(indent(self.settings.dumps()))
result.append("\n[requires]")
result.append(indent(self.requires.dumps()))
result.append("\n[options]")
result.append(indent(self.options.dumps()))
result.append("\n[full_settings]")
result.append(indent(self.full_settings.dumps()))
result.append("\n[full_requires]")
result.append(indent(self.full_requires.dumps()))
result.append("\n[full_options]")
result.append(indent(self.full_options.dumps()))
result.append("\n[recipe_hash]\n%s" % indent(self.recipe_hash))
result.append("\n[env]")
result.append(indent(self.env_values.dumps()))
return '\n'.join(result) + "\n"
def __eq__(self, other):
""" currently just for testing purposes
"""
return self.dumps() == other.dumps()
def __ne__(self, other):
return not self.__eq__(other)
@staticmethod
def load_file(conan_info_path):
""" load from file
"""
try:
config_text = load(conan_info_path)
except IOError:
raise ConanException("Does not exist %s" % conan_info_path)
else:
return ConanInfo.loads(config_text)
@staticmethod
def load_from_package(package_folder):
info_path = os.path.join(package_folder, CONANINFO)
return ConanInfo.load_file(info_path)
def package_id(self):
""" The package_id of a conans is the sha1 of its specific requirements,
options and settings
"""
computed_id = getattr(self, "_package_id", None)
if computed_id:
return computed_id
result = []
result.append(self.settings.sha)
        # Only requirements that are still in requires are valid for options (non-dev requirements)
self.options.filter_used(self.requires.pkg_names)
result.append(self.options.sha)
result.append(self.requires.sha)
self._package_id = sha1('\n'.join(result).encode())
return self._package_id
def serialize_min(self):
"""
This info will be shown in search results.
"""
conan_info_json = {"settings": dict(self.settings.serialize()),
"options": dict(self.options.serialize()["options"]),
"full_requires": self.full_requires.serialize(),
"recipe_hash": self.recipe_hash}
return conan_info_json
def header_only(self):
self.settings.clear()
self.options.clear()
self.requires.unrelated_mode()
def vs_toolset_compatible(self):
"""Default behaviour, same package for toolset v140 with compiler=Visual Studio 15 than
using Visual Studio 14"""
if self.full_settings.compiler != "Visual Studio":
return
toolsets_versions = {
"v141": "15",
"v140": "14",
"v120": "12",
"v110": "11",
"v100": "10",
"v90": "9",
"v80": "8"}
toolset = str(self.full_settings.compiler.toolset)
version = toolsets_versions.get(toolset)
if version is not None:
self.settings.compiler.version = version
del self.settings.compiler.toolset
def vs_toolset_incompatible(self):
"""Will generate different packages for v140 and visual 15 than the visual 14"""
if self.full_settings.compiler != "Visual Studio":
return
self.settings.compiler.version = self.full_settings.compiler.version
self.settings.compiler.toolset = self.full_settings.compiler.toolset
def discard_build_settings(self):
# When os is defined, os_build is irrelevant for the consumer.
# only when os_build is alone (installers, etc) it has to be present in the package_id
if self.full_settings.os and self.full_settings.os_build:
del self.settings.os_build
if self.full_settings.arch and self.full_settings.arch_build:
del self.settings.arch_build
def include_build_settings(self):
self.settings.os_build = self.full_settings.os_build
self.settings.arch_build = self.full_settings.arch_build
def default_std_matching(self):
"""
        If we are building with gcc 7 and we specify -s cppstd=gnu14, that is the compiler's default,
        so it is the same as specifying None: the packages are the same
"""
if self.full_settings.cppstd and \
self.full_settings.compiler and \
self.full_settings.compiler.version:
default = cppstd_default(str(self.full_settings.compiler),
str(self.full_settings.compiler.version))
if default == str(self.full_settings.cppstd):
self.settings.cppstd = None
def default_std_non_matching(self):
if self.full_settings.cppstd:
self.settings.cppstd = self.full_settings.cppstd
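# Illustrative sketch (not part of the original module): how the requirement
# modes change what is hashed into the package_id. The reference is hypothetical.
#
#     req = RequirementInfo("zlib/1.2.11@conan/stable:0123456789abcdef")
#     req.semver_mode()        # the default: name + stable version only
#     req.full_recipe_mode()   # additionally pins user/channel
#     req.full_package_mode()  # additionally pins the dependency's exact package_id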
|
mit
| -8,335,431,246,790,530,000
| 32.7733
| 95
| 0.598747
| false
| 4.008371
| false
| false
| false
|
HewlettPackard/oneview-ansible
|
library/oneview_network_set_facts.py
|
1
|
4474
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_network_set_facts
short_description: Retrieve facts about the OneView Network Sets
description:
- Retrieve facts about the Network Sets from OneView.
version_added: "2.4"
requirements:
- hpeOneView >= 5.4.0
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
name:
description:
- Network Set name.
options:
description:
- "List with options to gather facts about Network Set.
Option allowed: C(withoutEthernet).
The option C(withoutEthernet) retrieves the list of network_sets excluding Ethernet networks."
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Network Sets
oneview_network_set_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
no_log: true
delegate_to: localhost
- debug: var=network_sets
- name: Gather paginated, filtered, and sorted facts about Network Sets
oneview_network_set_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
params:
start: 0
count: 3
sort: 'name:descending'
filter: name='netset001'
no_log: true
delegate_to: localhost
- debug: var=network_sets
- name: Gather facts about all Network Sets, excluding Ethernet networks
oneview_network_set_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
options:
- withoutEthernet
no_log: true
delegate_to: localhost
- debug: var=network_sets
- name: Gather facts about a Network Set by name
oneview_network_set_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
name: Name of the Network Set
no_log: true
delegate_to: localhost
- debug: var=network_sets
- name: Gather facts about a Network Set by name, excluding Ethernet networks
oneview_network_set_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
name: Name of the Network Set
options:
- withoutEthernet
no_log: true
delegate_to: localhost
- debug: var=network_sets
'''
RETURN = '''
network_sets:
description: Has all the OneView facts about the Network Sets.
returned: Always, but can be empty.
type: dict
'''
from ansible.module_utils.oneview import OneViewModule
class NetworkSetFactsModule(OneViewModule):
argument_spec = dict(
name=dict(type='str'),
options=dict(type='list'),
params=dict(type='dict'),
)
def __init__(self):
super(NetworkSetFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
self.set_resource_object(self.oneview_client.network_sets)
def execute_module(self):
name = self.module.params.get('name')
if 'withoutEthernet' in self.options:
filter_by_name = ("\"'name'='%s'\"" % name) if name else ''
network_sets = self.resource_client.get_all_without_ethernet(filter=filter_by_name)
elif name:
network_sets = self.resource_client.get_by('name', name)
else:
network_sets = self.resource_client.get_all(**self.facts_params)
return dict(changed=False,
ansible_facts=dict(network_sets=network_sets))
def main():
NetworkSetFactsModule().run()
if __name__ == '__main__':
main()
|
apache-2.0
| -7,717,356,519,994,723,000
| 25.790419
| 104
| 0.673894
| false
| 3.753356
| false
| false
| false
|
anntzer/scikit-learn
|
sklearn/utils/_estimator_html_repr.py
|
1
|
9497
|
from contextlib import closing
from contextlib import suppress
from io import StringIO
from string import Template
import uuid
import html
from sklearn import config_context
class _VisualBlock:
"""HTML Representation of Estimator
Parameters
----------
kind : {'serial', 'parallel', 'single'}
kind of HTML block
estimators : list of estimators or `_VisualBlock`s or a single estimator
If kind != 'single', then `estimators` is a list of
estimators.
If kind == 'single', then `estimators` is a single estimator.
names : list of str, default=None
If kind != 'single', then `names` corresponds to estimators.
If kind == 'single', then `names` is a single string corresponding to
the single estimator.
name_details : list of str, str, or None, default=None
If kind != 'single', then `name_details` corresponds to `names`.
If kind == 'single', then `name_details` is a single string
corresponding to the single estimator.
dash_wrapped : bool, default=True
        If True, the wrapped HTML element will be surrounded by a dashed border.
Only active when kind != 'single'.
"""
def __init__(self, kind, estimators, *, names=None, name_details=None,
dash_wrapped=True):
self.kind = kind
self.estimators = estimators
self.dash_wrapped = dash_wrapped
if self.kind in ('parallel', 'serial'):
if names is None:
names = (None, ) * len(estimators)
if name_details is None:
name_details = (None, ) * len(estimators)
self.names = names
self.name_details = name_details
def _sk_visual_block_(self):
return self
def _write_label_html(out, name, name_details,
outer_class="sk-label-container",
inner_class="sk-label",
checked=False):
"""Write labeled html with or without a dropdown with named details"""
out.write(f'<div class="{outer_class}">'
f'<div class="{inner_class} sk-toggleable">')
name = html.escape(name)
if name_details is not None:
checked_str = 'checked' if checked else ''
est_id = uuid.uuid4()
out.write(f'<input class="sk-toggleable__control sk-hidden--visually" '
f'id="{est_id}" type="checkbox" {checked_str}>'
f'<label class="sk-toggleable__label" for="{est_id}">'
f'{name}</label>'
f'<div class="sk-toggleable__content"><pre>{name_details}'
f'</pre></div>')
else:
out.write(f'<label>{name}</label>')
out.write('</div></div>') # outer_class inner_class
def _get_visual_block(estimator):
"""Generate information about how to display an estimator.
"""
with suppress(AttributeError):
return estimator._sk_visual_block_()
if isinstance(estimator, str):
return _VisualBlock('single', estimator,
names=estimator, name_details=estimator)
elif estimator is None:
return _VisualBlock('single', estimator,
names='None', name_details='None')
    # check if estimator looks like a meta-estimator that wraps other estimators
if hasattr(estimator, 'get_params'):
estimators = []
for key, value in estimator.get_params().items():
# Only look at the estimators in the first layer
if '__' not in key and hasattr(value, 'get_params'):
estimators.append(value)
if len(estimators):
return _VisualBlock('parallel', estimators, names=None)
return _VisualBlock('single', estimator,
names=estimator.__class__.__name__,
name_details=str(estimator))
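def _editor_example_get_visual_block():
    # Hedged sketch added for illustration only (not part of scikit-learn):
    # any object whose get_params() values themselves expose get_params() is
    # treated as a meta-estimator, so it is rendered as a 'parallel' block of
    # its sub-estimators. The _Inner/_Meta names below are hypothetical.
    class _Inner:
        def get_params(self, deep=True):
            return {}
    class _Meta:
        def get_params(self, deep=True):
            return {'inner': _Inner()}
    block = _get_visual_block(_Meta())
    return block.kind  # expected: 'parallel'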
def _write_estimator_html(out, estimator, estimator_label,
estimator_label_details, first_call=False):
"""Write estimator to html in serial, parallel, or by itself (single).
"""
if first_call:
est_block = _get_visual_block(estimator)
else:
with config_context(print_changed_only=True):
est_block = _get_visual_block(estimator)
if est_block.kind in ('serial', 'parallel'):
dashed_wrapped = first_call or est_block.dash_wrapped
dash_cls = " sk-dashed-wrapped" if dashed_wrapped else ""
out.write(f'<div class="sk-item{dash_cls}">')
if estimator_label:
_write_label_html(out, estimator_label, estimator_label_details)
kind = est_block.kind
out.write(f'<div class="sk-{kind}">')
est_infos = zip(est_block.estimators, est_block.names,
est_block.name_details)
for est, name, name_details in est_infos:
if kind == 'serial':
_write_estimator_html(out, est, name, name_details)
else: # parallel
out.write('<div class="sk-parallel-item">')
# wrap element in a serial visualblock
serial_block = _VisualBlock('serial', [est],
dash_wrapped=False)
_write_estimator_html(out, serial_block, name, name_details)
out.write('</div>') # sk-parallel-item
out.write('</div></div>')
elif est_block.kind == 'single':
_write_label_html(out, est_block.names, est_block.name_details,
outer_class="sk-item", inner_class="sk-estimator",
checked=first_call)
_STYLE = """
#$id {
color: black;
background-color: white;
}
#$id pre{
padding: 0;
}
#$id div.sk-toggleable {
background-color: white;
}
#$id label.sk-toggleable__label {
cursor: pointer;
display: block;
width: 100%;
margin-bottom: 0;
padding: 0.2em 0.3em;
box-sizing: border-box;
text-align: center;
}
#$id div.sk-toggleable__content {
max-height: 0;
max-width: 0;
overflow: hidden;
text-align: left;
background-color: #f0f8ff;
}
#$id div.sk-toggleable__content pre {
margin: 0.2em;
color: black;
border-radius: 0.25em;
background-color: #f0f8ff;
}
#$id input.sk-toggleable__control:checked~div.sk-toggleable__content {
max-height: 200px;
max-width: 100%;
overflow: auto;
}
#$id div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {
background-color: #d4ebff;
}
#$id div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {
background-color: #d4ebff;
}
#$id input.sk-hidden--visually {
border: 0;
clip: rect(1px 1px 1px 1px);
clip: rect(1px, 1px, 1px, 1px);
height: 1px;
margin: -1px;
overflow: hidden;
padding: 0;
position: absolute;
width: 1px;
}
#$id div.sk-estimator {
font-family: monospace;
background-color: #f0f8ff;
margin: 0.25em 0.25em;
border: 1px dotted black;
border-radius: 0.25em;
box-sizing: border-box;
}
#$id div.sk-estimator:hover {
background-color: #d4ebff;
}
#$id div.sk-parallel-item::after {
content: "";
width: 100%;
border-bottom: 1px solid gray;
flex-grow: 1;
}
#$id div.sk-label:hover label.sk-toggleable__label {
background-color: #d4ebff;
}
#$id div.sk-serial::before {
content: "";
position: absolute;
border-left: 1px solid gray;
box-sizing: border-box;
top: 2em;
bottom: 0;
left: 50%;
}
#$id div.sk-serial {
display: flex;
flex-direction: column;
align-items: center;
background-color: white;
}
#$id div.sk-item {
z-index: 1;
}
#$id div.sk-parallel {
display: flex;
align-items: stretch;
justify-content: center;
background-color: white;
}
#$id div.sk-parallel-item {
display: flex;
flex-direction: column;
position: relative;
background-color: white;
}
#$id div.sk-parallel-item:first-child::after {
align-self: flex-end;
width: 50%;
}
#$id div.sk-parallel-item:last-child::after {
align-self: flex-start;
width: 50%;
}
#$id div.sk-parallel-item:only-child::after {
width: 0;
}
#$id div.sk-dashed-wrapped {
border: 1px dashed gray;
margin: 0.2em;
box-sizing: border-box;
padding-bottom: 0.1em;
background-color: white;
position: relative;
}
#$id div.sk-label label {
font-family: monospace;
font-weight: bold;
background-color: white;
display: inline-block;
line-height: 1.2em;
}
#$id div.sk-label-container {
position: relative;
z-index: 2;
text-align: center;
}
#$id div.sk-container {
display: inline-block;
position: relative;
}
""".replace(' ', '').replace('\n', '') # noqa
def estimator_html_repr(estimator):
"""Build a HTML representation of an estimator.
Read more in the :ref:`User Guide <visualizing_composite_estimators>`.
Parameters
----------
estimator : estimator object
The estimator to visualize.
Returns
-------
html: str
HTML representation of estimator.
"""
with closing(StringIO()) as out:
container_id = "sk-" + str(uuid.uuid4())
style_template = Template(_STYLE)
style_with_id = style_template.substitute(id=container_id)
out.write(f'<style>{style_with_id}</style>'
                  f'<div id="{container_id}" class="sk-top-container">'
'<div class="sk-container">')
_write_estimator_html(out, estimator, estimator.__class__.__name__,
str(estimator), first_call=True)
out.write('</div></div>')
html_output = out.getvalue()
return html_output
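def _editor_example_estimator_html_repr():
    # Hedged usage sketch (not part of the module above): builds the HTML
    # diagram for a small pipeline; the imports below assume the usual
    # scikit-learn estimators are available in this environment.
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn.linear_model import LogisticRegression
    pipe = Pipeline([('scale', StandardScaler()), ('clf', LogisticRegression())])
    return estimator_html_repr(pipe)  # one HTML string: <style> block + nested divs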
|
bsd-3-clause
| -6,215,935,959,145,116,000
| 28.77116
| 87
| 0.605349
| false
| 3.611027
| false
| false
| false
|
henry0312/LightGBM
|
python-package/lightgbm/basic.py
|
1
|
150238
|
# coding: utf-8
"""Wrapper for C API of LightGBM."""
import ctypes
import json
import os
import warnings
from collections import OrderedDict
from copy import deepcopy
from functools import wraps
from logging import Logger
from tempfile import NamedTemporaryFile
from typing import Any, Dict, List, Set, Union
import numpy as np
import scipy.sparse
from .compat import PANDAS_INSTALLED, concat, dt_DataTable, is_dtype_sparse, pd_DataFrame, pd_Series
from .libpath import find_lib_path
class _DummyLogger:
def info(self, msg):
print(msg)
def warning(self, msg):
warnings.warn(msg, stacklevel=3)
_LOGGER = _DummyLogger()
def register_logger(logger):
"""Register custom logger.
Parameters
----------
logger : logging.Logger
Custom logger.
"""
if not isinstance(logger, Logger):
raise TypeError("Logger should inherit logging.Logger class")
global _LOGGER
_LOGGER = logger
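def _editor_example_register_logger():
    # Hedged sketch (not part of upstream LightGBM): routes LightGBM messages
    # through the standard logging module instead of the default print()/warn
    # behaviour of _DummyLogger. The logger name is illustrative only.
    import logging
    logger = logging.getLogger('lightgbm.example')
    logger.addHandler(logging.StreamHandler())
    logger.setLevel(logging.INFO)
    register_logger(logger)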
def _normalize_native_string(func):
"""Join log messages from native library which come by chunks."""
msg_normalized = []
@wraps(func)
def wrapper(msg):
nonlocal msg_normalized
if msg.strip() == '':
msg = ''.join(msg_normalized)
msg_normalized = []
return func(msg)
else:
msg_normalized.append(msg)
return wrapper
def _log_info(msg):
_LOGGER.info(msg)
def _log_warning(msg):
_LOGGER.warning(msg)
@_normalize_native_string
def _log_native(msg):
_LOGGER.info(msg)
def _log_callback(msg):
"""Redirect logs from native library into Python."""
_log_native(str(msg.decode('utf-8')))
def _load_lib():
"""Load LightGBM library."""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
lib.LGBM_GetLastError.restype = ctypes.c_char_p
callback = ctypes.CFUNCTYPE(None, ctypes.c_char_p)
lib.callback = callback(_log_callback)
if lib.LGBM_RegisterLogCallback(lib.callback) != 0:
raise LightGBMError(lib.LGBM_GetLastError().decode('utf-8'))
return lib
_LIB = _load_lib()
NUMERIC_TYPES = (int, float, bool)
def _safe_call(ret):
"""Check the return value from C API call.
Parameters
----------
ret : int
The return value from C API calls.
"""
if ret != 0:
raise LightGBMError(_LIB.LGBM_GetLastError().decode('utf-8'))
def is_numeric(obj):
"""Check whether object is a number or not, include numpy number, etc."""
try:
float(obj)
return True
except (TypeError, ValueError):
# TypeError: obj is not a string or a number
# ValueError: invalid literal
return False
def is_numpy_1d_array(data):
"""Check whether data is a numpy 1-D array."""
return isinstance(data, np.ndarray) and len(data.shape) == 1
def is_numpy_column_array(data):
"""Check whether data is a column numpy array."""
if not isinstance(data, np.ndarray):
return False
shape = data.shape
return len(shape) == 2 and shape[1] == 1
def cast_numpy_1d_array_to_dtype(array, dtype):
"""Cast numpy 1d array to given dtype."""
if array.dtype == dtype:
return array
return array.astype(dtype=dtype, copy=False)
def is_1d_list(data):
"""Check whether data is a 1-D list."""
return isinstance(data, list) and (not data or is_numeric(data[0]))
def list_to_1d_numpy(data, dtype=np.float32, name='list'):
"""Convert data to numpy 1-D array."""
if is_numpy_1d_array(data):
return cast_numpy_1d_array_to_dtype(data, dtype)
elif is_numpy_column_array(data):
_log_warning('Converting column-vector to 1d array')
array = data.ravel()
return cast_numpy_1d_array_to_dtype(array, dtype)
elif is_1d_list(data):
return np.array(data, dtype=dtype, copy=False)
elif isinstance(data, pd_Series):
if _get_bad_pandas_dtypes([data.dtypes]):
raise ValueError('Series.dtypes must be int, float or bool')
return np.array(data, dtype=dtype, copy=False) # SparseArray should be supported as well
else:
raise TypeError(f"Wrong type({type(data).__name__}) for {name}.\n"
"It should be list, numpy 1-D array or pandas Series")
def cfloat32_array_to_numpy(cptr, length):
"""Convert a ctypes float pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_float)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected float pointer')
def cfloat64_array_to_numpy(cptr, length):
"""Convert a ctypes double pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected double pointer')
def cint32_array_to_numpy(cptr, length):
"""Convert a ctypes int pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_int32)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected int32 pointer')
def cint64_array_to_numpy(cptr, length):
"""Convert a ctypes int pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_int64)):
return np.ctypeslib.as_array(cptr, shape=(length,)).copy()
else:
raise RuntimeError('Expected int64 pointer')
def c_str(string):
"""Convert a Python string to C string."""
return ctypes.c_char_p(string.encode('utf-8'))
def c_array(ctype, values):
"""Convert a Python array to C array."""
return (ctype * len(values))(*values)
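def _editor_example_c_helpers():
    # Hedged sketch (not part of upstream LightGBM): c_str/c_array build the
    # ctypes values handed to the C API; the values returned here are checked
    # purely for illustration.
    name = c_str('binary')                            # ctypes.c_char_p of b'binary'
    weights = c_array(ctypes.c_double, [0.5, 1.0, 2.0])
    return name.value, len(weights), weights[2]       # expected: b'binary', 3, 2.0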
def json_default_with_numpy(obj):
"""Convert numpy classes to JSON serializable objects."""
if isinstance(obj, (np.integer, np.floating, np.bool_)):
return obj.item()
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj
def param_dict_to_str(data):
"""Convert Python dictionary to string, which is passed to C API."""
if data is None or not data:
return ""
pairs = []
for key, val in data.items():
if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val):
def to_string(x):
if isinstance(x, list):
return f"[{','.join(map(str, x))}]"
else:
return str(x)
pairs.append(f"{key}={','.join(map(to_string, val))}")
elif isinstance(val, (str, NUMERIC_TYPES)) or is_numeric(val):
pairs.append(f"{key}={val}")
elif val is not None:
raise TypeError(f'Unknown type of parameter:{key}, got:{type(val).__name__}')
return ' '.join(pairs)
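def _editor_example_param_dict_to_str():
    # Hedged sketch (not part of upstream LightGBM): shows how a params dict
    # is flattened into the space-separated string passed to the C API; list
    # values are joined with commas.
    params = {'num_leaves': 31, 'metric': ['auc', 'l2'], 'verbosity': -1}
    return param_dict_to_str(params)  # e.g. 'num_leaves=31 metric=auc,l2 verbosity=-1'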
class _TempFile:
def __enter__(self):
with NamedTemporaryFile(prefix="lightgbm_tmp_", delete=True) as f:
self.name = f.name
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if os.path.isfile(self.name):
os.remove(self.name)
def readlines(self):
with open(self.name, "r+") as f:
ret = f.readlines()
return ret
def writelines(self, lines):
with open(self.name, "w+") as f:
f.writelines(lines)
class LightGBMError(Exception):
"""Error thrown by LightGBM."""
pass
# DeprecationWarning is not shown by default, so let's create our own with higher level
class LGBMDeprecationWarning(UserWarning):
"""Custom deprecation warning."""
pass
class _ConfigAliases:
aliases = {"bin_construct_sample_cnt": {"bin_construct_sample_cnt",
"subsample_for_bin"},
"boosting": {"boosting",
"boosting_type",
"boost"},
"categorical_feature": {"categorical_feature",
"cat_feature",
"categorical_column",
"cat_column"},
"data_random_seed": {"data_random_seed",
"data_seed"},
"early_stopping_round": {"early_stopping_round",
"early_stopping_rounds",
"early_stopping",
"n_iter_no_change"},
"enable_bundle": {"enable_bundle",
"is_enable_bundle",
"bundle"},
"eval_at": {"eval_at",
"ndcg_eval_at",
"ndcg_at",
"map_eval_at",
"map_at"},
"group_column": {"group_column",
"group",
"group_id",
"query_column",
"query",
"query_id"},
"header": {"header",
"has_header"},
"ignore_column": {"ignore_column",
"ignore_feature",
"blacklist"},
"is_enable_sparse": {"is_enable_sparse",
"is_sparse",
"enable_sparse",
"sparse"},
"label_column": {"label_column",
"label"},
"local_listen_port": {"local_listen_port",
"local_port",
"port"},
"machines": {"machines",
"workers",
"nodes"},
"metric": {"metric",
"metrics",
"metric_types"},
"num_class": {"num_class",
"num_classes"},
"num_iterations": {"num_iterations",
"num_iteration",
"n_iter",
"num_tree",
"num_trees",
"num_round",
"num_rounds",
"num_boost_round",
"n_estimators"},
"num_machines": {"num_machines",
"num_machine"},
"num_threads": {"num_threads",
"num_thread",
"nthread",
"nthreads",
"n_jobs"},
"objective": {"objective",
"objective_type",
"app",
"application"},
"pre_partition": {"pre_partition",
"is_pre_partition"},
"tree_learner": {"tree_learner",
"tree",
"tree_type",
"tree_learner_type"},
"two_round": {"two_round",
"two_round_loading",
"use_two_round_loading"},
"verbosity": {"verbosity",
"verbose"},
"weight_column": {"weight_column",
"weight"}}
@classmethod
def get(cls, *args):
ret = set()
for i in args:
ret |= cls.aliases.get(i, {i})
return ret
def _choose_param_value(main_param_name: str, params: Dict[str, Any], default_value: Any) -> Dict[str, Any]:
"""Get a single parameter value, accounting for aliases.
Parameters
----------
main_param_name : str
Name of the main parameter to get a value for. One of the keys of ``_ConfigAliases``.
params : dict
Dictionary of LightGBM parameters.
default_value : Any
Default value to use for the parameter, if none is found in ``params``.
Returns
-------
params : dict
A ``params`` dict with exactly one value for ``main_param_name``, and all aliases ``main_param_name`` removed.
If both ``main_param_name`` and one or more aliases for it are found, the value of ``main_param_name`` will be preferred.
"""
# avoid side effects on passed-in parameters
params = deepcopy(params)
# find a value, and remove other aliases with .pop()
# prefer the value of 'main_param_name' if it exists, otherwise search the aliases
found_value = None
if main_param_name in params.keys():
found_value = params[main_param_name]
for param in _ConfigAliases.get(main_param_name):
val = params.pop(param, None)
if found_value is None and val is not None:
found_value = val
if found_value is not None:
params[main_param_name] = found_value
else:
params[main_param_name] = default_value
return params
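def _editor_example_choose_param_value():
    # Hedged sketch (not part of upstream LightGBM): 'n_estimators' is an
    # alias of 'num_iterations', so its value is folded into the main key and
    # the alias is dropped; the default is used only when nothing is given.
    params = _choose_param_value('num_iterations', {'n_estimators': 50}, default_value=100)
    return params  # expected: {'num_iterations': 50}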
MAX_INT32 = (1 << 31) - 1
"""Macro definition of data type in C API of LightGBM"""
C_API_DTYPE_FLOAT32 = 0
C_API_DTYPE_FLOAT64 = 1
C_API_DTYPE_INT32 = 2
C_API_DTYPE_INT64 = 3
"""Matrix is row major in Python"""
C_API_IS_ROW_MAJOR = 1
"""Macro definition of prediction type in C API of LightGBM"""
C_API_PREDICT_NORMAL = 0
C_API_PREDICT_RAW_SCORE = 1
C_API_PREDICT_LEAF_INDEX = 2
C_API_PREDICT_CONTRIB = 3
"""Macro definition of sparse matrix type"""
C_API_MATRIX_TYPE_CSR = 0
C_API_MATRIX_TYPE_CSC = 1
"""Macro definition of feature importance type"""
C_API_FEATURE_IMPORTANCE_SPLIT = 0
C_API_FEATURE_IMPORTANCE_GAIN = 1
"""Data type of data field"""
FIELD_TYPE_MAPPER = {"label": C_API_DTYPE_FLOAT32,
"weight": C_API_DTYPE_FLOAT32,
"init_score": C_API_DTYPE_FLOAT64,
"group": C_API_DTYPE_INT32}
"""String name to int feature importance type mapper"""
FEATURE_IMPORTANCE_TYPE_MAPPER = {"split": C_API_FEATURE_IMPORTANCE_SPLIT,
"gain": C_API_FEATURE_IMPORTANCE_GAIN}
def convert_from_sliced_object(data):
"""Fix the memory of multi-dimensional sliced object."""
if isinstance(data, np.ndarray) and isinstance(data.base, np.ndarray):
if not data.flags.c_contiguous:
            _log_warning("Usage of np.ndarray subset (sliced data) is not recommended "
                         "because it will double the peak memory cost in LightGBM.")
return np.copy(data)
return data
def c_float_array(data):
"""Get pointer of float numpy array / list."""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
data = convert_from_sliced_object(data)
assert data.flags.c_contiguous
if data.dtype == np.float32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
type_data = C_API_DTYPE_FLOAT32
elif data.dtype == np.float64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
type_data = C_API_DTYPE_FLOAT64
else:
raise TypeError(f"Expected np.float32 or np.float64, met type({data.dtype})")
else:
raise TypeError(f"Unknown type({type(data).__name__})")
return (ptr_data, type_data, data) # return `data` to avoid the temporary copy is freed
def c_int_array(data):
"""Get pointer of int numpy array / list."""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
data = convert_from_sliced_object(data)
assert data.flags.c_contiguous
if data.dtype == np.int32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
type_data = C_API_DTYPE_INT32
elif data.dtype == np.int64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
type_data = C_API_DTYPE_INT64
else:
raise TypeError(f"Expected np.int32 or np.int64, met type({data.dtype})")
else:
raise TypeError(f"Unknown type({type(data).__name__})")
return (ptr_data, type_data, data) # return `data` to avoid the temporary copy is freed
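def _editor_example_c_float_array():
    # Hedged sketch (not part of upstream LightGBM): c_float_array returns a
    # ctypes pointer, a dtype tag and the backing array; keeping the third
    # element alive prevents the buffer from being freed while the C side
    # still uses the pointer.
    ptr, type_tag, holder = c_float_array(np.array([1.0, 2.0, 3.0], dtype=np.float32))
    return type_tag == C_API_DTYPE_FLOAT32, holder.dtype  # expected: True, float32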
def _get_bad_pandas_dtypes(dtypes):
pandas_dtype_mapper = {'int8': 'int', 'int16': 'int', 'int32': 'int',
'int64': 'int', 'uint8': 'int', 'uint16': 'int',
'uint32': 'int', 'uint64': 'int', 'bool': 'int',
'float16': 'float', 'float32': 'float', 'float64': 'float'}
bad_indices = [i for i, dtype in enumerate(dtypes) if (dtype.name not in pandas_dtype_mapper
and (not is_dtype_sparse(dtype)
or dtype.subtype.name not in pandas_dtype_mapper))]
return bad_indices
def _data_from_pandas(data, feature_name, categorical_feature, pandas_categorical):
if isinstance(data, pd_DataFrame):
if len(data.shape) != 2 or data.shape[0] < 1:
raise ValueError('Input data must be 2 dimensional and non empty.')
if feature_name == 'auto' or feature_name is None:
data = data.rename(columns=str)
cat_cols = list(data.select_dtypes(include=['category']).columns)
cat_cols_not_ordered = [col for col in cat_cols if not data[col].cat.ordered]
if pandas_categorical is None: # train dataset
pandas_categorical = [list(data[col].cat.categories) for col in cat_cols]
else:
if len(cat_cols) != len(pandas_categorical):
raise ValueError('train and valid dataset categorical_feature do not match.')
for col, category in zip(cat_cols, pandas_categorical):
if list(data[col].cat.categories) != list(category):
data[col] = data[col].cat.set_categories(category)
if len(cat_cols): # cat_cols is list
data = data.copy() # not alter origin DataFrame
data[cat_cols] = data[cat_cols].apply(lambda x: x.cat.codes).replace({-1: np.nan})
if categorical_feature is not None:
if feature_name is None:
feature_name = list(data.columns)
if categorical_feature == 'auto': # use cat cols from DataFrame
categorical_feature = cat_cols_not_ordered
else: # use cat cols specified by user
categorical_feature = list(categorical_feature)
if feature_name == 'auto':
feature_name = list(data.columns)
bad_indices = _get_bad_pandas_dtypes(data.dtypes)
if bad_indices:
bad_index_cols_str = ', '.join(data.columns[bad_indices])
raise ValueError("DataFrame.dtypes for data must be int, float or bool.\n"
"Did not expect the data types in the following fields: "
f"{bad_index_cols_str}")
data = data.values
if data.dtype != np.float32 and data.dtype != np.float64:
data = data.astype(np.float32)
else:
if feature_name == 'auto':
feature_name = None
if categorical_feature == 'auto':
categorical_feature = None
return data, feature_name, categorical_feature, pandas_categorical
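def _editor_example_data_from_pandas():
    # Hedged sketch (not part of upstream LightGBM; assumes pandas is
    # installed): an unordered 'category' column is re-coded to its integer
    # codes and reported back as a categorical feature when
    # categorical_feature='auto'. Exact return values may vary by version.
    import pandas as pd
    df = pd.DataFrame({'x': [1.0, 2.0, 3.0], 'c': pd.Categorical(['a', 'b', 'a'])})
    data, feat_names, cat_feats, pandas_cat = _data_from_pandas(
        df, feature_name='auto', categorical_feature='auto', pandas_categorical=None)
    return feat_names, cat_feats, pandas_cat  # e.g. ['x', 'c'], ['c'], [['a', 'b']]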
def _label_from_pandas(label):
if isinstance(label, pd_DataFrame):
if len(label.columns) > 1:
raise ValueError('DataFrame for label cannot have multiple columns')
if _get_bad_pandas_dtypes(label.dtypes):
raise ValueError('DataFrame.dtypes for label must be int, float or bool')
label = np.ravel(label.values.astype(np.float32, copy=False))
return label
def _dump_pandas_categorical(pandas_categorical, file_name=None):
categorical_json = json.dumps(pandas_categorical, default=json_default_with_numpy)
pandas_str = f'\npandas_categorical:{categorical_json}\n'
if file_name is not None:
with open(file_name, 'a') as f:
f.write(pandas_str)
return pandas_str
def _load_pandas_categorical(file_name=None, model_str=None):
pandas_key = 'pandas_categorical:'
offset = -len(pandas_key)
if file_name is not None:
max_offset = -os.path.getsize(file_name)
with open(file_name, 'rb') as f:
while True:
if offset < max_offset:
offset = max_offset
f.seek(offset, os.SEEK_END)
lines = f.readlines()
if len(lines) >= 2:
break
offset *= 2
last_line = lines[-1].decode('utf-8').strip()
if not last_line.startswith(pandas_key):
last_line = lines[-2].decode('utf-8').strip()
elif model_str is not None:
idx = model_str.rfind('\n', 0, offset)
last_line = model_str[idx:].strip()
if last_line.startswith(pandas_key):
return json.loads(last_line[len(pandas_key):])
else:
return None
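def _editor_example_pandas_categorical_roundtrip():
    # Hedged sketch (not part of upstream LightGBM): the trailing
    # 'pandas_categorical:...' line appended by _dump_pandas_categorical can
    # be read back from a model string via _load_pandas_categorical.
    model_str = 'tree sizes placeholder' + _dump_pandas_categorical([['a', 'b'], [1, 2]])
    return _load_pandas_categorical(model_str=model_str)  # expected: [['a', 'b'], [1, 2]]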
class _InnerPredictor:
"""_InnerPredictor of LightGBM.
Not exposed to user.
Used only for prediction, usually used for continued training.
.. note::
Can be converted from Booster, but cannot be converted to Booster.
"""
def __init__(self, model_file=None, booster_handle=None, pred_parameter=None):
"""Initialize the _InnerPredictor.
Parameters
----------
model_file : string or None, optional (default=None)
Path to the model file.
booster_handle : object or None, optional (default=None)
Handle of Booster.
pred_parameter: dict or None, optional (default=None)
            Other parameters for the prediction.
"""
self.handle = ctypes.c_void_p()
self.__is_manage_handle = True
if model_file is not None:
"""Prediction task"""
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(model_file),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
self.num_total_iteration = out_num_iterations.value
self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
elif booster_handle is not None:
self.__is_manage_handle = False
self.handle = booster_handle
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
self.num_total_iteration = self.current_iteration()
self.pandas_categorical = None
else:
raise TypeError('Need model_file or booster_handle to create a predictor')
pred_parameter = {} if pred_parameter is None else pred_parameter
self.pred_parameter = param_dict_to_str(pred_parameter)
def __del__(self):
try:
if self.__is_manage_handle:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
except AttributeError:
pass
def __getstate__(self):
this = self.__dict__.copy()
this.pop('handle', None)
return this
def predict(self, data, start_iteration=0, num_iteration=-1,
raw_score=False, pred_leaf=False, pred_contrib=False, data_has_header=False,
is_reshape=True):
"""Predict logic.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for prediction.
When data type is string, it represents the path of txt file.
start_iteration : int, optional (default=0)
Start index of the iteration to predict.
num_iteration : int, optional (default=-1)
Iteration used for prediction.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
data_has_header : bool, optional (default=False)
Whether data has header.
Used only for txt data.
is_reshape : bool, optional (default=True)
Whether to reshape to (nrow, ncol).
Returns
-------
result : numpy array, scipy.sparse or list of scipy.sparse
Prediction result.
Can be sparse or a list of sparse objects (each element represents predictions for one class) for feature contributions (when ``pred_contrib=True``).
"""
if isinstance(data, Dataset):
raise TypeError("Cannot use Dataset instance for prediction, please use raw data instead")
data = _data_from_pandas(data, None, None, self.pandas_categorical)[0]
predict_type = C_API_PREDICT_NORMAL
if raw_score:
predict_type = C_API_PREDICT_RAW_SCORE
if pred_leaf:
predict_type = C_API_PREDICT_LEAF_INDEX
if pred_contrib:
predict_type = C_API_PREDICT_CONTRIB
int_data_has_header = 1 if data_has_header else 0
if isinstance(data, str):
with _TempFile() as f:
_safe_call(_LIB.LGBM_BoosterPredictForFile(
self.handle,
c_str(data),
ctypes.c_int(int_data_has_header),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
c_str(f.name)))
lines = f.readlines()
nrow = len(lines)
preds = [float(token) for line in lines for token in line.split('\t')]
preds = np.array(preds, dtype=np.float64, copy=False)
elif isinstance(data, scipy.sparse.csr_matrix):
preds, nrow = self.__pred_for_csr(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, scipy.sparse.csc_matrix):
preds, nrow = self.__pred_for_csc(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, np.ndarray):
preds, nrow = self.__pred_for_np2d(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, list):
try:
data = np.array(data)
except BaseException:
raise ValueError('Cannot convert data list to numpy array.')
preds, nrow = self.__pred_for_np2d(data, start_iteration, num_iteration, predict_type)
elif isinstance(data, dt_DataTable):
preds, nrow = self.__pred_for_np2d(data.to_numpy(), start_iteration, num_iteration, predict_type)
else:
try:
_log_warning('Converting data to scipy sparse matrix.')
csr = scipy.sparse.csr_matrix(data)
except BaseException:
raise TypeError(f'Cannot predict data for type {type(data).__name__}')
preds, nrow = self.__pred_for_csr(csr, start_iteration, num_iteration, predict_type)
if pred_leaf:
preds = preds.astype(np.int32)
is_sparse = scipy.sparse.issparse(preds) or isinstance(preds, list)
if is_reshape and not is_sparse and preds.size != nrow:
if preds.size % nrow == 0:
preds = preds.reshape(nrow, -1)
else:
                raise ValueError(f'Length of predict result ({preds.size}) cannot be divided by nrow ({nrow})')
return preds
def __get_num_preds(self, start_iteration, num_iteration, nrow, predict_type):
"""Get size of prediction result."""
if nrow > MAX_INT32:
            raise LightGBMError('LightGBM cannot perform prediction for data '
                                f'with number of rows greater than MAX_INT32 ({MAX_INT32}).\n'
                                'You can split your data into chunks '
                                'and then concatenate predictions for them')
n_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterCalcNumPredict(
self.handle,
ctypes.c_int(nrow),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.byref(n_preds)))
return n_preds.value
def __pred_for_np2d(self, mat, start_iteration, num_iteration, predict_type):
"""Predict for a 2-D numpy matrix."""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray or list must be 2 dimensional')
def inner_predict(mat, start_iteration, num_iteration, predict_type, preds=None):
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
n_preds = self.__get_num_preds(start_iteration, num_iteration, mat.shape[0], predict_type)
if preds is None:
preds = np.zeros(n_preds, dtype=np.float64)
elif len(preds.shape) != 1 or len(preds) != n_preds:
raise ValueError("Wrong length of pre-allocated predict array")
out_num_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterPredictForMat(
self.handle,
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int32(mat.shape[0]),
ctypes.c_int32(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, mat.shape[0]
nrow = mat.shape[0]
if nrow > MAX_INT32:
sections = np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)
# __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
n_preds = [self.__get_num_preds(start_iteration, num_iteration, i, predict_type) for i in np.diff([0] + list(sections) + [nrow])]
n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
preds = np.zeros(sum(n_preds), dtype=np.float64)
for chunk, (start_idx_pred, end_idx_pred) in zip(np.array_split(mat, sections),
zip(n_preds_sections, n_preds_sections[1:])):
# avoid memory consumption by arrays concatenation operations
inner_predict(chunk, start_iteration, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
return preds, nrow
else:
return inner_predict(mat, start_iteration, num_iteration, predict_type)
def __create_sparse_native(self, cs, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
indptr_type, data_type, is_csr=True):
# create numpy array from output arrays
data_indices_len = out_shape[0]
indptr_len = out_shape[1]
if indptr_type == C_API_DTYPE_INT32:
out_indptr = cint32_array_to_numpy(out_ptr_indptr, indptr_len)
elif indptr_type == C_API_DTYPE_INT64:
out_indptr = cint64_array_to_numpy(out_ptr_indptr, indptr_len)
else:
raise TypeError("Expected int32 or int64 type for indptr")
if data_type == C_API_DTYPE_FLOAT32:
out_data = cfloat32_array_to_numpy(out_ptr_data, data_indices_len)
elif data_type == C_API_DTYPE_FLOAT64:
out_data = cfloat64_array_to_numpy(out_ptr_data, data_indices_len)
else:
raise TypeError("Expected float32 or float64 type for data")
out_indices = cint32_array_to_numpy(out_ptr_indices, data_indices_len)
# break up indptr based on number of rows (note more than one matrix in multiclass case)
per_class_indptr_shape = cs.indptr.shape[0]
# for CSC there is extra column added
if not is_csr:
per_class_indptr_shape += 1
out_indptr_arrays = np.split(out_indptr, out_indptr.shape[0] / per_class_indptr_shape)
# reformat output into a csr or csc matrix or list of csr or csc matrices
cs_output_matrices = []
offset = 0
for cs_indptr in out_indptr_arrays:
matrix_indptr_len = cs_indptr[cs_indptr.shape[0] - 1]
cs_indices = out_indices[offset + cs_indptr[0]:offset + matrix_indptr_len]
cs_data = out_data[offset + cs_indptr[0]:offset + matrix_indptr_len]
offset += matrix_indptr_len
# same shape as input csr or csc matrix except extra column for expected value
cs_shape = [cs.shape[0], cs.shape[1] + 1]
# note: make sure we copy data as it will be deallocated next
if is_csr:
cs_output_matrices.append(scipy.sparse.csr_matrix((cs_data, cs_indices, cs_indptr), cs_shape))
else:
cs_output_matrices.append(scipy.sparse.csc_matrix((cs_data, cs_indices, cs_indptr), cs_shape))
# free the temporary native indptr, indices, and data
_safe_call(_LIB.LGBM_BoosterFreePredictSparse(out_ptr_indptr, out_ptr_indices, out_ptr_data,
ctypes.c_int(indptr_type), ctypes.c_int(data_type)))
if len(cs_output_matrices) == 1:
return cs_output_matrices[0]
return cs_output_matrices
def __pred_for_csr(self, csr, start_iteration, num_iteration, predict_type):
"""Predict for a CSR data."""
def inner_predict(csr, start_iteration, num_iteration, predict_type, preds=None):
nrow = len(csr.indptr) - 1
n_preds = self.__get_num_preds(start_iteration, num_iteration, nrow, predict_type)
if preds is None:
preds = np.zeros(n_preds, dtype=np.float64)
elif len(preds.shape) != 1 or len(preds) != n_preds:
raise ValueError("Wrong length of pre-allocated predict array")
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
assert csr.shape[1] <= MAX_INT32
csr_indices = csr.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_BoosterPredictForCSR(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
def inner_predict_sparse(csr, start_iteration, num_iteration, predict_type):
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
csr_indices = csr.indices.astype(np.int32, copy=False)
matrix_type = C_API_MATRIX_TYPE_CSR
if type_ptr_indptr == C_API_DTYPE_INT32:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int32)()
else:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int64)()
out_ptr_indices = ctypes.POINTER(ctypes.c_int32)()
if type_ptr_data == C_API_DTYPE_FLOAT32:
out_ptr_data = ctypes.POINTER(ctypes.c_float)()
else:
out_ptr_data = ctypes.POINTER(ctypes.c_double)()
out_shape = np.zeros(2, dtype=np.int64)
_safe_call(_LIB.LGBM_BoosterPredictSparseOutput(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.c_int(matrix_type),
out_shape.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)),
ctypes.byref(out_ptr_indptr),
ctypes.byref(out_ptr_indices),
ctypes.byref(out_ptr_data)))
matrices = self.__create_sparse_native(csr, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
type_ptr_indptr, type_ptr_data, is_csr=True)
nrow = len(csr.indptr) - 1
return matrices, nrow
if predict_type == C_API_PREDICT_CONTRIB:
return inner_predict_sparse(csr, start_iteration, num_iteration, predict_type)
nrow = len(csr.indptr) - 1
if nrow > MAX_INT32:
sections = [0] + list(np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)) + [nrow]
# __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
n_preds = [self.__get_num_preds(start_iteration, num_iteration, i, predict_type) for i in np.diff(sections)]
n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
preds = np.zeros(sum(n_preds), dtype=np.float64)
for (start_idx, end_idx), (start_idx_pred, end_idx_pred) in zip(zip(sections, sections[1:]),
zip(n_preds_sections, n_preds_sections[1:])):
# avoid memory consumption by arrays concatenation operations
inner_predict(csr[start_idx:end_idx], start_iteration, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
return preds, nrow
else:
return inner_predict(csr, start_iteration, num_iteration, predict_type)
def __pred_for_csc(self, csc, start_iteration, num_iteration, predict_type):
"""Predict for a CSC data."""
def inner_predict_sparse(csc, start_iteration, num_iteration, predict_type):
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
csc_indices = csc.indices.astype(np.int32, copy=False)
matrix_type = C_API_MATRIX_TYPE_CSC
if type_ptr_indptr == C_API_DTYPE_INT32:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int32)()
else:
out_ptr_indptr = ctypes.POINTER(ctypes.c_int64)()
out_ptr_indices = ctypes.POINTER(ctypes.c_int32)()
if type_ptr_data == C_API_DTYPE_FLOAT32:
out_ptr_data = ctypes.POINTER(ctypes.c_float)()
else:
out_ptr_data = ctypes.POINTER(ctypes.c_double)()
out_shape = np.zeros(2, dtype=np.int64)
_safe_call(_LIB.LGBM_BoosterPredictSparseOutput(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.c_int(matrix_type),
out_shape.ctypes.data_as(ctypes.POINTER(ctypes.c_int64)),
ctypes.byref(out_ptr_indptr),
ctypes.byref(out_ptr_indices),
ctypes.byref(out_ptr_data)))
matrices = self.__create_sparse_native(csc, out_shape, out_ptr_indptr, out_ptr_indices, out_ptr_data,
type_ptr_indptr, type_ptr_data, is_csr=False)
nrow = csc.shape[0]
return matrices, nrow
nrow = csc.shape[0]
if nrow > MAX_INT32:
return self.__pred_for_csr(csc.tocsr(), start_iteration, num_iteration, predict_type)
if predict_type == C_API_PREDICT_CONTRIB:
return inner_predict_sparse(csc, start_iteration, num_iteration, predict_type)
n_preds = self.__get_num_preds(start_iteration, num_iteration, nrow, predict_type)
preds = np.zeros(n_preds, dtype=np.float64)
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
assert csc.shape[0] <= MAX_INT32
csc_indices = csc.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_BoosterPredictForCSC(
self.handle,
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
ctypes.c_int(predict_type),
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
def current_iteration(self):
"""Get the index of the current iteration.
Returns
-------
cur_iter : int
The index of the current iteration.
"""
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value
class Dataset:
"""Dataset in LightGBM."""
def __init__(self, data, label=None, reference=None,
weight=None, group=None, init_score=None, silent=False,
feature_name='auto', categorical_feature='auto', params=None,
free_raw_data=True):
"""Initialize Dataset.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays
Data source of Dataset.
If string, it represents the path to txt file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
Label of the data.
reference : Dataset or None, optional (default=None)
If this is Dataset for validation, training data should be used as reference.
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
init_score : list, numpy 1-D array, pandas Series or None, optional (default=None)
Init score for Dataset.
silent : bool, optional (default=False)
Whether to print messages during construction.
feature_name : list of strings or 'auto', optional (default="auto")
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
categorical_feature : list of strings or int, or 'auto', optional (default="auto")
Categorical features.
If list of int, interpreted as indices.
If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
All values in categorical features should be less than int32 max value (2147483647).
Large values could be memory consuming. Consider using consecutive integers starting from zero.
All negative values in categorical features will be treated as missing values.
The output cannot be monotonically constrained with respect to a categorical feature.
params : dict or None, optional (default=None)
Other parameters for Dataset.
free_raw_data : bool, optional (default=True)
If True, raw data is freed after constructing inner Dataset.
"""
self.handle = None
self.data = data
self.label = label
self.reference = reference
self.weight = weight
self.group = group
self.init_score = init_score
self.silent = silent
self.feature_name = feature_name
self.categorical_feature = categorical_feature
self.params = deepcopy(params)
self.free_raw_data = free_raw_data
self.used_indices = None
self.need_slice = True
self._predictor = None
self.pandas_categorical = None
self.params_back_up = None
self.feature_penalty = None
self.monotone_constraints = None
self.version = 0
def __del__(self):
try:
self._free_handle()
except AttributeError:
pass
def get_params(self):
"""Get the used parameters in the Dataset.
Returns
-------
params : dict or None
The used parameters in this Dataset object.
"""
if self.params is not None:
# no min_data, nthreads and verbose in this function
dataset_params = _ConfigAliases.get("bin_construct_sample_cnt",
"categorical_feature",
"data_random_seed",
"enable_bundle",
"feature_pre_filter",
"forcedbins_filename",
"group_column",
"header",
"ignore_column",
"is_enable_sparse",
"label_column",
"linear_tree",
"max_bin",
"max_bin_by_feature",
"min_data_in_bin",
"pre_partition",
"two_round",
"use_missing",
"weight_column",
"zero_as_missing")
return {k: v for k, v in self.params.items() if k in dataset_params}
def _free_handle(self):
if self.handle is not None:
_safe_call(_LIB.LGBM_DatasetFree(self.handle))
self.handle = None
self.need_slice = True
if self.used_indices is not None:
self.data = None
return self
def _set_init_score_by_predictor(self, predictor, data, used_indices=None):
data_has_header = False
if isinstance(data, str):
# check data has header or not
data_has_header = any(self.params.get(alias, False) for alias in _ConfigAliases.get("header"))
num_data = self.num_data()
if predictor is not None:
init_score = predictor.predict(data,
raw_score=True,
data_has_header=data_has_header,
is_reshape=False)
if used_indices is not None:
assert not self.need_slice
if isinstance(data, str):
sub_init_score = np.zeros(num_data * predictor.num_class, dtype=np.float32)
assert num_data == len(used_indices)
for i in range(len(used_indices)):
for j in range(predictor.num_class):
sub_init_score[i * predictor.num_class + j] = init_score[used_indices[i] * predictor.num_class + j]
init_score = sub_init_score
if predictor.num_class > 1:
# need to regroup init_score
new_init_score = np.zeros(init_score.size, dtype=np.float32)
for i in range(num_data):
for j in range(predictor.num_class):
new_init_score[j * num_data + i] = init_score[i * predictor.num_class + j]
init_score = new_init_score
elif self.init_score is not None:
init_score = np.zeros(self.init_score.shape, dtype=np.float32)
else:
return self
self.set_init_score(init_score)
def _lazy_init(self, data, label=None, reference=None,
weight=None, group=None, init_score=None, predictor=None,
silent=False, feature_name='auto',
categorical_feature='auto', params=None):
if data is None:
self.handle = None
return self
if reference is not None:
self.pandas_categorical = reference.pandas_categorical
categorical_feature = reference.categorical_feature
data, feature_name, categorical_feature, self.pandas_categorical = _data_from_pandas(data,
feature_name,
categorical_feature,
self.pandas_categorical)
label = _label_from_pandas(label)
# process for args
params = {} if params is None else params
args_names = (getattr(self.__class__, '_lazy_init')
.__code__
.co_varnames[:getattr(self.__class__, '_lazy_init').__code__.co_argcount])
for key, _ in params.items():
if key in args_names:
_log_warning(f'{key} keyword has been found in `params` and will be ignored.\n'
f'Please use {key} argument of the Dataset constructor to pass this parameter.')
# user can set verbose with params, it has higher priority
if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
params["verbose"] = -1
# get categorical features
if categorical_feature is not None:
categorical_indices = set()
feature_dict = {}
if feature_name is not None:
feature_dict = {name: i for i, name in enumerate(feature_name)}
for name in categorical_feature:
if isinstance(name, str) and name in feature_dict:
categorical_indices.add(feature_dict[name])
elif isinstance(name, int):
categorical_indices.add(name)
else:
raise TypeError(f"Wrong type({type(name).__name__}) or unknown name({name}) in categorical_feature")
if categorical_indices:
for cat_alias in _ConfigAliases.get("categorical_feature"):
if cat_alias in params:
_log_warning(f'{cat_alias} in param dict is overridden.')
params.pop(cat_alias, None)
params['categorical_column'] = sorted(categorical_indices)
params_str = param_dict_to_str(params)
self.params = params
# process for reference dataset
ref_dataset = None
if isinstance(reference, Dataset):
ref_dataset = reference.construct().handle
elif reference is not None:
raise TypeError('Reference dataset should be None or dataset instance')
# start construct data
if isinstance(data, str):
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromFile(
c_str(data),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
elif isinstance(data, scipy.sparse.csr_matrix):
self.__init_from_csr(data, params_str, ref_dataset)
elif isinstance(data, scipy.sparse.csc_matrix):
self.__init_from_csc(data, params_str, ref_dataset)
elif isinstance(data, np.ndarray):
self.__init_from_np2d(data, params_str, ref_dataset)
elif isinstance(data, list) and len(data) > 0 and all(isinstance(x, np.ndarray) for x in data):
self.__init_from_list_np2d(data, params_str, ref_dataset)
elif isinstance(data, dt_DataTable):
self.__init_from_np2d(data.to_numpy(), params_str, ref_dataset)
else:
try:
csr = scipy.sparse.csr_matrix(data)
self.__init_from_csr(csr, params_str, ref_dataset)
except BaseException:
raise TypeError(f'Cannot initialize Dataset from {type(data).__name__}')
if label is not None:
self.set_label(label)
if self.get_label() is None:
raise ValueError("Label should not be None")
if weight is not None:
self.set_weight(weight)
if group is not None:
self.set_group(group)
if isinstance(predictor, _InnerPredictor):
if self._predictor is None and init_score is not None:
_log_warning("The init_score will be overridden by the prediction of init_model.")
self._set_init_score_by_predictor(predictor, data)
elif init_score is not None:
self.set_init_score(init_score)
elif predictor is not None:
raise TypeError(f'Wrong predictor type {type(predictor).__name__}')
# set feature names
return self.set_feature_name(feature_name)
def __init_from_np2d(self, mat, params_str, ref_dataset):
"""Initialize data from a 2-D numpy matrix."""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
self.handle = ctypes.c_void_p()
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
_safe_call(_LIB.LGBM_DatasetCreateFromMat(
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int32(mat.shape[0]),
ctypes.c_int32(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_list_np2d(self, mats, params_str, ref_dataset):
"""Initialize data from a list of 2-D numpy matrices."""
ncol = mats[0].shape[1]
nrow = np.zeros((len(mats),), np.int32)
if mats[0].dtype == np.float64:
ptr_data = (ctypes.POINTER(ctypes.c_double) * len(mats))()
else:
ptr_data = (ctypes.POINTER(ctypes.c_float) * len(mats))()
holders = []
type_ptr_data = None
for i, mat in enumerate(mats):
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
if mat.shape[1] != ncol:
raise ValueError('Input arrays must have same number of columns')
nrow[i] = mat.shape[0]
if mat.dtype == np.float32 or mat.dtype == np.float64:
mats[i] = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
mats[i] = np.array(mat.reshape(mat.size), dtype=np.float32)
chunk_ptr_data, chunk_type_ptr_data, holder = c_float_array(mats[i])
if type_ptr_data is not None and chunk_type_ptr_data != type_ptr_data:
raise ValueError('Input chunks must have same type')
ptr_data[i] = chunk_ptr_data
type_ptr_data = chunk_type_ptr_data
holders.append(holder)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromMats(
ctypes.c_int32(len(mats)),
ctypes.cast(ptr_data, ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
ctypes.c_int(type_ptr_data),
nrow.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int32(ncol),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_csr(self, csr, params_str, ref_dataset):
"""Initialize data from a CSR matrix."""
if len(csr.indices) != len(csr.data):
raise ValueError(f'Length mismatch: {len(csr.indices)} vs {len(csr.data)}')
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
assert csr.shape[1] <= MAX_INT32
csr_indices = csr.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_DatasetCreateFromCSR(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_csc(self, csc, params_str, ref_dataset):
"""Initialize data from a CSC matrix."""
if len(csc.indices) != len(csc.data):
raise ValueError(f'Length mismatch: {len(csc.indices)} vs {len(csc.data)}')
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
assert csc.shape[0] <= MAX_INT32
csc_indices = csc.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_DatasetCreateFromCSC(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def construct(self):
"""Lazy init.
Returns
-------
self : Dataset
Constructed Dataset object.
"""
if self.handle is None:
if self.reference is not None:
reference_params = self.reference.get_params()
if self.get_params() != reference_params:
_log_warning('Overriding the parameters from Reference Dataset.')
self._update_params(reference_params)
if self.used_indices is None:
# create valid
self._lazy_init(self.data, label=self.label, reference=self.reference,
weight=self.weight, group=self.group,
init_score=self.init_score, predictor=self._predictor,
silent=self.silent, feature_name=self.feature_name, params=self.params)
else:
# construct subset
used_indices = list_to_1d_numpy(self.used_indices, np.int32, name='used_indices')
assert used_indices.flags.c_contiguous
if self.reference.group is not None:
group_info = np.array(self.reference.group).astype(np.int32, copy=False)
_, self.group = np.unique(np.repeat(range(len(group_info)), repeats=group_info)[self.used_indices],
return_counts=True)
self.handle = ctypes.c_void_p()
params_str = param_dict_to_str(self.params)
_safe_call(_LIB.LGBM_DatasetGetSubset(
self.reference.construct().handle,
used_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int32(used_indices.shape[0]),
c_str(params_str),
ctypes.byref(self.handle)))
if not self.free_raw_data:
self.get_data()
if self.group is not None:
self.set_group(self.group)
if self.get_label() is None:
raise ValueError("Label should not be None.")
if isinstance(self._predictor, _InnerPredictor) and self._predictor is not self.reference._predictor:
self.get_data()
self._set_init_score_by_predictor(self._predictor, self.data, used_indices)
else:
# create train
self._lazy_init(self.data, label=self.label,
weight=self.weight, group=self.group,
init_score=self.init_score, predictor=self._predictor,
silent=self.silent, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=self.params)
if self.free_raw_data:
self.data = None
return self
def create_valid(self, data, label=None, weight=None, group=None,
init_score=None, silent=False, params=None):
"""Create validation data align with current Dataset.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays
Data source of Dataset.
If string, it represents the path to txt file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
Label of the data.
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
init_score : list, numpy 1-D array, pandas Series or None, optional (default=None)
Init score for Dataset.
silent : bool, optional (default=False)
Whether to print messages during construction.
params : dict or None, optional (default=None)
Other parameters for validation Dataset.
Returns
-------
valid : Dataset
Validation Dataset with reference to self.
"""
ret = Dataset(data, label=label, reference=self,
weight=weight, group=group, init_score=init_score,
silent=silent, params=params, free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
return ret
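    # Illustrative sketch (not part of the library): typical use of create_valid.
    # The names X_train, y_train, X_val, y_val are hypothetical numpy arrays
    # supplied by the caller.
    #
    #     import lightgbm as lgb
    #     train_data = lgb.Dataset(X_train, label=y_train)
    #     valid_data = train_data.create_valid(X_val, label=y_val)
    #     bst = lgb.train({'objective': 'binary'}, train_data, valid_sets=[valid_data])
    #
    # Creating the validation set from the training Dataset (rather than via the
    # Dataset constructor) ensures both share the same bin mappers and parameters.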
def subset(self, used_indices, params=None):
"""Get subset of current Dataset.
Parameters
----------
used_indices : list of int
Indices used to create the subset.
params : dict or None, optional (default=None)
These parameters will be passed to Dataset constructor.
Returns
-------
subset : Dataset
Subset of the current Dataset.
"""
if params is None:
params = self.params
ret = Dataset(None, reference=self, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=params,
free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
ret.used_indices = sorted(used_indices)
return ret
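    # Illustrative sketch (not part of the library): carving out a row subset.
    # X, y and the index range are hypothetical.
    #
    #     full_data = lgb.Dataset(X, label=y, free_raw_data=False).construct()
    #     fold = full_data.subset(list(range(1000))).construct()
    #
    # The subset shares the reference Dataset's binning; only the row indices differ.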
def save_binary(self, filename):
"""Save Dataset to a binary file.
.. note::
Please note that `init_score` is not saved in binary file.
If you need it, please set it again after loading Dataset.
Parameters
----------
filename : string
Name of the output file.
Returns
-------
self : Dataset
Returns self.
"""
_safe_call(_LIB.LGBM_DatasetSaveBinary(
self.construct().handle,
c_str(filename)))
return self
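    # Illustrative sketch (not part of the library): saving a Dataset to the
    # LightGBM binary format and reloading it later. The file name is hypothetical.
    #
    #     lgb.Dataset(X_train, label=y_train).save_binary('train.bin')
    #     reloaded = lgb.Dataset('train.bin')   # much faster to construct than from raw data
    #
    # Note that init_score is not stored in the binary file (see the docstring above).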
def _update_params(self, params):
if not params:
return self
params = deepcopy(params)
def update():
if not self.params:
self.params = params
else:
self.params_back_up = deepcopy(self.params)
self.params.update(params)
if self.handle is None:
update()
elif params is not None:
ret = _LIB.LGBM_DatasetUpdateParamChecking(
c_str(param_dict_to_str(self.params)),
c_str(param_dict_to_str(params)))
if ret != 0:
# could be updated if data is not freed
if self.data is not None:
update()
self._free_handle()
else:
raise LightGBMError(_LIB.LGBM_GetLastError().decode('utf-8'))
return self
def _reverse_update_params(self):
if self.handle is None:
self.params = deepcopy(self.params_back_up)
self.params_back_up = None
return self
def set_field(self, field_name, data):
"""Set property into the Dataset.
Parameters
----------
field_name : string
The field name of the information.
data : list, numpy 1-D array, pandas Series or None
The array of data to be set.
Returns
-------
self : Dataset
Dataset with set property.
"""
if self.handle is None:
raise Exception(f"Cannot set {field_name} before construct dataset")
if data is None:
# set to None
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
None,
ctypes.c_int(0),
ctypes.c_int(FIELD_TYPE_MAPPER[field_name])))
return self
dtype = np.float32
if field_name == 'group':
dtype = np.int32
elif field_name == 'init_score':
dtype = np.float64
data = list_to_1d_numpy(data, dtype, name=field_name)
if data.dtype == np.float32 or data.dtype == np.float64:
ptr_data, type_data, _ = c_float_array(data)
elif data.dtype == np.int32:
ptr_data, type_data, _ = c_int_array(data)
else:
raise TypeError(f"Expected np.float32/64 or np.int32, met type({data.dtype})")
if type_data != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Input type error for set_field")
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
ptr_data,
ctypes.c_int(len(data)),
ctypes.c_int(type_data)))
self.version += 1
return self
def get_field(self, field_name):
"""Get property from the Dataset.
Parameters
----------
field_name : string
The field name of the information.
Returns
-------
info : numpy array
A numpy array with information from the Dataset.
"""
if self.handle is None:
raise Exception(f"Cannot get {field_name} before construct Dataset")
tmp_out_len = ctypes.c_int(0)
out_type = ctypes.c_int(0)
ret = ctypes.POINTER(ctypes.c_void_p)()
_safe_call(_LIB.LGBM_DatasetGetField(
self.handle,
c_str(field_name),
ctypes.byref(tmp_out_len),
ctypes.byref(ret),
ctypes.byref(out_type)))
if out_type.value != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Return type error for get_field")
if tmp_out_len.value == 0:
return None
if out_type.value == C_API_DTYPE_INT32:
return cint32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int32)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT32:
return cfloat32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_float)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT64:
return cfloat64_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_double)), tmp_out_len.value)
else:
raise TypeError("Unknown type")
def set_categorical_feature(self, categorical_feature):
"""Set categorical features.
Parameters
----------
categorical_feature : list of int or strings
Names or indices of categorical features.
Returns
-------
self : Dataset
Dataset with set categorical features.
"""
if self.categorical_feature == categorical_feature:
return self
if self.data is not None:
if self.categorical_feature is None:
self.categorical_feature = categorical_feature
return self._free_handle()
elif categorical_feature == 'auto':
_log_warning('Using categorical_feature in Dataset.')
return self
else:
_log_warning('categorical_feature in Dataset is overridden.\n'
f'New categorical_feature is {sorted(list(categorical_feature))}')
self.categorical_feature = categorical_feature
return self._free_handle()
else:
raise LightGBMError("Cannot set categorical feature after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
def _set_predictor(self, predictor):
"""Set predictor for continued training.
        It is not recommended for users to call this function.
Please use init_model argument in engine.train() or engine.cv() instead.
"""
if predictor is self._predictor and (predictor is None or predictor.current_iteration() == self._predictor.current_iteration()):
return self
if self.handle is None:
self._predictor = predictor
elif self.data is not None:
self._predictor = predictor
self._set_init_score_by_predictor(self._predictor, self.data)
elif self.used_indices is not None and self.reference is not None and self.reference.data is not None:
self._predictor = predictor
self._set_init_score_by_predictor(self._predictor, self.reference.data, self.used_indices)
else:
raise LightGBMError("Cannot set predictor after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
return self
def set_reference(self, reference):
"""Set reference Dataset.
Parameters
----------
reference : Dataset
Reference that is used as a template to construct the current Dataset.
Returns
-------
self : Dataset
Dataset with set reference.
"""
self.set_categorical_feature(reference.categorical_feature) \
.set_feature_name(reference.feature_name) \
._set_predictor(reference._predictor)
        # we're done if self and reference share a common upstream reference
if self.get_ref_chain().intersection(reference.get_ref_chain()):
return self
if self.data is not None:
self.reference = reference
return self._free_handle()
else:
raise LightGBMError("Cannot set reference after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
def set_feature_name(self, feature_name):
"""Set feature name.
Parameters
----------
feature_name : list of strings
Feature names.
Returns
-------
self : Dataset
Dataset with set feature name.
"""
if feature_name != 'auto':
self.feature_name = feature_name
if self.handle is not None and feature_name is not None and feature_name != 'auto':
if len(feature_name) != self.num_feature():
raise ValueError(f"Length of feature_name({len(feature_name)}) and num_feature({self.num_feature()}) don't match")
c_feature_name = [c_str(name) for name in feature_name]
_safe_call(_LIB.LGBM_DatasetSetFeatureNames(
self.handle,
c_array(ctypes.c_char_p, c_feature_name),
ctypes.c_int(len(feature_name))))
return self
def set_label(self, label):
"""Set label of Dataset.
Parameters
----------
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None
The label information to be set into Dataset.
Returns
-------
self : Dataset
Dataset with set label.
"""
self.label = label
if self.handle is not None:
label = list_to_1d_numpy(_label_from_pandas(label), name='label')
self.set_field('label', label)
self.label = self.get_field('label') # original values can be modified at cpp side
return self
def set_weight(self, weight):
"""Set weight of each instance.
Parameters
----------
weight : list, numpy 1-D array, pandas Series or None
Weight to be set for each data point.
Returns
-------
self : Dataset
Dataset with set weight.
"""
if weight is not None and np.all(weight == 1):
weight = None
self.weight = weight
if self.handle is not None and weight is not None:
weight = list_to_1d_numpy(weight, name='weight')
self.set_field('weight', weight)
self.weight = self.get_field('weight') # original values can be modified at cpp side
return self
def set_init_score(self, init_score):
"""Set init score of Booster to start from.
Parameters
----------
init_score : list, numpy 1-D array, pandas Series or None
Init score for Booster.
Returns
-------
self : Dataset
Dataset with set init score.
"""
self.init_score = init_score
if self.handle is not None and init_score is not None:
init_score = list_to_1d_numpy(init_score, np.float64, name='init_score')
self.set_field('init_score', init_score)
self.init_score = self.get_field('init_score') # original values can be modified at cpp side
return self
def set_group(self, group):
"""Set group size of Dataset (used for ranking).
Parameters
----------
group : list, numpy 1-D array, pandas Series or None
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
Returns
-------
self : Dataset
Dataset with set group.
"""
self.group = group
if self.handle is not None and group is not None:
group = list_to_1d_numpy(group, np.int32, name='group')
self.set_field('group', group)
return self
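    # Illustrative sketch (not part of the library): group sizes for a
    # learning-to-rank Dataset. Here 100 rows are split into 6 queries, matching
    # the example in the docstring above; X and y are hypothetical.
    #
    #     rank_data = lgb.Dataset(X, label=y)
    #     rank_data.set_group([10, 20, 40, 10, 10, 10])   # sums to the 100 rows of X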
def get_feature_name(self):
"""Get the names of columns (features) in the Dataset.
Returns
-------
feature_names : list
The names of columns (features) in the Dataset.
"""
if self.handle is None:
raise LightGBMError("Cannot get feature_name before construct dataset")
num_feature = self.num_feature()
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_DatasetGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(reserved_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if num_feature != tmp_out_len.value:
raise ValueError("Length of feature names doesn't equal with num_feature")
actual_string_buffer_size = required_string_buffer_size.value
# if buffer length is not long enough, reallocate buffers
if reserved_string_buffer_size < actual_string_buffer_size:
string_buffers = [ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_DatasetGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(actual_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
return [string_buffers[i].value.decode('utf-8') for i in range(num_feature)]
def get_label(self):
"""Get the label of the Dataset.
Returns
-------
label : numpy array or None
The label information from the Dataset.
"""
if self.label is None:
self.label = self.get_field('label')
return self.label
def get_weight(self):
"""Get the weight of the Dataset.
Returns
-------
weight : numpy array or None
Weight for each data point from the Dataset.
"""
if self.weight is None:
self.weight = self.get_field('weight')
return self.weight
def get_init_score(self):
"""Get the initial score of the Dataset.
Returns
-------
init_score : numpy array or None
Init score of Booster.
"""
if self.init_score is None:
self.init_score = self.get_field('init_score')
return self.init_score
def get_data(self):
"""Get the raw data of the Dataset.
Returns
-------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of numpy arrays or None
Raw data used in the Dataset construction.
"""
if self.handle is None:
raise Exception("Cannot get data before construct Dataset")
if self.need_slice and self.used_indices is not None and self.reference is not None:
self.data = self.reference.data
if self.data is not None:
if isinstance(self.data, np.ndarray) or scipy.sparse.issparse(self.data):
self.data = self.data[self.used_indices, :]
elif isinstance(self.data, pd_DataFrame):
self.data = self.data.iloc[self.used_indices].copy()
elif isinstance(self.data, dt_DataTable):
self.data = self.data[self.used_indices, :]
else:
_log_warning(f"Cannot subset {type(self.data).__name__} type of raw data.\n"
"Returning original raw data")
self.need_slice = False
if self.data is None:
raise LightGBMError("Cannot call `get_data` after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
return self.data
def get_group(self):
"""Get the group of the Dataset.
Returns
-------
group : numpy array or None
Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10, 10]``, that means that you have 6 groups,
where the first 10 records are in the first group, records 11-30 are in the second group, records 31-70 are in the third group, etc.
"""
if self.group is None:
self.group = self.get_field('group')
if self.group is not None:
                # group data from LightGBM is boundary data; convert it to group sizes
self.group = np.diff(self.group)
return self.group
def num_data(self):
"""Get the number of rows in the Dataset.
Returns
-------
number_of_rows : int
The number of rows in the Dataset.
"""
if self.handle is not None:
ret = ctypes.c_int(0)
_safe_call(_LIB.LGBM_DatasetGetNumData(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_data before construct dataset")
def num_feature(self):
"""Get the number of columns (features) in the Dataset.
Returns
-------
number_of_columns : int
The number of columns (features) in the Dataset.
"""
if self.handle is not None:
ret = ctypes.c_int(0)
_safe_call(_LIB.LGBM_DatasetGetNumFeature(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_feature before construct dataset")
def get_ref_chain(self, ref_limit=100):
"""Get a chain of Dataset objects.
Starts with r, then goes to r.reference (if exists),
then to r.reference.reference, etc.
until we hit ``ref_limit`` or a reference loop.
Parameters
----------
ref_limit : int, optional (default=100)
The limit number of references.
Returns
-------
ref_chain : set of Dataset
Chain of references of the Datasets.
"""
head = self
ref_chain = set()
while len(ref_chain) < ref_limit:
if isinstance(head, Dataset):
ref_chain.add(head)
if (head.reference is not None) and (head.reference not in ref_chain):
head = head.reference
else:
break
else:
break
return ref_chain
def add_features_from(self, other):
"""Add features from other Dataset to the current Dataset.
Both Datasets must be constructed before calling this method.
Parameters
----------
other : Dataset
The Dataset to take features from.
Returns
-------
self : Dataset
Dataset with the new features added.
"""
if self.handle is None or other.handle is None:
raise ValueError('Both source and target Datasets must be constructed before adding features')
_safe_call(_LIB.LGBM_DatasetAddFeaturesFrom(self.handle, other.handle))
was_none = self.data is None
old_self_data_type = type(self.data).__name__
if other.data is None:
self.data = None
elif self.data is not None:
if isinstance(self.data, np.ndarray):
if isinstance(other.data, np.ndarray):
self.data = np.hstack((self.data, other.data))
elif scipy.sparse.issparse(other.data):
self.data = np.hstack((self.data, other.data.toarray()))
elif isinstance(other.data, pd_DataFrame):
self.data = np.hstack((self.data, other.data.values))
elif isinstance(other.data, dt_DataTable):
self.data = np.hstack((self.data, other.data.to_numpy()))
else:
self.data = None
elif scipy.sparse.issparse(self.data):
sparse_format = self.data.getformat()
if isinstance(other.data, np.ndarray) or scipy.sparse.issparse(other.data):
self.data = scipy.sparse.hstack((self.data, other.data), format=sparse_format)
elif isinstance(other.data, pd_DataFrame):
self.data = scipy.sparse.hstack((self.data, other.data.values), format=sparse_format)
elif isinstance(other.data, dt_DataTable):
self.data = scipy.sparse.hstack((self.data, other.data.to_numpy()), format=sparse_format)
else:
self.data = None
elif isinstance(self.data, pd_DataFrame):
if not PANDAS_INSTALLED:
raise LightGBMError("Cannot add features to DataFrame type of raw data "
"without pandas installed. "
"Install pandas and restart your session.")
if isinstance(other.data, np.ndarray):
self.data = concat((self.data, pd_DataFrame(other.data)),
axis=1, ignore_index=True)
elif scipy.sparse.issparse(other.data):
self.data = concat((self.data, pd_DataFrame(other.data.toarray())),
axis=1, ignore_index=True)
elif isinstance(other.data, pd_DataFrame):
self.data = concat((self.data, other.data),
axis=1, ignore_index=True)
elif isinstance(other.data, dt_DataTable):
self.data = concat((self.data, pd_DataFrame(other.data.to_numpy())),
axis=1, ignore_index=True)
else:
self.data = None
elif isinstance(self.data, dt_DataTable):
if isinstance(other.data, np.ndarray):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data)))
elif scipy.sparse.issparse(other.data):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.toarray())))
elif isinstance(other.data, pd_DataFrame):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.values)))
elif isinstance(other.data, dt_DataTable):
self.data = dt_DataTable(np.hstack((self.data.to_numpy(), other.data.to_numpy())))
else:
self.data = None
else:
self.data = None
if self.data is None:
err_msg = (f"Cannot add features from {type(other.data).__name__} type of raw data to "
f"{old_self_data_type} type of raw data.\n")
err_msg += ("Set free_raw_data=False when construct Dataset to avoid this"
if was_none else "Freeing raw data")
_log_warning(err_msg)
self.feature_name = self.get_feature_name()
_log_warning("Reseting categorical features.\n"
"You can set new categorical features via ``set_categorical_feature`` method")
self.categorical_feature = "auto"
self.pandas_categorical = None
return self
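    # Illustrative sketch (not part of the library): horizontally merging the
    # features of two constructed Datasets. X_a and X_b are hypothetical arrays
    # with the same number of rows.
    #
    #     d1 = lgb.Dataset(X_a, label=y, free_raw_data=False).construct()
    #     d2 = lgb.Dataset(X_b, free_raw_data=False).construct()
    #     d1.add_features_from(d2)   # d1 now holds the columns of both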
def _dump_text(self, filename):
"""Save Dataset to a text file.
This format cannot be loaded back in by LightGBM, but is useful for debugging purposes.
Parameters
----------
filename : string
Name of the output file.
Returns
-------
self : Dataset
Returns self.
"""
_safe_call(_LIB.LGBM_DatasetDumpText(
self.construct().handle,
c_str(filename)))
return self
class Booster:
"""Booster in LightGBM."""
def __init__(self, params=None, train_set=None, model_file=None, model_str=None, silent=False):
"""Initialize the Booster.
Parameters
----------
params : dict or None, optional (default=None)
Parameters for Booster.
train_set : Dataset or None, optional (default=None)
Training dataset.
model_file : string or None, optional (default=None)
Path to the model file.
model_str : string or None, optional (default=None)
Model will be loaded from this string.
silent : bool, optional (default=False)
Whether to print messages during construction.
"""
self.handle = None
self.network = False
self.__need_reload_eval_info = True
self._train_data_name = "training"
self.__attr = {}
self.__set_objective_to_none = False
self.best_iteration = -1
self.best_score = {}
params = {} if params is None else deepcopy(params)
        # the user can set verbose via params, which takes higher priority
if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
params["verbose"] = -1
if train_set is not None:
# Training task
if not isinstance(train_set, Dataset):
raise TypeError(f'Training data should be Dataset instance, met {type(train_set).__name__}')
params = _choose_param_value(
main_param_name="machines",
params=params,
default_value=None
)
# if "machines" is given, assume user wants to do distributed learning, and set up network
if params["machines"] is None:
params.pop("machines", None)
else:
machines = params["machines"]
if isinstance(machines, str):
num_machines_from_machine_list = len(machines.split(','))
elif isinstance(machines, (list, set)):
num_machines_from_machine_list = len(machines)
machines = ','.join(machines)
else:
raise ValueError("Invalid machines in params.")
params = _choose_param_value(
main_param_name="num_machines",
params=params,
default_value=num_machines_from_machine_list
)
params = _choose_param_value(
main_param_name="local_listen_port",
params=params,
default_value=12400
)
self.set_network(
machines=machines,
local_listen_port=params["local_listen_port"],
listen_time_out=params.get("time_out", 120),
num_machines=params["num_machines"]
)
# construct booster object
train_set.construct()
# copy the parameters from train_set
params.update(train_set.get_params())
params_str = param_dict_to_str(params)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_BoosterCreate(
train_set.handle,
c_str(params_str),
ctypes.byref(self.handle)))
# save reference to data
self.train_set = train_set
self.valid_sets = []
self.name_valid_sets = []
self.__num_dataset = 1
self.__init_predictor = train_set._predictor
if self.__init_predictor is not None:
_safe_call(_LIB.LGBM_BoosterMerge(
self.handle,
self.__init_predictor.handle))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
# buffer for inner predict
self.__inner_predict_buffer = [None]
self.__is_predicted_cur_iter = [False]
self.__get_eval_info()
self.pandas_categorical = train_set.pandas_categorical
self.train_set_version = train_set.version
elif model_file is not None:
# Prediction task
out_num_iterations = ctypes.c_int(0)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(model_file),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
elif model_str is not None:
self.model_from_string(model_str, not silent)
else:
raise TypeError('Need at least one training dataset or model file or model string '
'to create Booster instance')
self.params = params
def __del__(self):
try:
if self.network:
self.free_network()
except AttributeError:
pass
try:
if self.handle is not None:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
except AttributeError:
pass
def __copy__(self):
return self.__deepcopy__(None)
def __deepcopy__(self, _):
model_str = self.model_to_string(num_iteration=-1)
booster = Booster(model_str=model_str)
return booster
def __getstate__(self):
this = self.__dict__.copy()
handle = this['handle']
this.pop('train_set', None)
this.pop('valid_sets', None)
if handle is not None:
this["handle"] = self.model_to_string(num_iteration=-1)
return this
def __setstate__(self, state):
model_str = state.get('handle', None)
if model_str is not None:
handle = ctypes.c_void_p()
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(handle)))
state['handle'] = handle
self.__dict__.update(state)
def free_dataset(self):
"""Free Booster's Datasets.
Returns
-------
self : Booster
Booster without Datasets.
"""
self.__dict__.pop('train_set', None)
self.__dict__.pop('valid_sets', None)
self.__num_dataset = 0
return self
def _free_buffer(self):
self.__inner_predict_buffer = []
self.__is_predicted_cur_iter = []
return self
def set_network(
self,
machines: Union[List[str], Set[str], str],
local_listen_port: int = 12400,
listen_time_out: int = 120,
num_machines: int = 1
) -> "Booster":
"""Set the network configuration.
Parameters
----------
machines : list, set or string
Names of machines.
local_listen_port : int, optional (default=12400)
TCP listen port for local machines.
listen_time_out : int, optional (default=120)
Socket time-out in minutes.
num_machines : int, optional (default=1)
The number of machines for distributed learning application.
Returns
-------
self : Booster
Booster with set network.
"""
if isinstance(machines, (list, set)):
machines = ','.join(machines)
_safe_call(_LIB.LGBM_NetworkInit(c_str(machines),
ctypes.c_int(local_listen_port),
ctypes.c_int(listen_time_out),
ctypes.c_int(num_machines)))
self.network = True
return self
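    # Illustrative sketch (not part of the library): socket-based distributed
    # training setup. The machine list, port and count are hypothetical, and each
    # worker typically runs the same call with the full machine list.
    #
    #     bst.set_network(machines='10.0.0.1:12400,10.0.0.2:12400',
    #                     local_listen_port=12400,
    #                     listen_time_out=120,
    #                     num_machines=2)
    #
    # The same values can instead be passed through params ("machines",
    # "num_machines", "local_listen_port"), which the constructor forwards here.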
def free_network(self):
"""Free Booster's network.
Returns
-------
self : Booster
Booster with freed network.
"""
_safe_call(_LIB.LGBM_NetworkFree())
self.network = False
return self
def trees_to_dataframe(self):
"""Parse the fitted model and return in an easy-to-read pandas DataFrame.
The returned DataFrame has the following columns.
- ``tree_index`` : int64, which tree a node belongs to. 0-based, so a value of ``6``, for example, means "this node is in the 7th tree".
- ``node_depth`` : int64, how far a node is from the root of the tree. The root node has a value of ``1``, its direct children are ``2``, etc.
- ``node_index`` : string, unique identifier for a node.
- ``left_child`` : string, ``node_index`` of the child node to the left of a split. ``None`` for leaf nodes.
- ``right_child`` : string, ``node_index`` of the child node to the right of a split. ``None`` for leaf nodes.
- ``parent_index`` : string, ``node_index`` of this node's parent. ``None`` for the root node.
- ``split_feature`` : string, name of the feature used for splitting. ``None`` for leaf nodes.
- ``split_gain`` : float64, gain from adding this split to the tree. ``NaN`` for leaf nodes.
- ``threshold`` : float64, value of the feature used to decide which side of the split a record will go down. ``NaN`` for leaf nodes.
- ``decision_type`` : string, logical operator describing how to compare a value to ``threshold``.
For example, ``split_feature = "Column_10", threshold = 15, decision_type = "<="`` means that
records where ``Column_10 <= 15`` follow the left side of the split, otherwise follows the right side of the split. ``None`` for leaf nodes.
- ``missing_direction`` : string, split direction that missing values should go to. ``None`` for leaf nodes.
- ``missing_type`` : string, describes what types of values are treated as missing.
- ``value`` : float64, predicted value for this leaf node, multiplied by the learning rate.
- ``weight`` : float64 or int64, sum of hessian (second-order derivative of objective), summed over observations that fall in this node.
- ``count`` : int64, number of records in the training data that fall into this node.
Returns
-------
result : pandas DataFrame
Returns a pandas DataFrame of the parsed model.
"""
if not PANDAS_INSTALLED:
raise LightGBMError('This method cannot be run without pandas installed. '
'You must install pandas and restart your session to use this method.')
if self.num_trees() == 0:
raise LightGBMError('There are no trees in this Booster and thus nothing to parse')
def _is_split_node(tree):
return 'split_index' in tree.keys()
def create_node_record(tree, node_depth=1, tree_index=None,
feature_names=None, parent_node=None):
def _get_node_index(tree, tree_index):
tree_num = f'{tree_index}-' if tree_index is not None else ''
is_split = _is_split_node(tree)
node_type = 'S' if is_split else 'L'
                # a single-node tree won't have `leaf_index`, so return 0
node_num = tree.get('split_index' if is_split else 'leaf_index', 0)
return f"{tree_num}{node_type}{node_num}"
def _get_split_feature(tree, feature_names):
if _is_split_node(tree):
if feature_names is not None:
feature_name = feature_names[tree['split_feature']]
else:
feature_name = tree['split_feature']
else:
feature_name = None
return feature_name
def _is_single_node_tree(tree):
return set(tree.keys()) == {'leaf_value'}
# Create the node record, and populate universal data members
node = OrderedDict()
node['tree_index'] = tree_index
node['node_depth'] = node_depth
node['node_index'] = _get_node_index(tree, tree_index)
node['left_child'] = None
node['right_child'] = None
node['parent_index'] = parent_node
node['split_feature'] = _get_split_feature(tree, feature_names)
node['split_gain'] = None
node['threshold'] = None
node['decision_type'] = None
node['missing_direction'] = None
node['missing_type'] = None
node['value'] = None
node['weight'] = None
node['count'] = None
# Update values to reflect node type (leaf or split)
if _is_split_node(tree):
node['left_child'] = _get_node_index(tree['left_child'], tree_index)
node['right_child'] = _get_node_index(tree['right_child'], tree_index)
node['split_gain'] = tree['split_gain']
node['threshold'] = tree['threshold']
node['decision_type'] = tree['decision_type']
node['missing_direction'] = 'left' if tree['default_left'] else 'right'
node['missing_type'] = tree['missing_type']
node['value'] = tree['internal_value']
node['weight'] = tree['internal_weight']
node['count'] = tree['internal_count']
else:
node['value'] = tree['leaf_value']
if not _is_single_node_tree(tree):
node['weight'] = tree['leaf_weight']
node['count'] = tree['leaf_count']
return node
def tree_dict_to_node_list(tree, node_depth=1, tree_index=None,
feature_names=None, parent_node=None):
node = create_node_record(tree,
node_depth=node_depth,
tree_index=tree_index,
feature_names=feature_names,
parent_node=parent_node)
res = [node]
if _is_split_node(tree):
# traverse the next level of the tree
children = ['left_child', 'right_child']
for child in children:
subtree_list = tree_dict_to_node_list(
tree[child],
node_depth=node_depth + 1,
tree_index=tree_index,
feature_names=feature_names,
parent_node=node['node_index'])
# In tree format, "subtree_list" is a list of node records (dicts),
# and we add node to the list.
res.extend(subtree_list)
return res
model_dict = self.dump_model()
feature_names = model_dict['feature_names']
model_list = []
for tree in model_dict['tree_info']:
model_list.extend(tree_dict_to_node_list(tree['tree_structure'],
tree_index=tree['tree_index'],
feature_names=feature_names))
return pd_DataFrame(model_list, columns=model_list[0].keys())
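    # Illustrative sketch (not part of the library): inspecting the parsed model.
    # `bst` is a hypothetical trained Booster; pandas must be installed.
    #
    #     df = bst.trees_to_dataframe()
    #     first_tree = df[df['tree_index'] == 0]
    #     splits_only = df[df['split_feature'].notnull()]
    #     print(splits_only[['node_index', 'split_feature', 'threshold', 'split_gain']])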
def set_train_data_name(self, name):
"""Set the name to the training Dataset.
Parameters
----------
name : string
Name for the training Dataset.
Returns
-------
self : Booster
Booster with set training Dataset name.
"""
self._train_data_name = name
return self
def add_valid(self, data, name):
"""Add validation data.
Parameters
----------
data : Dataset
Validation data.
name : string
Name of validation data.
Returns
-------
self : Booster
Booster with set validation data.
"""
if not isinstance(data, Dataset):
raise TypeError(f'Validation data should be Dataset instance, met {type(data).__name__}')
if data._predictor is not self.__init_predictor:
raise LightGBMError("Add validation data failed, "
"you should use same predictor for these data")
_safe_call(_LIB.LGBM_BoosterAddValidData(
self.handle,
data.construct().handle))
self.valid_sets.append(data)
self.name_valid_sets.append(name)
self.__num_dataset += 1
self.__inner_predict_buffer.append(None)
self.__is_predicted_cur_iter.append(False)
return self
def reset_parameter(self, params):
"""Reset parameters of Booster.
Parameters
----------
params : dict
New parameters for Booster.
Returns
-------
self : Booster
Booster with new parameters.
"""
params_str = param_dict_to_str(params)
if params_str:
_safe_call(_LIB.LGBM_BoosterResetParameter(
self.handle,
c_str(params_str)))
self.params.update(params)
return self
def update(self, train_set=None, fobj=None):
"""Update Booster for one iteration.
Parameters
----------
train_set : Dataset or None, optional (default=None)
Training data.
If None, last training data is used.
fobj : callable or None, optional (default=None)
Customized objective function.
Should accept two parameters: preds, train_data,
and return (grad, hess).
preds : list or numpy 1-D array
The predicted values.
Predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task.
train_data : Dataset
The training dataset.
grad : list or numpy 1-D array
The value of the first order derivative (gradient) of the loss
with respect to the elements of preds for each sample point.
hess : list or numpy 1-D array
The value of the second order derivative (Hessian) of the loss
with respect to the elements of preds for each sample point.
For multi-class task, the preds is group by class_id first, then group by row_id.
If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i]
and you should group grad and hess in this way as well.
Returns
-------
is_finished : bool
Whether the update was successfully finished.
"""
        # need to reset the training data
if train_set is None and self.train_set_version != self.train_set.version:
train_set = self.train_set
is_the_same_train_set = False
else:
is_the_same_train_set = train_set is self.train_set and self.train_set_version == train_set.version
if train_set is not None and not is_the_same_train_set:
if not isinstance(train_set, Dataset):
raise TypeError(f'Training data should be Dataset instance, met {type(train_set).__name__}')
if train_set._predictor is not self.__init_predictor:
raise LightGBMError("Replace training data failed, "
"you should use same predictor for these data")
self.train_set = train_set
_safe_call(_LIB.LGBM_BoosterResetTrainingData(
self.handle,
self.train_set.construct().handle))
self.__inner_predict_buffer[0] = None
self.train_set_version = self.train_set.version
is_finished = ctypes.c_int(0)
if fobj is None:
if self.__set_objective_to_none:
raise LightGBMError('Cannot update due to null objective function.')
_safe_call(_LIB.LGBM_BoosterUpdateOneIter(
self.handle,
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
return is_finished.value == 1
else:
if not self.__set_objective_to_none:
self.reset_parameter({"objective": "none"}).__set_objective_to_none = True
grad, hess = fobj(self.__inner_predict(0), self.train_set)
return self.__boost(grad, hess)
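    # Illustrative sketch (not part of the library): one manual boosting round
    # with a custom objective. The gradient/Hessian below are those of the
    # logistic loss for binary labels; `bst` and its training Dataset are
    # hypothetical, and preds are raw margins as described above.
    #
    #     import numpy as np
    #
    #     def logistic_obj(preds, train_data):
    #         y = train_data.get_label()
    #         p = 1.0 / (1.0 + np.exp(-preds))
    #         grad = p - y            # first derivative of the log loss w.r.t. raw score
    #         hess = p * (1.0 - p)    # second derivative
    #         return grad, hess
    #
    #     finished = bst.update(fobj=logistic_obj)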
def __boost(self, grad, hess):
"""Boost Booster for one iteration with customized gradient statistics.
.. note::
Score is returned before any transformation,
e.g. it is raw margin instead of probability of positive class for binary task.
For multi-class task, the score is group by class_id first, then group by row_id.
If you want to get i-th row score in j-th class, the access way is score[j * num_data + i]
and you should group grad and hess in this way as well.
Parameters
----------
grad : list or numpy 1-D array
The value of the first order derivative (gradient) of the loss
with respect to the elements of score for each sample point.
hess : list or numpy 1-D array
The value of the second order derivative (Hessian) of the loss
with respect to the elements of score for each sample point.
Returns
-------
is_finished : bool
Whether the boost was successfully finished.
"""
grad = list_to_1d_numpy(grad, name='gradient')
hess = list_to_1d_numpy(hess, name='hessian')
assert grad.flags.c_contiguous
assert hess.flags.c_contiguous
if len(grad) != len(hess):
raise ValueError(f"Lengths of gradient({len(grad)}) and hessian({len(hess)}) don't match")
is_finished = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterUpdateOneIterCustom(
self.handle,
grad.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
hess.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
return is_finished.value == 1
def rollback_one_iter(self):
"""Rollback one iteration.
Returns
-------
self : Booster
Booster with rolled back one iteration.
"""
_safe_call(_LIB.LGBM_BoosterRollbackOneIter(
self.handle))
self.__is_predicted_cur_iter = [False for _ in range(self.__num_dataset)]
return self
def current_iteration(self):
"""Get the index of the current iteration.
Returns
-------
cur_iter : int
The index of the current iteration.
"""
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value
def num_model_per_iteration(self):
"""Get number of models per iteration.
Returns
-------
model_per_iter : int
The number of models per iteration.
"""
model_per_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterNumModelPerIteration(
self.handle,
ctypes.byref(model_per_iter)))
return model_per_iter.value
def num_trees(self):
"""Get number of weak sub-models.
Returns
-------
num_trees : int
The number of weak sub-models.
"""
num_trees = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterNumberOfTotalModel(
self.handle,
ctypes.byref(num_trees)))
return num_trees.value
def upper_bound(self):
"""Get upper bound value of a model.
Returns
-------
upper_bound : double
Upper bound value of the model.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetUpperBoundValue(
self.handle,
ctypes.byref(ret)))
return ret.value
def lower_bound(self):
"""Get lower bound value of a model.
Returns
-------
lower_bound : double
Lower bound value of the model.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetLowerBoundValue(
self.handle,
ctypes.byref(ret)))
return ret.value
def eval(self, data, name, feval=None):
"""Evaluate for data.
Parameters
----------
data : Dataset
Data for the evaluating.
name : string
Name of the data.
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, eval_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
If ``fobj`` is specified, predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task in this case.
eval_data : Dataset
The evaluation dataset.
eval_name : string
The name of evaluation function (without whitespace).
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
For multi-class task, the preds is group by class_id first, then group by row_id.
If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
if not isinstance(data, Dataset):
raise TypeError("Can only eval for Dataset instance")
data_idx = -1
if data is self.train_set:
data_idx = 0
else:
for i in range(len(self.valid_sets)):
if data is self.valid_sets[i]:
data_idx = i + 1
break
# need to push new valid data
if data_idx == -1:
self.add_valid(data, name)
data_idx = self.__num_dataset - 1
return self.__inner_eval(name, data_idx, feval)
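    # Illustrative sketch (not part of the library): a custom metric passed as
    # `feval`. The error-rate metric and the names `bst` / `valid_data` are
    # hypothetical; this assumes a built-in binary objective, so preds are
    # probabilities rather than raw margins.
    #
    #     import numpy as np
    #
    #     def error_rate(preds, eval_data):
    #         y = eval_data.get_label()
    #         return 'error', np.mean((preds > 0.5) != y), False
    #
    #     results = bst.eval(valid_data, 'valid_0', feval=error_rate)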
def eval_train(self, feval=None):
"""Evaluate for training data.
Parameters
----------
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, train_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
If ``fobj`` is specified, predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task in this case.
train_data : Dataset
The training dataset.
eval_name : string
The name of evaluation function (without whitespace).
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
For multi-class task, the preds is group by class_id first, then group by row_id.
If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
return self.__inner_eval(self._train_data_name, 0, feval)
def eval_valid(self, feval=None):
"""Evaluate for validation data.
Parameters
----------
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, valid_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
If ``fobj`` is specified, predicted values are returned before any transformation,
e.g. they are raw margin instead of probability of positive class for binary task in this case.
valid_data : Dataset
The validation dataset.
eval_name : string
The name of evaluation function (without whitespace).
eval_result : float
The eval result.
is_higher_better : bool
Is eval result higher better, e.g. AUC is ``is_higher_better``.
For multi-class task, the preds is group by class_id first, then group by row_id.
If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
return [item for i in range(1, self.__num_dataset)
for item in self.__inner_eval(self.name_valid_sets[i - 1], i, feval)]
def save_model(self, filename, num_iteration=None, start_iteration=0, importance_type='split'):
"""Save Booster to file.
Parameters
----------
filename : string
Filename to save Booster.
num_iteration : int or None, optional (default=None)
Index of the iteration that should be saved.
If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
If <= 0, all iterations are saved.
start_iteration : int, optional (default=0)
Start index of the iteration that should be saved.
importance_type : string, optional (default="split")
What type of feature importance should be saved.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
self : Booster
Returns self.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
_safe_call(_LIB.LGBM_BoosterSaveModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
c_str(filename)))
_dump_pandas_categorical(self.pandas_categorical, filename)
return self
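    # Illustrative sketch (not part of the library): persisting and reloading a
    # trained model. The file name and `bst` are hypothetical.
    #
    #     bst.save_model('model.txt', num_iteration=bst.best_iteration)
    #     bst_loaded = lgb.Booster(model_file='model.txt')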
def shuffle_models(self, start_iteration=0, end_iteration=-1):
"""Shuffle models.
Parameters
----------
start_iteration : int, optional (default=0)
The first iteration that will be shuffled.
end_iteration : int, optional (default=-1)
The last iteration that will be shuffled.
If <= 0, means the last available iteration.
Returns
-------
self : Booster
Booster with shuffled models.
"""
_safe_call(_LIB.LGBM_BoosterShuffleModels(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(end_iteration)))
return self
def model_from_string(self, model_str, verbose=True):
"""Load Booster from a string.
Parameters
----------
model_str : string
Model will be loaded from this string.
verbose : bool, optional (default=True)
Whether to print messages while loading model.
Returns
-------
self : Booster
Loaded Booster object.
"""
if self.handle is not None:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
self._free_buffer()
self.handle = ctypes.c_void_p()
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
if verbose:
_log_info(f'Finished loading model, total used {int(out_num_iterations.value)} iterations')
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(model_str=model_str)
return self
def model_to_string(self, num_iteration=None, start_iteration=0, importance_type='split'):
"""Save Booster to string.
Parameters
----------
num_iteration : int or None, optional (default=None)
Index of the iteration that should be saved.
If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
If <= 0, all iterations are saved.
start_iteration : int, optional (default=0)
Start index of the iteration that should be saved.
importance_type : string, optional (default="split")
What type of feature importance should be saved.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
str_repr : string
String representation of Booster.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int64(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
# if buffer length is not long enough, re-allocate a buffer
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
ret = string_buffer.value.decode('utf-8')
ret += _dump_pandas_categorical(self.pandas_categorical)
return ret
def dump_model(self, num_iteration=None, start_iteration=0, importance_type='split'):
"""Dump Booster to JSON format.
Parameters
----------
num_iteration : int or None, optional (default=None)
Index of the iteration that should be dumped.
If None, if the best iteration exists, it is dumped; otherwise, all iterations are dumped.
If <= 0, all iterations are dumped.
start_iteration : int, optional (default=0)
Start index of the iteration that should be dumped.
importance_type : string, optional (default="split")
What type of feature importance should be dumped.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
Returns
-------
json_repr : dict
JSON format of Booster.
"""
if num_iteration is None:
num_iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int64(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
# if buffer length is not long enough, reallocate a buffer
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int(importance_type_int),
ctypes.c_int64(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
ret = json.loads(string_buffer.value.decode('utf-8'))
ret['pandas_categorical'] = json.loads(json.dumps(self.pandas_categorical,
default=json_default_with_numpy))
return ret
def predict(self, data, start_iteration=0, num_iteration=None,
raw_score=False, pred_leaf=False, pred_contrib=False,
data_has_header=False, is_reshape=True, **kwargs):
"""Make a prediction.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for prediction.
If string, it represents the path to txt file.
start_iteration : int, optional (default=0)
Start index of the iteration to predict.
If <= 0, starts from the first iteration.
num_iteration : int or None, optional (default=None)
Total number of iterations used in the prediction.
If None, if the best iteration exists and start_iteration <= 0, the best iteration is used;
otherwise, all iterations from ``start_iteration`` are used (no limits).
If <= 0, all iterations from ``start_iteration`` are used (no limits).
raw_score : bool, optional (default=False)
Whether to predict raw scores.
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
.. note::
If you want to get more explanations for your model's predictions using SHAP values,
like SHAP interaction values,
you can install the shap package (https://github.com/slundberg/shap).
Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
column, where the last column is the expected value.
data_has_header : bool, optional (default=False)
Whether the data has header.
Used only if data is string.
is_reshape : bool, optional (default=True)
If True, result is reshaped to [nrow, ncol].
**kwargs
Other parameters for the prediction.
Returns
-------
result : numpy array, scipy.sparse or list of scipy.sparse
Prediction result.
Can be sparse or a list of sparse objects (each element represents predictions for one class) for feature contributions (when ``pred_contrib=True``).
"""
predictor = self._to_predictor(deepcopy(kwargs))
if num_iteration is None:
if start_iteration <= 0:
num_iteration = self.best_iteration
else:
num_iteration = -1
return predictor.predict(data, start_iteration, num_iteration,
raw_score, pred_leaf, pred_contrib,
data_has_header, is_reshape)
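    # Illustrative sketch (not part of the library): common prediction calls.
    # `bst` and X_test are hypothetical.
    #
    #     y_pred = bst.predict(X_test)                          # regular scores
    #     y_raw = bst.predict(X_test, raw_score=True)           # raw margins
    #     leaves = bst.predict(X_test, pred_leaf=True)          # leaf indices per tree
    #     contribs = bst.predict(X_test, pred_contrib=True)     # feature contributions + expected-value column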
def refit(self, data, label, decay_rate=0.9, **kwargs):
"""Refit the existing Booster by new data.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for refit.
If string, it represents the path to txt file.
label : list, numpy 1-D array or pandas Series / one-column DataFrame
Label for refit.
decay_rate : float, optional (default=0.9)
Decay rate of refit,
will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees.
**kwargs
Other parameters for refit.
These parameters will be passed to ``predict`` method.
Returns
-------
result : Booster
Refitted Booster.
"""
if self.__set_objective_to_none:
raise LightGBMError('Cannot refit due to null objective function.')
predictor = self._to_predictor(deepcopy(kwargs))
leaf_preds = predictor.predict(data, -1, pred_leaf=True)
nrow, ncol = leaf_preds.shape
out_is_linear = ctypes.c_bool(False)
_safe_call(_LIB.LGBM_BoosterGetLinear(
self.handle,
ctypes.byref(out_is_linear)))
new_params = deepcopy(self.params)
new_params["linear_tree"] = out_is_linear.value
train_set = Dataset(data, label, silent=True, params=new_params)
new_params['refit_decay_rate'] = decay_rate
new_booster = Booster(new_params, train_set)
# Copy models
_safe_call(_LIB.LGBM_BoosterMerge(
new_booster.handle,
predictor.handle))
leaf_preds = leaf_preds.reshape(-1)
ptr_data, _, _ = c_int_array(leaf_preds)
_safe_call(_LIB.LGBM_BoosterRefit(
new_booster.handle,
ptr_data,
ctypes.c_int32(nrow),
ctypes.c_int32(ncol)))
new_booster.network = self.network
new_booster.__attr = self.__attr.copy()
return new_booster
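    # Illustrative sketch (not part of the library): adapting an existing model
    # to fresher data without changing the tree structure. X_new and y_new are
    # hypothetical; only the leaf outputs are re-estimated with the given decay.
    #
    #     bst_refit = bst.refit(X_new, y_new, decay_rate=0.9)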
def get_leaf_output(self, tree_id, leaf_id):
"""Get the output of a leaf.
Parameters
----------
tree_id : int
The index of the tree.
leaf_id : int
The index of the leaf in the tree.
Returns
-------
result : float
The output of the leaf.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetLeafValue(
self.handle,
ctypes.c_int(tree_id),
ctypes.c_int(leaf_id),
ctypes.byref(ret)))
return ret.value
def _to_predictor(self, pred_parameter=None):
"""Convert to predictor."""
predictor = _InnerPredictor(booster_handle=self.handle, pred_parameter=pred_parameter)
predictor.pandas_categorical = self.pandas_categorical
return predictor
def num_feature(self):
"""Get number of features.
Returns
-------
num_feature : int
The number of features.
"""
out_num_feature = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumFeature(
self.handle,
ctypes.byref(out_num_feature)))
return out_num_feature.value
def feature_name(self):
"""Get names of features.
Returns
-------
result : list
List with names of features.
"""
num_feature = self.num_feature()
# Get name of features
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(reserved_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if num_feature != tmp_out_len.value:
raise ValueError("Length of feature names doesn't equal with num_feature")
actual_string_buffer_size = required_string_buffer_size.value
# if buffer length is not long enough, reallocate buffers
if reserved_string_buffer_size < actual_string_buffer_size:
string_buffers = [ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetFeatureNames(
self.handle,
ctypes.c_int(num_feature),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(actual_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
return [string_buffers[i].value.decode('utf-8') for i in range(num_feature)]
def feature_importance(self, importance_type='split', iteration=None):
"""Get feature importances.
Parameters
----------
importance_type : string, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
iteration : int or None, optional (default=None)
Limit number of iterations in the feature importance calculation.
If None, if the best iteration exists, it is used; otherwise, all trees are used.
If <= 0, all trees are used (no limits).
Returns
-------
result : numpy array
Array with feature importances.
"""
if iteration is None:
iteration = self.best_iteration
importance_type_int = FEATURE_IMPORTANCE_TYPE_MAPPER[importance_type]
result = np.zeros(self.num_feature(), dtype=np.float64)
_safe_call(_LIB.LGBM_BoosterFeatureImportance(
self.handle,
ctypes.c_int(iteration),
ctypes.c_int(importance_type_int),
result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if importance_type_int == 0:
return result.astype(np.int32)
else:
return result
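    # Illustrative sketch (not part of the library): ranking features by gain.
    # `bst` is a hypothetical trained Booster.
    #
    #     names = bst.feature_name()
    #     gains = bst.feature_importance(importance_type='gain')
    #     for name, gain in sorted(zip(names, gains), key=lambda x: -x[1]):
    #         print(name, gain)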
def get_split_value_histogram(self, feature, bins=None, xgboost_style=False):
"""Get split value histogram for the specified feature.
Parameters
----------
feature : int or string
The feature name or index the histogram is calculated for.
If int, interpreted as index.
If string, interpreted as name.
.. warning::
Categorical features are not supported.
bins : int, string or None, optional (default=None)
The maximum number of bins.
If None, or int and > number of unique split values and ``xgboost_style=True``,
the number of bins equals number of unique split values.
If string, it should be one from the list of the supported values by ``numpy.histogram()`` function.
xgboost_style : bool, optional (default=False)
Whether the returned result should be in the same form as it is in XGBoost.
If False, the returned value is tuple of 2 numpy arrays as it is in ``numpy.histogram()`` function.
If True, the returned value is matrix, in which the first column is the right edges of non-empty bins
and the second one is the histogram values.
Returns
-------
result_tuple : tuple of 2 numpy arrays
If ``xgboost_style=False``, the values of the histogram of used splitting values for the specified feature
and the bin edges.
result_array_like : numpy array or pandas DataFrame (if pandas is installed)
If ``xgboost_style=True``, the histogram of used splitting values for the specified feature.
"""
def add(root):
"""Recursively add thresholds."""
if 'split_index' in root: # non-leaf
if feature_names is not None and isinstance(feature, str):
split_feature = feature_names[root['split_feature']]
else:
split_feature = root['split_feature']
if split_feature == feature:
if isinstance(root['threshold'], str):
raise LightGBMError('Cannot compute split value histogram for the categorical feature')
else:
values.append(root['threshold'])
add(root['left_child'])
add(root['right_child'])
model = self.dump_model()
feature_names = model.get('feature_names')
tree_infos = model['tree_info']
values = []
for tree_info in tree_infos:
add(tree_info['tree_structure'])
if bins is None or isinstance(bins, int) and xgboost_style:
n_unique = len(np.unique(values))
bins = max(min(n_unique, bins) if bins is not None else n_unique, 1)
hist, bin_edges = np.histogram(values, bins=bins)
if xgboost_style:
ret = np.column_stack((bin_edges[1:], hist))
ret = ret[ret[:, 1] > 0]
if PANDAS_INSTALLED:
return pd_DataFrame(ret, columns=['SplitValue', 'Count'])
else:
return ret
else:
return hist, bin_edges
def __inner_eval(self, data_name, data_idx, feval=None):
"""Evaluate training or validation data."""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
self.__get_eval_info()
ret = []
if self.__num_inner_eval > 0:
result = np.zeros(self.__num_inner_eval, dtype=np.float64)
tmp_out_len = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetEval(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if tmp_out_len.value != self.__num_inner_eval:
raise ValueError("Wrong length of eval results")
for i in range(self.__num_inner_eval):
ret.append((data_name, self.__name_inner_eval[i],
result[i], self.__higher_better_inner_eval[i]))
if callable(feval):
feval = [feval]
if feval is not None:
if data_idx == 0:
cur_data = self.train_set
else:
cur_data = self.valid_sets[data_idx - 1]
for eval_function in feval:
if eval_function is None:
continue
feval_ret = eval_function(self.__inner_predict(data_idx), cur_data)
if isinstance(feval_ret, list):
for eval_name, val, is_higher_better in feval_ret:
ret.append((data_name, eval_name, val, is_higher_better))
else:
eval_name, val, is_higher_better = feval_ret
ret.append((data_name, eval_name, val, is_higher_better))
return ret
def __inner_predict(self, data_idx):
"""Predict for training and validation dataset."""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
if self.__inner_predict_buffer[data_idx] is None:
if data_idx == 0:
n_preds = self.train_set.num_data() * self.__num_class
else:
n_preds = self.valid_sets[data_idx - 1].num_data() * self.__num_class
self.__inner_predict_buffer[data_idx] = np.zeros(n_preds, dtype=np.float64)
        # avoid predicting multiple times in one iteration
if not self.__is_predicted_cur_iter[data_idx]:
tmp_out_len = ctypes.c_int64(0)
data_ptr = self.__inner_predict_buffer[data_idx].ctypes.data_as(ctypes.POINTER(ctypes.c_double))
_safe_call(_LIB.LGBM_BoosterGetPredict(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
data_ptr))
if tmp_out_len.value != len(self.__inner_predict_buffer[data_idx]):
raise ValueError(f"Wrong length of predict results for data {data_idx}")
self.__is_predicted_cur_iter[data_idx] = True
return self.__inner_predict_buffer[data_idx]
def __get_eval_info(self):
"""Get inner evaluation count and names."""
if self.__need_reload_eval_info:
self.__need_reload_eval_info = False
out_num_eval = ctypes.c_int(0)
# Get num of inner evals
_safe_call(_LIB.LGBM_BoosterGetEvalCounts(
self.handle,
ctypes.byref(out_num_eval)))
self.__num_inner_eval = out_num_eval.value
if self.__num_inner_eval > 0:
# Get name of eval metrics
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [
ctypes.create_string_buffer(reserved_string_buffer_size) for _ in range(self.__num_inner_eval)
]
ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetEvalNames(
self.handle,
ctypes.c_int(self.__num_inner_eval),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(reserved_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if self.__num_inner_eval != tmp_out_len.value:
raise ValueError("Length of eval names doesn't equal with num_evals")
actual_string_buffer_size = required_string_buffer_size.value
# if buffer length is not long enough, reallocate buffers
if reserved_string_buffer_size < actual_string_buffer_size:
string_buffers = [
ctypes.create_string_buffer(actual_string_buffer_size) for _ in range(self.__num_inner_eval)
]
ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetEvalNames(
self.handle,
ctypes.c_int(self.__num_inner_eval),
ctypes.byref(tmp_out_len),
ctypes.c_size_t(actual_string_buffer_size),
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
self.__name_inner_eval = [
string_buffers[i].value.decode('utf-8') for i in range(self.__num_inner_eval)
]
self.__higher_better_inner_eval = [
name.startswith(('auc', 'ndcg@', 'map@', 'average_precision')) for name in self.__name_inner_eval
]
def attr(self, key):
"""Get attribute string from the Booster.
Parameters
----------
key : string
The name of the attribute.
Returns
-------
value : string or None
The attribute value.
Returns None if attribute does not exist.
"""
return self.__attr.get(key, None)
def set_attr(self, **kwargs):
"""Set attributes to the Booster.
Parameters
----------
**kwargs
The attributes to set.
Setting a value to None deletes an attribute.
Returns
-------
self : Booster
Booster with set attributes.
"""
for key, value in kwargs.items():
if value is not None:
if not isinstance(value, str):
raise ValueError("Only string values are accepted")
self.__attr[key] = value
else:
self.__attr.pop(key, None)
return self
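# --- Editor's note: illustrative usage sketch, not part of the original module. ---
# A minimal, hedged example of the Booster introspection APIs documented above
# (feature_name, feature_importance, get_split_value_histogram, attr/set_attr).
# It assumes this file is importable as part of the lightgbm package and uses
# small random data; the function is not executed on import.
def _example_booster_introspection():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(100, 3)
    y = rng.rand(100)
    train_set = Dataset(X, label=y, feature_name=['f0', 'f1', 'f2'])
    bst = Booster(params={'objective': 'regression', 'verbose': -1}, train_set=train_set)
    for _ in range(5):
        bst.update()                                # one boosting iteration per call
    print(bst.feature_name())                       # ['f0', 'f1', 'f2']
    print(bst.feature_importance('split'))          # split counts per feature
    print(bst.get_split_value_histogram('f0'))      # (hist, bin_edges) tuple
    bst.set_attr(note='demo')                       # attach a string attribute ...
    print(bst.attr('note'))                         # ... and read it back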
|
mit
| 6,481,589,992,037,159,000
| 41.512168
| 161
| 0.553914
| false
| 4.200816
| false
| false
| false
|
llvm/llvm-zorg
|
zorg/buildbot/builders/AnnotatedBuilder.py
|
1
|
3401
|
from buildbot.process.properties import WithProperties
from buildbot.steps.shell import SetProperty
from zorg.buildbot.commands.AnnotatedCommand import AnnotatedCommand
from zorg.buildbot.process.factory import LLVMBuildFactory
def getAnnotatedBuildFactory(
script,
clean=False,
depends_on_projects=None,
env=None,
extra_args=None,
timeout=1200,
checkout_llvm_sources=True):
"""
Returns a new build factory that uses AnnotatedCommand, which
allows the build to be run by version-controlled scripts that do
not require a buildmaster restart to update.
script: script under "builders/annotated" to be run by python
clean: set to true for a clean build of llvm
depends_on_projects: which subprojects to enable
llvm must be first in the list
(default: ["llvm", "clang", "compiler-rt", "libcxx",
"libcxxabi", "libunwind", "lld"])
env: environment overrides (map; default is no overrides)
extra_args: extra arguments to pass to the script (default: [])
  timeout: specifies the builder's timeout in seconds (default: 1200)
  checkout_llvm_sources: whether to also check out the LLVM sources before
                         running the script (default: True)
  """
if depends_on_projects is None:
depends_on_projects = [
"llvm",
"clang",
"compiler-rt",
"libcxx",
"libcxxabi",
"libunwind",
"lld"]
if extra_args is None:
extra_args = []
f = LLVMBuildFactory(
clean=clean,
depends_on_projects=depends_on_projects)
if clean:
f.addStep(SetProperty(property='clean', command='echo 1'))
# We normally use the clean property to indicate that we want a
# clean build, but AnnotatedCommand uses the clobber property
# instead. Therefore, set clobber if clean is set to a truthy
# value. This will cause AnnotatedCommand to set
# BUILDBOT_CLOBBER=1 in the environment, which is how we
# communicate to the script that we need a clean build.
f.addStep(SetProperty(
property='clobber',
command='echo 1',
doStepIf=lambda step: step.build.getProperty('clean', False)))
merged_env = {
'TERM': 'dumb' # Be cautious and disable color output from all tools.
}
if env is not None:
# Overwrite pre-set items with the given ones, so user can set
# anything.
merged_env.update(env)
scripts_dir = "annotated"
# Check out zorg so we can run the annotator scripts.
f.addGetSourcecodeForProject(
name='update-annotated-scripts',
project='zorg',
src_dir='llvm-zorg',
alwaysUseLatest=True)
if checkout_llvm_sources:
f.addGetSourcecodeSteps()
extra_args_with_props = [WithProperties(arg) for arg in extra_args]
# Explicitly use '/' as separator, because it works on *nix and Windows.
if script.startswith('/'):
command = [script]
else:
script_path = "../llvm-zorg/zorg/buildbot/builders/annotated/%s" % (script)
command = ["python", script_path, WithProperties("--jobs=%(jobs:-)s")]
command += extra_args_with_props
f.addStep(AnnotatedCommand(name="annotate",
description="annotate",
timeout=timeout,
haltOnFailure=True,
command=command,
env=merged_env))
return f
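# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# A hedged example of how a buildmaster builder definition might call this factory;
# the builder name, script name and environment values are made-up placeholders.
def _example_annotated_builder():
  return {
    'name': 'sanitizer-x86_64-linux-example',     # hypothetical builder name
    'factory': getAnnotatedBuildFactory(
      script='sanitizer-buildbot.sh',             # hypothetical annotated script
      depends_on_projects=['llvm', 'clang', 'compiler-rt'],
      clean=False,
      env={'CC': 'clang', 'CXX': 'clang++'},
      timeout=3600),
  }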
|
apache-2.0
| 84,255,668,484,032,700
| 34.427083
| 81
| 0.628051
| false
| 4.077938
| false
| false
| false
|
cbitterfield/JobCard
|
archive/bulkvideosize.py
|
1
|
3578
|
#!/opt/local/bin/python
# encoding: utf-8
'''
bulkvideosize -- report size, duration and bitrate of videos in a directory
Walks a source directory, runs ffprobe on every .mp4 file found and writes
each video's resolution, duration and bitrate to a log file.
@author: user_name
@copyright: 2017 organization_name. All rights reserved.
@license: license
@contact: user_email
@deffield updated: Updated
'''
import sys
import os
import argparse
__all__ = []
__version__ = 0.1
__date__ = '2017-10-20'
__updated__ = '2017-10-20'
DEBUG = 1
TESTRUN = 0
PROFILE = 0
program_name = os.path.basename(sys.argv[0])
# Setup argument parser
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true", help="Display detailed debugging information")
parser.add_argument("-l","--logfile", action="store", help="Write Logfile if ommitted write to STDOUT")
parser.add_argument("-s","--source", action="store", help="Source Directory")
# Process arguments
args = parser.parse_args()
verbose = args.verbose
path = args.source
logfile = args.logfile
if verbose > 0:
print("Verbose mode on")
## Use Get Video Size Function
def getvideosize(src):
import shlex
import os
from string import Template
import subprocess
import datetime
FFPROBE="/opt/local/bin/ffprobe"
    Error = False
    sizeofVideo = Duration = BitRate = None  # defaults in case no .mp4 file is read successfully
log_text = open(logfile, "w")
for video in os.listdir(src):
if video.endswith(".mp4"):
CMD_TEMPLATE = "$FFPROBE -v error -of flat=s=_ -select_streams v:0 -show_entries stream=height,width,bit_rate,duration '$VIDEO'"
CMD = Template(CMD_TEMPLATE).safe_substitute(FFPROBE=FFPROBE, VIDEO=src + "/" + video)
videoName = os.path.basename(video)
pathName = os.path.dirname(src + "/" + video)
#print("Get the Video Size Information for Video: " + videoName )
#print("Source Dir:" + pathName )
#print("getVideoSizeCMD:\n " )
pCMD = shlex.split(CMD)
#print("Command:" + CMD)
try:
result=subprocess.check_output(pCMD)
cWidth = result.splitlines(True)[0]
cHeight = result.splitlines(True)[1]
cDuration = result.splitlines(True)[2]
cBit_Rate = result.splitlines(True)[3]
lWidth = cWidth.split("=")[1]
lHeight = cHeight.split("=")[1]
lDuration = cDuration.split("=")[1]
lBitRate = cBit_Rate.split("=")[1]
Width = lWidth.replace('\n','')
Height = lHeight.replace('\n','')
Duration = lDuration.replace('\n','')
BitRate = lBitRate.replace('\n','')
Duration = Duration.replace('"','')
BitRate = BitRate.replace('"','')
sizeofVideo = str(Width) + "x" + str(Height)
myduration = str(datetime.timedelta(seconds=int(float(Duration))))
mybitrate = str(int(BitRate)/1000)
#print("Video Source: " + video + " Size: " + sizeofVideo + " Duration: " + myduration + " BitRate:" + mybitrate + " kbps" )
log_text.write("Video Source: " + video + " Size: " + sizeofVideo + " Duration: " + myduration + " BitRate:" + mybitrate + " kbps\n")
            except:
                #print("Video Source: " + video + " ERROR")
                Error = True
                log_text.write("Video Source: " + video + " ERROR\n")
log_text.close()
return(Error, sizeofVideo, Duration, BitRate)
myError, mySize, myDuration, myBitrate = getvideosize(path)
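# --- Editor's note: illustrative usage, not part of the original script. ---
# Example invocation (paths are made up):
#   ./bulkvideosize.py -s /path/to/videos -l /tmp/videosize.log -v
# getvideosize() also returns the metrics of the last video it processed:
if verbose > 0:
    print("Last video: size=%s duration=%ss bitrate=%s" % (mySize, myDuration, myBitrate))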
|
gpl-3.0
| -261,463,053,978,957,760
| 28.089431
| 149
| 0.58161
| false
| 3.68866
| false
| false
| false
|
Dapid/pywt
|
demo/dwt_signal_decomposition.py
|
1
|
1789
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib.pyplot as plt
import pywt
ecg = np.load(os.path.join('data', 'ecg.npy'))
data1 = np.concatenate((np.arange(1, 400),
np.arange(398, 600),
np.arange(601, 1024)))
x = np.linspace(0.082, 2.128, num=1024)[::-1]
data2 = np.sin(40 * np.log(x)) * np.sign((np.log(x)))
mode = pywt.MODES.sp1
def plot_signal_decomp(data, w, title):
"""Decompose and plot a signal S.
S = An + Dn + Dn-1 + ... + D1
"""
w = pywt.Wavelet(w)
a = data
ca = []
cd = []
for i in range(5):
(a, d) = pywt.dwt(a, w, mode)
ca.append(a)
cd.append(d)
rec_a = []
rec_d = []
for i, coeff in enumerate(ca):
coeff_list = [coeff, None] + [None] * i
rec_a.append(pywt.waverec(coeff_list, w))
for i, coeff in enumerate(cd):
coeff_list = [None, coeff] + [None] * i
rec_d.append(pywt.waverec(coeff_list, w))
fig = plt.figure()
ax_main = fig.add_subplot(len(rec_a) + 1, 1, 1)
ax_main.set_title(title)
ax_main.plot(data)
ax_main.set_xlim(0, len(data) - 1)
for i, y in enumerate(rec_a):
ax = fig.add_subplot(len(rec_a) + 1, 2, 3 + i * 2)
ax.plot(y, 'r')
ax.set_xlim(0, len(y) - 1)
ax.set_ylabel("A%d" % (i + 1))
for i, y in enumerate(rec_d):
ax = fig.add_subplot(len(rec_d) + 1, 2, 4 + i * 2)
ax.plot(y, 'g')
ax.set_xlim(0, len(y) - 1)
ax.set_ylabel("D%d" % (i + 1))
plot_signal_decomp(data1, 'coif5', "DWT: Signal irregularity")
plot_signal_decomp(data2, 'sym5', "DWT: Frequency and phase change - Symmlets5")
plot_signal_decomp(ecg, 'sym5', "DWT: Ecg sample - Symmlets5")
plt.show()
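# --- Editor's note: illustrative check, not part of the original demo. ---
# A hedged sketch verifying the identity from plot_signal_decomp's docstring,
# S = An + Dn + ... + D1, indirectly: a 5-level wavedec/waverec round trip
# should reproduce the ecg signal up to numerical precision.
w = pywt.Wavelet('sym5')
coeffs = pywt.wavedec(ecg, w, mode=mode, level=5)            # [cA5, cD5, ..., cD1]
reconstructed = pywt.waverec(coeffs, w, mode=mode)
print("max reconstruction error:", np.max(np.abs(reconstructed[:len(ecg)] - ecg)))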
|
mit
| 5,571,234,884,135,789,000
| 23.847222
| 80
| 0.536613
| false
| 2.596517
| false
| false
| false
|
danrg/RGT-tool
|
src/RGT/XML/SVG/Attribs/xlinkAttributes.py
|
1
|
4217
|
from RGT.XML.SVG.Attribs.basicSvgAttribute import BasicSvgAttribute
from types import StringType
class XlinkAttributes(BasicSvgAttribute):
ATTRIBUTE_XLINK_HREF = 'xlink:href'
ATTRIBUTE_XLINK_SHOW = 'xlink:show'
ATTRIBUTE_XLINK_ACTUATE = 'xlink:actuate'
ATTRIBUTE_XLINK_TYPE = 'xlink:type'
ATTRIBUTE_XLINK_ROLE = 'xlink:role'
ATTRIBUTE_XLINK_ARCROLE = 'xlink:arcrole'
ATTRIBUTE_XLINK_TITLE = 'xlink:title'
def __init__(self):
BasicSvgAttribute.__init__(self)
def setXlinkHref(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_XLINK_HREF, data)
def setXlinkShow(self, data):
allowedValues = ['new', 'replace', 'embed', 'other', 'none']
if data is not None:
if data not in allowedValues:
values = ''
for value in allowedValues:
values += value + ', '
values = values[0: len(values) - 2]
                raise ValueError('Value not allowed, only ' + values + ' are allowed')
else:
self._setNodeAttribute(self.ATTRIBUTE_XLINK_SHOW, data)
def setXlinkActuate(self, data):
allowedValues = ['onLoad']
if data is not None:
if data not in allowedValues:
values = ''
for value in allowedValues:
values += value + ', '
values = values[0: len(values) - 2]
                raise ValueError('Value not allowed, only ' + values + ' are allowed')
else:
self._setNodeAttribute(self.ATTRIBUTE_XLINK_ACTUATE, data)
def setXlinkType(self, data):
allowedValues = ['simple']
if data is not None:
if data not in allowedValues:
values = ''
for value in allowedValues:
values += value + ', '
values = values[0: len(values) - 2]
                raise ValueError('Value not allowed, only ' + values + ' are allowed')
else:
self._setNodeAttribute(self.ATTRIBUTE_XLINK_TYPE, data)
def setXlinkRole(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_XLINK_ROLE, data)
def setXlinkArcrole(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_XLINK_ARCROLE, data)
def setXlinkTitle(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_XLINK_TITLE, data)
def getXlinkHref(self):
node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_HREF)
if node is not None:
return node.nodeValue
return None
def getXlinkShow(self):
node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_SHOW)
if node is not None:
return node.nodeValue
return None
def getXlinkActuate(self):
node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_ACTUATE)
if node is not None:
return node.nodeValue
return None
def getXlinkType(self):
node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_TYPE)
if node is not None:
return node.nodeValue
return None
def getXlinkRole(self):
node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_ROLE)
if node is not None:
return node.nodeValue
return None
def getXlinkArcrole(self):
node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_ARCROLE)
if node is not None:
return node.nodeValue
return None
def getXlinkTitle(self):
node = self._getNodeAttribute(self.ATTRIBUTE_XLINK_TITLE)
if node is not None:
return node.nodeValue
return None
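# --- Editor's note: illustrative usage sketch, not part of the original module. ---
# A hedged example; the SVG node classes in this package mix XlinkAttributes in,
# so these calls are normally made on such a node, passed in here as a parameter.
def _example_xlink_usage(svg_link_node):
    svg_link_node.setXlinkHref('#target-element')   # any string (or str()-able value)
    svg_link_node.setXlinkShow('new')               # must be one of the allowed values
    return svg_link_node.getXlinkHref(), svg_link_node.getXlinkShow()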
|
mit
| 2,037,298,200,612,529,700
| 32.867769
| 85
| 0.564857
| false
| 4.183532
| false
| false
| false
|
hsnr-gamera/gamera
|
gamera/gui/gaoptimizer/StopCriteriaPanel.py
|
1
|
4654
|
#
# Copyright (C) 2012 Tobias Bolten
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import wx
from gamera.gui import compat_wx
from gamera.gui.gaoptimizer.ExpertSettingPanel import *
#-------------------------------------------------------------------------------
class StopCriteriaPanel(ExpertSettingPanel):
#-------------------------------------------------------------------------------
#---------------------------------------------------------------------------
def __init__(self, parent, id):
#---------------------------------------------------------------------------
ExpertSettingPanel.__init__(self, parent, id)
sizer = wx.GridBagSizer(hgap=5, vgap=5)
self.SetSizer(sizer)
# best fitness
self.bestFitness = wx.CheckBox(self, -1, "Perfect LOO-recognition reached", \
name = "bestFitnessStop")
sizer.Add(self.bestFitness, pos=(0,0), \
flag = wx.LEFT | wx.RIGHT | wx.TOP | wx.EXPAND, border=10)
self.genericWidgets.append(self.bestFitness)
# generation counter
self.maxGeneration = wx.CheckBox(self, -1, "Max. number of generations", \
name = "maxGenerations")
sizer.Add(self.maxGeneration, pos=(1,0), \
flag = wx.LEFT | wx.RIGHT | wx.EXPAND, border = 10)
self.maxGenerationCount = wx.SpinCtrl(self, -1, size=(100,-1), \
min=10, max=5000, value='100')
compat_wx.set_tool_tip(self.maxGenerationCount, "Number of generations")
self.maxGenerationCount.Disable()
sizer.Add(self.maxGenerationCount, pos=(1,1), \
flag = wx.LEFT | wx.RIGHT | wx.EXPAND, border=10)
self.genericWidgets.append(self.maxGeneration)
self.AddChildToParent(self.maxGeneration, self.maxGenerationCount)
# fitness counter
self.maxFitnessEval = wx.CheckBox(self, -1, "Max. number of fitness evals", \
name = "maxFitnessEvals")
sizer.Add(self.maxFitnessEval, pos=(2,0), \
flag = wx.LEFT | wx.RIGHT | wx.EXPAND, border=10)
self.maxFitnessEvalCount = wx.SpinCtrl(self, -1, size=(100,-1), \
min=10, max=50000, value='5000')
compat_wx.set_tool_tip(self.maxFitnessEvalCount, "Number of evaluations")
self.maxFitnessEvalCount.Disable()
sizer.Add(self.maxFitnessEvalCount, pos=(2,1), \
flag = wx.LEFT | wx.RIGHT | wx.EXPAND, border=10)
self.genericWidgets.append(self.maxFitnessEval)
self.AddChildToParent(self.maxFitnessEval, self.maxFitnessEvalCount)
# steady state continue
self.steadyContinue = wx.CheckBox(self, -1, "Steady state continue", \
name = "steadyStateStop")
self.steadyContinue.SetValue(True)
sizer.Add(self.steadyContinue, pos=(3,0), \
flag = wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, border=10)
self.steadyContinueMin = wx.SpinCtrl(self, -1, size=(100,-1), \
min=10, max=250000, value='40')
compat_wx.set_tool_tip(self.steadyContinueMin, "Minimum generations")
sizer.Add(self.steadyContinueMin, pos=(3,1), \
flag = wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, border=10)
self.steadyContinueNoChange = wx.SpinCtrl(self, -1, size=(100,-1), \
min=1, max=10000, value='10')
compat_wx.set_tool_tip(self.steadyContinueNoChange, "Generations without improvement")
sizer.Add(self.steadyContinueNoChange, pos=(3,2), \
flag = wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.EXPAND, border=10)
self.genericWidgets.append(self.steadyContinue)
self.AddChildToParent(self.steadyContinue, self.steadyContinueMin)
self.AddChildToParent(self.steadyContinue, self.steadyContinueNoChange)
# bind the EVT_CHECKBOX to the CheckBoxes
self.BindEvent(wx.EVT_CHECKBOX, self.OnCheckBox, \
[self.bestFitness, self.maxGeneration,
self.maxFitnessEval,
self.steadyContinue])
|
gpl-2.0
| -2,199,924,503,062,840,000
| 46.489796
| 94
| 0.613666
| false
| 3.714286
| false
| false
| false
|
BenKaehler/q2-feature-classifier
|
q2_feature_classifier/tests/__init__.py
|
1
|
1162
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import tempfile
import shutil
from warnings import filterwarnings
from qiime2.plugin.testing import TestPluginBase
class FeatureClassifierTestPluginBase(TestPluginBase):
def setUp(self):
try:
from q2_feature_classifier.plugin_setup import plugin
except ImportError:
self.fail("Could not import plugin object.")
self.plugin = plugin
self.temp_dir = tempfile.TemporaryDirectory(
prefix='q2-feature-classifier-test-temp-')
filterwarnings('ignore', 'The TaxonomicClassifier ', UserWarning)
def _setup_dir(self, filenames, dirfmt):
for filename in filenames:
filepath = self.get_data_path(filename)
shutil.copy(filepath, self.temp_dir.name)
return dirfmt(self.temp_dir.name, mode='r')
|
bsd-3-clause
| -7,658,275,673,037,822,000
| 32.2
| 78
| 0.598107
| false
| 4.723577
| false
| false
| false
|
kalyan02/dayone
|
do/lib.py
|
1
|
2300
|
from BeautifulSoup import BeautifulSoup as Soupify
import urllib, re
import settings
import oauth2, urlparse, json
from do import settings
class DropboxAPI(object):
def __init__(self, user):
self.user = user
dinfo = self.user.social_auth.get(provider='dropbox')
access_token = urlparse.parse_qs( dinfo.extra_data['access_token'] )
self.user_token = oauth2.Token(key=access_token['oauth_token'][0],secret=access_token['oauth_token_secret'][0])
self.cons_token = oauth2.Consumer(key=settings.DROPBOX_APP_ID,secret=settings.DROPBOX_API_SECRET)
def request( self, api_call, extra_params=None ):
self.parameters = {
'oauth_signature_method': oauth2.SignatureMethod_PLAINTEXT.name,
'oauth_timestamp' : oauth2.generate_timestamp(),
'oauth_nonce' : oauth2.generate_nonce(),
'oauth_version' : '1.0',
}
if type(extra_params) is dict:
self.parameters.update(extra_params)
self.req = oauth2.Request( url=api_call, parameters=self.parameters )
self.req.sign_request( signature_method=oauth2.SignatureMethod_PLAINTEXT(), token=self.user_token, consumer=self.cons_token)
return self.req
def call(self,method,params):
pass
def format_json(json_string):
return json.dumps( json.loads( json_string ), indent=4 )
# def file_put_contents( fname, fcon ):
# fh = open( fname, 'w+' )
# fh.write( fcon )
# fh.close()
# def file_get_contents( fname ):
# fh = open( fname, 'r')
# return fh.read()
# dropbox_url = "https://www.dropbox.com/sh/7gcfvmk9h107ryc/F39GaH7W8C"
# con = urllib.urlopen( dropbox_url ).read()
# file_put_contents( 'fh.txt', con )
# con = file_get_contents('fh.txt')
# scon = Soupify( con )
# entries_url = scon.findAll( 'a', attrs={'href':re.compile('/entries$')} )[0]['href']
# photos_url = scon.findAll( 'a', attrs={'href':re.compile('/photos$')} )[0]['href']
# print entries_url
# print photos_url
# # entries_page = urllib.urlopen(entries_url).read()
# # file_put_contents('entries_page.txt',entries_page)
# entries_page = file_get_contents('entries_page.txt')
# econ = Soupify(entries_page)
# posts = econ.findAll( 'a', attrs={'href':re.compile('\.doentry')} )
# urls = [ each['href'] for i,each in enumerate(posts) if i % 2 == 1 ]
# mods = econ.findAll( attrs={'class':'modified-time'} )
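# --- Editor's note: illustrative usage sketch, not part of the original module. ---
# A hedged example of signing a request for a Dropbox API call; the user object is a
# Django user with a dropbox social_auth entry, and the endpoint URL is a placeholder
# that has not been verified against the real Dropbox API.
def _example_signed_request(user):
    api = DropboxAPI(user)
    req = api.request('https://api.dropbox.com/1/account/info')  # hypothetical endpoint
    return req.to_url()   # signed URL that could then be fetched with urllib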
|
gpl-2.0
| -606,869,924,962,227,300
| 33.863636
| 126
| 0.677391
| false
| 2.90404
| false
| false
| false
|
cfobel/sconspiracy
|
Python/racy/plugins/qt/sconstools/qt4.py
|
1
|
21143
|
# ***** BEGIN LICENSE BLOCK *****
# Sconspiracy - Copyright (C) IRCAD, 2004-2010.
# Distributed under the terms of the BSD Licence as
# published by the Open Source Initiative.
# ****** END LICENSE BLOCK ******
"""SCons.Tool.qt
Tool-specific initialization for Qt.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
Tool provided by http://www.iua.upf.es/~dgarcia/Codders/sconstools.html
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "/home/scons/scons/branch.0/branch.96/baseline/src/engine/SCons/Tool/qt.py 0.96.92.D001 2006/04/10 23:13:27 knight"
import os.path
import re
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Scanner
import SCons.Tool
import SCons.Util
class ToolQtWarning(SCons.Warnings.Warning):
pass
class GeneratedMocFileNotIncluded(ToolQtWarning):
pass
class QtdirNotFound(ToolQtWarning):
pass
SCons.Warnings.enableWarningClass(ToolQtWarning)
qrcinclude_re = re.compile(r'<file>([^<]*)</file>', re.M)
def transformToWinePath(path) :
return os.popen('winepath -w "%s"'%path).read().strip().replace('\\','/')
header_extensions = [".h", ".hxx", ".hpp", ".hh"]
if SCons.Util.case_sensitive_suffixes('.h', '.H'):
header_extensions.append('.H')
# TODO: The following two lines will work when integrated back to SCons
# TODO: Meanwhile the third line will do the work
#cplusplus = __import__('c++', globals(), locals(), [])
#cxx_suffixes = cplusplus.CXXSuffixes
cxx_suffixes = [".c", ".cxx", ".cpp", ".cc"]
def checkMocIncluded(target, source, env):
moc = target[0]
cpp = source[0]
# looks like cpp.includes is cleared before the build stage :-(
# not really sure about the path transformations (moc.cwd? cpp.cwd?) :-/
path = SCons.Defaults.CScan.path_function(env, moc.cwd)
includes = SCons.Defaults.CScan(cpp, env, path)
if not moc in includes:
SCons.Warnings.warn(
GeneratedMocFileNotIncluded,
"Generated moc file '%s' is not included by '%s'" %
(str(moc), str(cpp)))
def find_file(filename, paths, node_factory):
for dir in paths:
node = node_factory(filename, dir)
if node.rexists():
return node
return None
class _Automoc:
"""
Callable class, which works as an emitter for Programs, SharedLibraries and
StaticLibraries.
"""
def __init__(self, objBuilderName):
self.objBuilderName = objBuilderName
def __call__(self, target, source, env):
"""
Smart autoscan function. Gets the list of objects for the Program
or Lib. Adds objects and builders for the special qt files.
"""
try:
if int(env.subst('$QT4_AUTOSCAN')) == 0:
return target, source
except ValueError:
pass
try:
debug = int(env.subst('$QT4_DEBUG'))
except ValueError:
debug = 0
# some shortcuts used in the scanner
splitext = SCons.Util.splitext
objBuilder = getattr(env, self.objBuilderName)
# some regular expressions:
# Q_OBJECT detection
q_object_search = re.compile(r'[^A-Za-z0-9]Q_OBJECT[^A-Za-z0-9]')
# cxx and c comment 'eater'
#comment = re.compile(r'(//.*)|(/\*(([^*])|(\*[^/]))*\*/)')
# CW: something must be wrong with the regexp. See also bug #998222
# CURRENTLY THERE IS NO TEST CASE FOR THAT
# The following is kind of hacky to get builders working properly (FIXME)
objBuilderEnv = objBuilder.env
objBuilder.env = env
mocBuilderEnv = env.Moc4.env
env.Moc4.env = env
# make a deep copy for the result; MocH objects will be appended
out_sources = source[:]
for obj in source:
if isinstance(obj,basestring): # big kludge!
print "scons: qt4: '%s' MAYBE USING AN OLD SCONS VERSION AND NOT CONVERTED TO 'File'. Discarded." % str(obj)
continue
if not obj.has_builder():
# binary obj file provided
if debug:
print "scons: qt: '%s' seems to be a binary. Discarded." % str(obj)
continue
cpp = obj.sources[0]
if not splitext(str(cpp))[1] in cxx_suffixes:
if debug:
print "scons: qt: '%s' is no cxx file. Discarded." % str(cpp)
# c or fortran source
continue
#cpp_contents = comment.sub('', cpp.get_contents())
try:
cpp_contents = cpp.get_contents()
except: continue # may be an still not generated source
h=None
for h_ext in header_extensions:
# try to find the header file in the corresponding source
# directory
hname = splitext(cpp.name)[0] + h_ext
h = find_file(hname, (cpp.get_dir(),), env.File)
if h:
if debug:
print "scons: qt: Scanning '%s' (header of '%s')" % (str(h), str(cpp))
#h_contents = comment.sub('', h.get_contents())
h_contents = h.get_contents()
break
if not h and debug:
print "scons: qt: no header for '%s'." % (str(cpp))
if h and q_object_search.search(h_contents):
# h file with the Q_OBJECT macro found -> add moc_cpp
moc_cpp = env.Moc4(h)
moc_o = objBuilder(moc_cpp)
out_sources.append(moc_o)
#moc_cpp.target_scanner = SCons.Defaults.CScan
if debug:
print "scons: qt: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(h), str(moc_cpp))
if cpp and q_object_search.search(cpp_contents):
# cpp file with Q_OBJECT macro found -> add moc
# (to be included in cpp)
moc = env.Moc4(cpp)
env.Ignore(moc, moc)
if debug:
print "scons: qt: found Q_OBJECT macro in '%s', moc'ing to '%s'" % (str(cpp), str(moc))
#moc.source_scanner = SCons.Defaults.CScan
# restore the original env attributes (FIXME)
objBuilder.env = objBuilderEnv
env.Moc4.env = mocBuilderEnv
return (target, out_sources)
AutomocShared = _Automoc('SharedObject')
AutomocStatic = _Automoc('StaticObject')
def _detect(env):
"""Not really safe, but fast method to detect the QT library"""
try: return env['QTDIR']
except KeyError: pass
try: return os.environ['QTDIR']
except KeyError: pass
moc = env.WhereIs('moc-qt4') or env.WhereIs('moc4') or env.WhereIs('moc')
if moc:
QTDIR = os.path.dirname(os.path.dirname(moc))
SCons.Warnings.warn(
QtdirNotFound,
"QTDIR variable is not defined, using moc executable as a hint (QTDIR=%s)" % QTDIR)
return QTDIR
raise SCons.Errors.StopError(
QtdirNotFound,
"Could not detect Qt 4 installation")
return None
def generate(env):
"""Add Builders and construction variables for qt to an Environment."""
def locateQt4Command(env, command, qtdir) :
suffixes = [
'-qt4',
'-qt4.exe',
'4',
'4.exe',
'',
'.exe',
]
triedPaths = []
for suffix in suffixes :
fullpath = os.path.join(qtdir,'bin',command + suffix)
if os.access(fullpath, os.X_OK) :
return fullpath
triedPaths.append(fullpath)
fullpath = env.Detect([command+'-qt4', command+'4', command])
if not (fullpath is None) : return fullpath
raise Exception("Qt4 command '" + command + "' not found. Tried: " + ', '.join(triedPaths))
CLVar = SCons.Util.CLVar
Action = SCons.Action.Action
Builder = SCons.Builder.Builder
splitext = SCons.Util.splitext
env['QTDIR'] = _detect(env)
# TODO: 'Replace' should be 'SetDefault'
# env.SetDefault(
env.Replace(
QTDIR = env['QTDIR'],
QT4_BINPATH = os.path.join('$QTDIR', 'bin'),
QT4_CPPPATH = os.path.join('$QTDIR', 'include'),
QT4_LIBPATH = os.path.join('$QTDIR', 'lib'),
# TODO: This is not reliable to QTDIR value changes but needed in order to support '-qt4' variants
QT4_MOC = locateQt4Command(env,'moc', env['QTDIR']),
QT4_UIC = locateQt4Command(env,'uic', env['QTDIR']),
QT4_RCC = locateQt4Command(env,'rcc', env['QTDIR']),
QT4_LUPDATE = locateQt4Command(env,'lupdate', env['QTDIR']),
QT4_LRELEASE = locateQt4Command(env,'lrelease', env['QTDIR']),
QT4_LIB = '', # KLUDGE to avoid linking qt3 library
QT4_AUTOSCAN = 0, # Should the qt tool try to figure out, which sources are to be moc'ed?
# Some QT specific flags. I don't expect someone wants to
# manipulate those ...
QT4_UICFLAGS = CLVar(''),
QT4_MOCFROMHFLAGS = CLVar(''),
QT4_MOCFROMCXXFLAGS = CLVar('-i'),
QT4_QRCFLAGS = '',
# suffixes/prefixes for the headers / sources to generate
QT4_UISUFFIX = '.ui',
QT4_UICDECLPREFIX = 'ui_',
QT4_UICDECLSUFFIX = '.h',
QT4_MOCINCPREFIX = '-I',
QT4_MOCHPREFIX = 'moc_',
QT4_MOCHSUFFIX = '$CXXFILESUFFIX',
QT4_MOCCXXPREFIX = '',
QT4_MOCCXXSUFFIX = '.moc',
QT4_QRCSUFFIX = '.qrc',
QT4_QRCCXXSUFFIX = '$CXXFILESUFFIX',
QT4_QRCCXXPREFIX = 'qrc_',
QT4_MOCCPPPATH = [],
QT4_MOCINCFLAGS = '$( ${_concat(QT4_MOCINCPREFIX, QT4_MOCCPPPATH, INCSUFFIX, __env__, RDirs)} $)',
# Commands for the qt support ...
QT4_UICCOM = '$QT4_UIC $QT4_UICFLAGS -o $TARGET $SOURCE',
QT4_MOCFROMHCOM = '$QT4_MOC $QT4_MOCFROMHFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE',
QT4_MOCFROMCXXCOM = [
'$QT4_MOC $QT4_MOCFROMCXXFLAGS $QT4_MOCINCFLAGS -o $TARGET $SOURCE',
Action(checkMocIncluded,None)],
QT4_LUPDATECOM = '$QT4_LUPDATE $SOURCE -ts $TARGET',
QT4_LRELEASECOM = '$QT4_LRELEASE $SOURCE',
QT4_RCCCOM = '$QT4_RCC $QT4_QRCFLAGS $SOURCE -o $TARGET -name ${SOURCE.filebase}',
)
# Translation builder
tsbuilder = Builder(
action = SCons.Action.Action('$QT4_LUPDATECOM'), #,'$QT4_LUPDATECOMSTR'),
multi=1
)
env.Append( BUILDERS = { 'Ts': tsbuilder } )
qmbuilder = Builder(
action = SCons.Action.Action('$QT4_LRELEASECOM'),# , '$QT4_LRELEASECOMSTR'),
src_suffix = '.ts',
suffix = '.qm',
single_source = True
)
env.Append( BUILDERS = { 'Qm': qmbuilder } )
# Resource builder
def scanResources(node, env, path, arg):
# I've being careful on providing names relative to the qrc file
# If that was not needed that code could be simplified a lot
def recursiveFiles(basepath, path) :
result = []
for item in os.listdir(os.path.join(basepath, path)) :
itemPath = os.path.join(path, item)
if os.path.isdir(os.path.join(basepath, itemPath)) :
result += recursiveFiles(basepath, itemPath)
else:
result.append(itemPath)
return result
contents = node.get_contents()
includes = qrcinclude_re.findall(contents)
qrcpath = os.path.dirname(node.path)
dirs = [included for included in includes if os.path.isdir(os.path.join(qrcpath,included))]
# dirs need to include files recursively
for dir in dirs :
includes.remove(dir)
includes+=recursiveFiles(qrcpath,dir)
return includes
qrcscanner = SCons.Scanner.Scanner(name = 'qrcfile',
function = scanResources,
argument = None,
skeys = ['.qrc'])
qrcbuilder = Builder(
action = SCons.Action.Action('$QT4_RCCCOM'), #, '$QT4_RCCCOMSTR'),
source_scanner = qrcscanner,
src_suffix = '$QT4_QRCSUFFIX',
suffix = '$QT4_QRCCXXSUFFIX',
prefix = '$QT4_QRCCXXPREFIX',
single_source = True
)
env.Append( BUILDERS = { 'Qrc': qrcbuilder } )
# Interface builder
uic4builder = Builder(
action = SCons.Action.Action('$QT4_UICCOM'), #, '$QT4_UICCOMSTR'),
src_suffix='$QT4_UISUFFIX',
suffix='$QT4_UICDECLSUFFIX',
prefix='$QT4_UICDECLPREFIX',
single_source = True
#TODO: Consider the uiscanner on new scons version
)
env['BUILDERS']['Uic4'] = uic4builder
# Metaobject builder
mocBld = Builder(action={}, prefix={}, suffix={})
for h in header_extensions:
act = SCons.Action.Action('$QT4_MOCFROMHCOM') #, '$QT4_MOCFROMHCOMSTR')
mocBld.add_action(h, act)
mocBld.prefix[h] = '$QT4_MOCHPREFIX'
mocBld.suffix[h] = '$QT4_MOCHSUFFIX'
for cxx in cxx_suffixes:
act = SCons.Action.Action('$QT4_MOCFROMCXXCOM') #, '$QT4_MOCFROMCXXCOMSTR')
mocBld.add_action(cxx, act)
mocBld.prefix[cxx] = '$QT4_MOCCXXPREFIX'
mocBld.suffix[cxx] = '$QT4_MOCCXXSUFFIX'
env['BUILDERS']['Moc4'] = mocBld
# er... no idea what that was for
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
static_obj.src_builder.append('Uic4')
shared_obj.src_builder.append('Uic4')
# We use the emitters of Program / StaticLibrary / SharedLibrary
# to scan for moc'able files
# We can't refer to the builders directly, we have to fetch them
# as Environment attributes because that sets them up to be called
# correctly later by our emitter.
env.AppendUnique(PROGEMITTER =[AutomocStatic],
SHLIBEMITTER=[AutomocShared],
LIBEMITTER =[AutomocStatic],
# Of course, we need to link against the qt libraries
# CPPPATH=["$QT4_CPPPATH"],
LIBPATH=["$QT4_LIBPATH"],
LIBS=['$QT4_LIB'])
# TODO: Does dbusxml2cpp need an adapter
env.AddMethod(enable_modules, "EnableQt4Modules")
def enable_modules(self, modules, debug=False, crosscompiling=False) :
import sys
validModules = [
'QtCore',
'QtGui',
'QtOpenGL',
'Qt3Support',
'QtAssistant',
'QtScript',
'QtDBus',
'QtSql',
# The next modules have not been tested yet so, please
# maybe they require additional work on non Linux platforms
'QtNetwork',
'QtSvg',
'QtTest',
'QtXml',
'QtXmlPatterns',
'QtUiTools',
'QtDesigner',
'QtDesignerComponents',
'QtWebKit',
'QtHelp',
'QtScript',
]
pclessModules = [
# in qt <= 4.3 designer and designerComponents are pcless, on qt4.4 they are not, so removed.
# 'QtDesigner',
# 'QtDesignerComponents',
]
staticModules = [
'QtUiTools',
]
invalidModules=[]
for module in modules:
if module not in validModules :
invalidModules.append(module)
if invalidModules :
raise Exception("Modules %s are not Qt4 modules. Valid Qt4 modules are: %s"% (
str(invalidModules),str(validModules)))
moduleDefines = {
'QtScript' : ['QT_SCRIPT_LIB'],
'QtSvg' : ['QT_SVG_LIB'],
'Qt3Support' : ['QT_QT3SUPPORT_LIB','QT3_SUPPORT'],
'QtSql' : ['QT_SQL_LIB'],
'QtXml' : ['QT_XML_LIB'],
'QtOpenGL' : ['QT_OPENGL_LIB'],
'QtGui' : ['QT_GUI_LIB'],
'QtNetwork' : ['QT_NETWORK_LIB'],
'QtCore' : ['QT_CORE_LIB'],
}
for module in modules :
try : self.AppendUnique(CPPDEFINES=moduleDefines[module])
except: pass
debugSuffix = ''
if sys.platform in ["darwin", "linux2"] and not crosscompiling :
if debug : debugSuffix = '_debug'
for module in modules :
if module not in pclessModules : continue
self.AppendUnique(LIBS=[module+debugSuffix])
self.AppendUnique(LIBPATH=[os.path.join("$QTDIR","lib")])
self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include","qt4")])
self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include","qt4",module)])
pcmodules = [module+debugSuffix for module in modules if module not in pclessModules ]
if 'QtDBus' in pcmodules:
self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include","qt4","QtDBus")])
if "QtAssistant" in pcmodules:
self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include","qt4","QtAssistant")])
pcmodules.remove("QtAssistant")
pcmodules.append("QtAssistantClient")
self.ParseConfig('pkg-config %s --libs --cflags'% ' '.join(pcmodules))
self["QT4_MOCCPPPATH"] = self["CPPPATH"]
return
if sys.platform == "win32" or crosscompiling :
if crosscompiling:
transformedQtdir = transformToWinePath(self['QTDIR'])
self['QT4_MOC'] = "QTDIR=%s %s"%( transformedQtdir, self['QT4_MOC'])
self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include")])
try: modules.remove("QtDBus")
except: pass
if debug : debugSuffix = 'd'
if "QtAssistant" in modules:
self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include","QtAssistant")])
modules.remove("QtAssistant")
modules.append("QtAssistantClient")
self.AppendUnique(LIBS=[lib+'4'+debugSuffix for lib in modules if lib not in staticModules])
self.PrependUnique(LIBS=[lib+debugSuffix for lib in modules if lib in staticModules])
if 'QtOpenGL' in modules:
self.AppendUnique(LIBS=['opengl32'])
self.AppendUnique(CPPPATH=[ '$QTDIR/include/'])
self.AppendUnique(CPPPATH=[ '$QTDIR/include/'+module for module in modules])
if crosscompiling :
self["QT4_MOCCPPPATH"] = [
path.replace('$QTDIR', transformedQtdir)
for path in self['CPPPATH'] ]
else :
self["QT4_MOCCPPPATH"] = self["CPPPATH"]
self.AppendUnique(LIBPATH=[os.path.join('$QTDIR','lib')])
return
"""
if sys.platform=="darwin" :
# TODO: Test debug version on Mac
self.AppendUnique(LIBPATH=[os.path.join('$QTDIR','lib')])
self.AppendUnique(LINKFLAGS="-F$QTDIR/lib")
self.AppendUnique(LINKFLAGS="-L$QTDIR/lib") #TODO clean!
if debug : debugSuffix = 'd'
for module in modules :
# self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include")])
# self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include",module)])
# port qt4-mac:
self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include", "qt4")])
self.AppendUnique(CPPPATH=[os.path.join("$QTDIR","include", "qt4", module)])
if module in staticModules :
self.AppendUnique(LIBS=[module+debugSuffix]) # TODO: Add the debug suffix
self.AppendUnique(LIBPATH=[os.path.join("$QTDIR","lib")])
else :
# self.Append(LINKFLAGS=['-framework', module])
# port qt4-mac:
self.Append(LIBS=module)
if 'QtOpenGL' in modules:
self.AppendUnique(LINKFLAGS="-F/System/Library/Frameworks")
self.Append(LINKFLAGS=['-framework', 'AGL']) #TODO ughly kludge to avoid quotes
self.Append(LINKFLAGS=['-framework', 'OpenGL'])
self["QT4_MOCCPPPATH"] = self["CPPPATH"]
return
# This should work for mac but doesn't
# env.AppendUnique(FRAMEWORKPATH=[os.path.join(env['QTDIR'],'lib')])
# env.AppendUnique(FRAMEWORKS=['QtCore','QtGui','QtOpenGL', 'AGL'])
"""
def exists(env):
return _detect(env)
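# --- Editor's note: illustrative SConstruct sketch, not part of the original tool. ---
# A hedged example of loading the tool defined above in a project; the tool path,
# module list and source files are made-up placeholders.
#
#   env = Environment(tools=['default', 'qt4'], toolpath=['site_scons/site_tools'])
#   env.EnableQt4Modules(['QtCore', 'QtGui', 'QtXml'], debug=False)
#   env.Program('app', ['main.cpp', 'mainwindow.cpp'])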
|
bsd-3-clause
| 6,324,493,422,340,307,000
| 38.593633
| 130
| 0.593577
| false
| 3.625965
| false
| false
| false
|
jpopelka/fabric8-analytics-worker
|
f8a_worker/solver.py
|
1
|
37059
|
"""Classes for resolving dependencies as specified in each ecosystem."""
import anymarkup
from bs4 import BeautifulSoup
from collections import defaultdict
from functools import cmp_to_key
import logging
from lxml import etree
from operator import itemgetter
from pip._internal.req.req_file import parse_requirements
from pip._vendor.packaging.specifiers import _version_split
import re
from requests import get
from semantic_version import Version as semver_version
from subprocess import check_output
from tempfile import NamedTemporaryFile, TemporaryDirectory
from urllib.parse import urljoin, quote
from urllib.request import urlopen
import requests
from f8a_worker.enums import EcosystemBackend
from f8a_worker.models import Analysis, Ecosystem, Package, Version
from f8a_worker.utils import cwd, TimedCommand
from f8a_worker.process import Git
logger = logging.getLogger(__name__)
class SolverException(Exception):
"""Exception to be raised in Solver."""
pass
class Tokens(object):
"""Comparison token representation."""
operators = ['>=', '<=', '==', '>', '<', '=', '!=']
(GTE, LTE, EQ1, GT, LT, EQ2, NEQ) = range(len(operators))
def compare_version(a, b):
"""Compare two version strings.
:param a: str
:param b: str
:return: -1 / 0 / 1
"""
def _range(q):
"""Convert a version string to array of integers.
"1.2.3" -> [1, 2, 3]
:param q: str
:return: List[int]
"""
r = []
for n in q.replace('-', '.').split('.'):
try:
r.append(int(n))
except ValueError:
# sort rc*, alpha, beta etc. lower than their non-annotated counterparts
r.append(-1)
return r
def _append_zeros(x, num_zeros):
"""Append `num_zeros` zeros to a copy of `x` and return it.
:param x: List[int]
:param num_zeros: int
:return: List[int]
"""
nx = list(x)
for _ in range(num_zeros):
nx.append(0)
return nx
def _cardinal(x, y):
"""Make both input lists be of same cardinality.
:param x: List[int]
:param y: List[int]
:return: List[int]
"""
lx, ly = len(x), len(y)
if lx == ly:
return x, y
elif lx > ly:
return x, _append_zeros(y, lx - ly)
else:
return _append_zeros(x, ly - lx), y
left, right = _cardinal(_range(a), _range(b))
return (left > right) - (left < right)
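# --- Editor's note: illustrative examples, not part of the original module. ---
# compare_version() compares dot-separated components numerically and sorts
# pre-release style suffixes (rc1, beta, ...) below the plain release.
def _example_compare_version():
    assert compare_version("1.2.3", "1.2.10") == -1     # 3 < 10 numerically, not lexically
    assert compare_version("2.0.0", "2.0") == 0         # missing components count as zero
    assert compare_version("1.0.0-rc1", "1.0.0") == -1  # annotated release sorts lower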
class ReleasesFetcher(object):
"""Base class for fetching releases."""
def __init__(self, ecosystem):
"""Initialize ecosystem."""
self._ecosystem = ecosystem
@property
def ecosystem(self):
"""Get ecosystem property."""
return self._ecosystem
def fetch_releases(self, package):
"""Abstract method for getting list of releases versions."""
raise NotImplementedError
class PypiReleasesFetcher(ReleasesFetcher):
"""Releases fetcher for Pypi."""
def __init__(self, ecosystem):
"""Initialize instance."""
super(PypiReleasesFetcher, self).__init__(ecosystem)
def fetch_releases(self, package):
"""Fetch package releases versions.
XML-RPC API Documentation: https://wiki.python.org/moin/PyPIXmlRpc
Signature: package_releases(package_name, show_hidden=False)
"""
if not package:
raise ValueError("package")
package = package.lower()
pypi_package_url = urljoin(
self.ecosystem.fetch_url, '{pkg_name}/json'.format(pkg_name=package)
)
response = requests.get(pypi_package_url)
if response.status_code != 200:
logger.error('Unable to obtain a list of versions for {pkg_name}'.format(
pkg_name=package
))
return package, []
return package, list({x for x in response.json().get('releases', {})})
class NpmReleasesFetcher(ReleasesFetcher):
"""Releases fetcher for NPM."""
def __init__(self, ecosystem):
"""Initialize instance."""
super(NpmReleasesFetcher, self).__init__(ecosystem)
def fetch_releases(self, package):
"""Fetch package releases versions.
Example output from the NPM endpoint:
{
...
versions: {
"0.1.0": {},
"0.1.2": {}
...
}
}
"""
if not package:
raise ValueError("package")
# quote '/' (but not '@') in scoped package name, e.g. in '@slicemenice/item-layouter'
r = get(self.ecosystem.fetch_url + quote(package, safe='@'))
if r.status_code == 200 and r.content:
return package, list(r.json().get('versions', {}).keys())
return package, []
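# --- Editor's note: illustrative example, not part of the original module. ---
# A hedged sketch of calling the NPM fetcher with a minimal stand-in for the
# Ecosystem model; fetch_releases() only needs the fetch_url attribute, and the
# registry URL below is an assumption rather than taken from f8a configuration.
def _example_npm_fetch():
    class _FakeEcosystem(object):
        fetch_url = 'https://registry.npmjs.org/'
    name, versions = NpmReleasesFetcher(_FakeEcosystem()).fetch_releases('left-pad')
    return name, versions   # e.g. ('left-pad', ['0.0.1', ..., '1.3.0'])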
class RubyGemsReleasesFetcher(ReleasesFetcher):
"""Releases fetcher for Rubygems."""
def __init__(self, ecosystem):
"""Initialize instance."""
super(RubyGemsReleasesFetcher, self).__init__(ecosystem)
def _search_package_name(self, package):
"""Search package on rubygems.org."""
url = '{url}/search.json?query={pkg}'.format(url=self.ecosystem.fetch_url,
pkg=package)
r = get(url)
if r.status_code == 200:
exact_match = [p['name']
for p in r.json()
if p['name'].lower() == package.lower()]
if exact_match:
return exact_match.pop()
raise ValueError("Package {} not found".format(package))
def fetch_releases(self, package):
"""Fetch package releases versions.
Example output from the RubyGems endpoint
[
{
"number": "1.0.0",
...
},
{
"number": "2.0.0",
...
}
...
]
"""
if not package:
raise ValueError("package")
url = '{url}/versions/{pkg}.json'.format(url=self.ecosystem.fetch_url,
pkg=package)
r = get(url)
if r.status_code == 404:
return self.fetch_releases(self._search_package_name(package))
return package, [ver['number'] for ver in r.json()]
class NugetReleasesFetcher(ReleasesFetcher):
"""Releases fetcher for Nuget."""
def __init__(self, ecosystem):
"""Initialize instance."""
super(NugetReleasesFetcher, self).__init__(ecosystem)
def scrape_versions_from_nuget_org(self, package, sort_by_downloads=False):
"""Scrape 'Version History' from Nuget."""
releases = []
nuget_packages_url = 'https://www.nuget.org/packages/'
page = get(nuget_packages_url + package)
page = BeautifulSoup(page.text, 'html.parser')
version_history = page.find(class_="version-history")
for version in version_history.find_all(href=re.compile('/packages/')):
version_text = version.text.replace('(current)', '').strip()
try:
semver_version.coerce(version_text)
downloads = int(version.find_next('td').text.strip().replace(',', ''))
except ValueError:
pass
else:
releases.append((version_text, downloads))
if sort_by_downloads:
releases.sort(key=itemgetter(1))
return package, [p[0] for p in reversed(releases)]
def fetch_releases(self, package):
"""Fetch package releases versions."""
if not package:
raise ValueError("package not specified")
# There's an API interface which lists available releases at
# https://api.nuget.org/v3-flatcontainer/{package}/index.json
# But it lists also unlisted/deprecated/shouldn't-be-used versions,
# so we don't use it.
return self.scrape_versions_from_nuget_org(package)
class MavenReleasesFetcher(ReleasesFetcher):
"""Releases fetcher for Maven."""
def __init__(self, ecosystem):
"""Initialize instance."""
super().__init__(ecosystem)
def releases_from_maven_org(self, group_id, artifact_id):
"""Fetch releases versions for group_id/artifact_id."""
metadata_filenames = ['maven-metadata.xml', 'maven-metadata-local.xml']
group_id_path = group_id.replace('.', '/')
versions = set()
we_good = False
for filename in metadata_filenames:
url = urljoin(
self.ecosystem.fetch_url,
'{g}/{a}/{f}'.format(g=group_id_path, a=artifact_id, f=filename)
)
try:
metadata_xml = etree.parse(urlopen(url))
we_good = True # We successfully downloaded at least one of the metadata files
version_elements = metadata_xml.findall('.//version')
versions = versions.union({x.text for x in version_elements})
except OSError:
# Not both XML files have to exist, so don't freak out yet
pass
if not we_good:
logger.error('Unable to obtain a list of versions for {g}:{a}'.format(
g=group_id, a=artifact_id)
)
return list(versions)
def fetch_releases(self, package):
"""Fetch package releases versions."""
if not package:
raise ValueError("package not specified")
try:
group_id, artifact_id = package.split(':')
except ValueError as exc:
raise ValueError("Invalid Maven coordinates: {a}".format(a=package)) from exc
return package, self.releases_from_maven_org(group_id, artifact_id)
class GolangReleasesFetcher(ReleasesFetcher):
"""Releases fetcher for Golang."""
def __init__(self, ecosystem):
"""Initialize instance."""
super(GolangReleasesFetcher, self).__init__(ecosystem)
def fetch_releases(self, package):
"""Fetch package releases versions."""
if not package:
raise ValueError('package not specified')
parts = package.split("/")[:3]
if len(parts) == 3: # this assumes github.com/org/project like structure
host, org, proj = parts
repo_url = 'git://{host}/{org}/{proj}.git'.format(host=host, org=org, proj=proj)
elif len(parts) == 2 and parts[0] == 'gopkg.in': # specific to gopkg.in/packages
host, proj = parts
repo_url = 'https://{host}/{proj}.git'.format(host=host, proj=proj)
else:
raise ValueError("Package {} is invalid git repository".format(package))
output = Git.ls_remote(repo_url, args=['-q'], refs=['HEAD'])
version, ref = output[0].split()
if not version:
raise ValueError("Package {} does not have associated versions".format(package))
return package, [version]
class F8aReleasesFetcher(ReleasesFetcher):
"""Releases fetcher for internal database."""
def __init__(self, ecosystem, database):
"""Initialize instance."""
super(F8aReleasesFetcher, self).__init__(ecosystem)
self.database = database
def fetch_releases(self, package):
"""Fetch analysed versions for specific ecosystem + package from f8a."""
query = self.database.query(Version).\
join(Analysis).join(Package).join(Ecosystem).\
filter(Package.name == package,
Ecosystem.name == self.ecosystem.name,
Analysis.finished_at.isnot(None))
versions = {v.identifier for v in query}
return package, list(sorted(versions, key=cmp_to_key(compare_version)))
class Dependency(object):
"""A Dependency consists of (package) name and version spec."""
def __init__(self, name, spec):
"""Initialize instance."""
self._name = name
# spec is a list where each item is either 2-tuple (operator, version) or list of these
# example: [[('>=', '0.6.0'), ('<', '0.7.0')], ('>', '1.0.0')] means:
# (>=0.6.0 and <0.7.0) or >1.0.0
self._spec = spec
@property
def name(self):
"""Get name property."""
return self._name
@property
def spec(self):
"""Get version spec property."""
return self._spec
def __contains__(self, item):
"""Implement 'in' operator."""
return self.check(item)
def __repr__(self):
"""Return string representation of this instance."""
return "{} {}".format(self.name, self.spec)
def __eq__(self, other):
"""Implement '==' operator."""
return self.name == other.name and self.spec == other.spec
def check(self, version):
"""Check if `version` fits into our dependency specification.
:param version: str
:return: bool
"""
def _compare_spec(spec):
if len(spec) == 1:
spec = ('=', spec[0])
token = Tokens.operators.index(spec[0])
comparison = compare_version(version, spec[1])
if token in [Tokens.EQ1, Tokens.EQ2]:
return comparison == 0
elif token == Tokens.GT:
return comparison == 1
elif token == Tokens.LT:
return comparison == -1
elif token == Tokens.GTE:
return comparison >= 0
elif token == Tokens.LTE:
return comparison <= 0
elif token == Tokens.NEQ:
return comparison != 0
else:
raise ValueError('Invalid comparison token')
def _all(spec_):
return all(_all(s) if isinstance(s, list) else _compare_spec(s) for s in spec_)
return any(_all(s) if isinstance(s, list) else _compare_spec(s) for s in self.spec)
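# --- Editor's note: illustrative example, not part of the original module. ---
# A Dependency spec is a list of (operator, version) tuples, optionally nested into
# AND-groups inside an OR, as described in __init__ above.
def _example_dependency_check():
    dep = Dependency('django', [[('>=', '1.5'), ('<', '1.8')], ('>', '2.0.0')])
    assert '1.6.2' in dep       # satisfies the >=1.5,<1.8 group
    assert '2.1.0' in dep       # satisfies the >2.0.0 alternative
    assert '1.9.0' not in dep   # matches neither alternative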
class DependencyParser(object):
"""Base class for Dependency parsing."""
def parse(self, specs):
"""Abstract method for Dependency parsing."""
pass
@staticmethod
def compose_sep(deps, separator):
"""Opposite of parse().
:param deps: list of Dependency()
:param separator: when joining dependencies, use this separator
:return: dict of {name: version spec}
"""
result = {}
for dep in deps:
if dep.name not in result:
result[dep.name] = separator.join([op + ver for op, ver in dep.spec])
else:
result[dep.name] += separator + separator.join([op + ver for op, ver in dep.spec])
return result
class PypiDependencyParser(DependencyParser):
"""Pypi Dependency parsing."""
@staticmethod
def _parse_python(spec):
"""Parse PyPI specification of a single dependency.
:param spec: str, for example "Django>=1.5,<1.8"
:return: [Django [[('>=', '1.5'), ('<', '1.8')]]]
"""
def _extract_op_version(spec):
# https://www.python.org/dev/peps/pep-0440/#compatible-release
if spec.operator == '~=':
version = _version_split(spec.version)
if len(version) > 1:
# ignore pre-release, post-release or developmental release
while not version[-1].isdigit():
del version[-1]
del version[-1] # will increase the last but one in next line
version[-1] = str(int(version[-1]) + 1)
else:
raise ValueError('%r must not be used with %r' % (spec.operator, spec.version))
return [('>=', spec.version), ('<', '.'.join(version))]
# Trailing .* is permitted per
# https://www.python.org/dev/peps/pep-0440/#version-matching
elif spec.operator == '==' and spec.version.endswith('.*'):
try:
result = check_output(['/usr/bin/semver-ranger', spec.version],
universal_newlines=True).strip()
gte, lt = result.split()
return [('>=', gte.lstrip('>=')), ('<', lt.lstrip('<'))]
except ValueError:
logger.info("couldn't resolve ==%s", spec.version)
return spec.operator, spec.version
# https://www.python.org/dev/peps/pep-0440/#arbitrary-equality
# Use of this operator is heavily discouraged, so just convert it to 'Version matching'
elif spec.operator == '===':
return '==', spec.version
else:
return spec.operator, spec.version
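        # For example, with the PEP 440 'compatible release' handling above:
        #   ~=2.2    becomes [('>=', '2.2'), ('<', '3')]
        #   ~=1.4.5  becomes [('>=', '1.4.5'), ('<', '1.5')]
        # (assuming _version_split simply splits the version string on dots)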
def _get_pip_spec(requirements):
"""There's no `specs` field In Pip 8+, take info from `specifier` field."""
if hasattr(requirements, 'specs'):
return requirements.specs
elif hasattr(requirements, 'specifier'):
specs = [_extract_op_version(spec) for spec in requirements.specifier]
if len(specs) == 0:
specs = [('>=', '0.0.0')]
elif len(specs) > 1:
specs = [specs]
return specs
# create a temporary file and store the spec there since
# `parse_requirements` requires a file
with NamedTemporaryFile(mode='w+', suffix='pysolve') as f:
f.write(spec)
f.flush()
parsed = parse_requirements(f.name, session=f.name)
dependency = [Dependency(x.name, _get_pip_spec(x.req)) for x in parsed].pop()
return dependency
def parse(self, specs):
"""Parse specs."""
return [self._parse_python(s) for s in specs]
@staticmethod
def compose(deps):
"""Compose deps."""
return DependencyParser.compose_sep(deps, ',')
@staticmethod
def restrict_versions(deps):
"""Not implemented."""
return deps # TODO
class NpmDependencyParser(DependencyParser):
"""NPM Dependency parsing."""
@staticmethod
def _parse_npm_tokens(spec):
"""Parse npm tokens."""
for token in Tokens.operators:
if token in spec:
return token, spec.split(token)[1]
return spec,
def _parse_npm(self, name, spec):
"""Parse NPM specification of a single dependency.
:param name: str
:param spec: str
:return: Dependency
"""
if spec == 'latest':
specs = '*'
else:
specs = check_output(['/usr/bin/semver-ranger', spec], universal_newlines=True).strip()
if specs == 'null':
logger.info("invalid version specification for %s = %s", name, spec)
return None
ret = []
for s in specs.split('||'):
if ' ' in s:
spaced = s.split(' ')
assert len(spaced) == 2
left, right = spaced
ret.append([self._parse_npm_tokens(left), self._parse_npm_tokens(right)])
elif s == '*':
ret.append(('>=', '0.0.0'))
else:
ret.append(self._parse_npm_tokens(s))
return Dependency(name, ret)
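    # Illustration: _parse_npm('bar', 'latest') yields Dependency('bar', [('>=', '0.0.0')]);
    # assuming /usr/bin/semver-ranger expands '^1.2.3' to '>=1.2.3 <2.0.0',
    # _parse_npm('foo', '^1.2.3') would yield roughly
    # Dependency('foo', [[('>=', '1.2.3'), ('<', '2.0.0')]]).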
def parse(self, specs):
"""Transform list of dependencies (strings) to list of Dependency."""
deps = []
for spec in specs:
name, ver = spec.split(' ', 1)
parsed = self._parse_npm(name, ver)
if parsed:
deps.append(parsed)
return deps
@staticmethod
def compose(deps):
"""Oposite of parse()."""
return DependencyParser.compose_sep(deps, ' ')
@staticmethod
def restrict_versions(deps):
"""From list of semver ranges select only the most restricting ones for each operator.
:param deps: list of Dependency(), example:
[node [('>=', '0.6.0')], node [('<', '1.0.0')], node [('>=', '0.8.0')]]
:return: list of Dependency() with only the most restrictive versions, example:
[node [('<', '1.0.0')], node [('>=', '0.8.0')]]
"""
# list to dict
# {
# 'node' : {
# '>=': ['0.8.0', '0.6.0'],
# '<': ['1.0.0']
# }
# }
dps_dict = defaultdict(dict)
for dp in deps:
if dp.name not in dps_dict:
dps_dict[dp.name] = defaultdict(list)
for spec in dp.spec:
if len(spec) != 2:
continue
operator, version = spec
dps_dict[dp.name][operator].append(version)
# select only the most restrictive versions
result = []
for name, version_spec_dict in dps_dict.items():
specs = []
for operator, versions in version_spec_dict.items():
if operator in ['>', '>=']: # select highest version
version = sorted(versions, key=cmp_to_key(compare_version))[-1]
elif operator in ['<', '<=']: # select lowest version
version = sorted(versions, key=cmp_to_key(compare_version))[0]
specs.append((operator, version))
# dict back to list
result.append(Dependency(name, specs))
return result
RubyGemsDependencyParser = NpmDependencyParser
class OSSIndexDependencyParser(NpmDependencyParser):
"""Parse OSS Index version specification."""
def _parse_npm(self, name, spec):
"""Parse OSS Index version specification. It's similar to NPM semver, with few tweaks."""
# sometimes there's '|' instead of '||', but the meaning seems to be the same
spec = spec.replace(' | ', ' || ')
# remove superfluous brackets
spec = spec.replace('(', '').replace(')', '')
return super()._parse_npm(name, spec)
class NugetDependencyParser(object):
"""Nuget version specification parsing."""
def parse(self, specs):
"""Transform list of dependencies (strings) to list of Dependency.
https://docs.microsoft.com/en-us/nuget/create-packages/dependency-versions#version-ranges
:param specs: list of dependencies (strings)
:return: list of Dependency
"""
# TODO: reduce cyclomatic complexity
deps = []
for spec in specs:
name, version_range = spec.split(' ', 1)
# 1.0 -> 1.0≤x
if re.search(r'[,()\[\]]', version_range) is None:
dep = Dependency(name, [('>=', version_range)])
# [1.0,2.0] -> 1.0≤x≤2.0
elif re.fullmatch(r'\[(.+),(.+)\]', version_range):
m = re.fullmatch(r'\[(.+),(.+)\]', version_range)
dep = Dependency(name, [[('>=', m.group(1)), ('<=', m.group(2))]])
# (1.0,2.0) -> 1.0<x<2.0
elif re.fullmatch(r'\((.+),(.+)\)', version_range):
m = re.fullmatch(r'\((.+),(.+)\)', version_range)
dep = Dependency(name, [[('>', m.group(1)), ('<', m.group(2))]])
            # The following one is not in the specification,
            # so we can only guess what the intention was.
# Seen in NLog:5.0.0-beta08 dependencies
# [1.0, ) -> 1.0≤x
elif re.fullmatch(r'\[(.+), \)', version_range):
m = re.fullmatch(r'\[(.+), \)', version_range)
dep = Dependency(name, [('>=', m.group(1))])
# [1.0,2.0) -> 1.0≤x<2.0
elif re.fullmatch(r'\[(.+),(.+)\)', version_range):
m = re.fullmatch(r'\[(.+),(.+)\)', version_range)
dep = Dependency(name, [[('>=', m.group(1)), ('<', m.group(2))]])
# (1.0,) -> 1.0<x
elif re.fullmatch(r'\((.+),\)', version_range):
m = re.fullmatch(r'\((.+),\)', version_range)
dep = Dependency(name, [('>', m.group(1))])
# [1.0] -> x==1.0
elif re.fullmatch(r'\[(.+)\]', version_range):
m = re.fullmatch(r'\[(.+)\]', version_range)
dep = Dependency(name, [('==', m.group(1))])
# (,1.0] -> x≤1.0
elif re.fullmatch(r'\(,(.+)\]', version_range):
m = re.fullmatch(r'\(,(.+)\]', version_range)
dep = Dependency(name, [('<=', m.group(1))])
# (,1.0) -> x<1.0
elif re.fullmatch(r'\(,(.+)\)', version_range):
m = re.fullmatch(r'\(,(.+)\)', version_range)
dep = Dependency(name, [('<', m.group(1))])
elif re.fullmatch(r'\((.+)\)', version_range):
raise ValueError("invalid version range %r" % version_range)
deps.append(dep)
return deps
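    # Example (illustration only): parse(['jQuery [1.10.0,2.0.0)']) returns
    # [Dependency('jQuery', [[('>=', '1.10.0'), ('<', '2.0.0')]])],
    # following the "[1.0,2.0) -> 1.0≤x<2.0" rule above.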
class NoOpDependencyParser(DependencyParser):
"""Dummy dependency parser for ecosystems that don't support version ranges."""
def parse(self, specs):
"""Transform list of dependencies (strings) to list of Dependency."""
return [Dependency(*x.split(' ')) for x in specs]
@staticmethod
def compose(deps):
"""Opposite of parse()."""
return DependencyParser.compose_sep(deps, ' ')
@staticmethod
def restrict_versions(deps):
"""Not implemented."""
return deps
class GolangDependencyParser(DependencyParser):
"""Dependency parser for Golang."""
def parse(self, specs):
"""Transform list of dependencies (strings) to list of Dependency."""
dependencies = []
for spec in specs:
spec_list = spec.split(' ')
if len(spec_list) > 1:
dependencies.append(Dependency(spec_list[0], spec_list[1]))
else:
dependencies.append(Dependency(spec_list[0], ''))
return dependencies
@staticmethod
def compose(deps):
"""Opposite of parse()."""
return DependencyParser.compose_sep(deps, ' ')
@staticmethod
def restrict_versions(deps):
"""Not implemented."""
return deps
class Solver(object):
"""Base class for resolving dependencies."""
def __init__(self, ecosystem, dep_parser=None, fetcher=None, highest_dependency_version=True):
"""Initialize instance."""
self.ecosystem = ecosystem
self._dependency_parser = dep_parser
self._release_fetcher = fetcher
self._highest_dependency_version = highest_dependency_version
@property
def dependency_parser(self):
"""Return DependencyParser instance used by this solver."""
return self._dependency_parser
@property
def release_fetcher(self):
"""Return ReleasesFetcher instance used by this solver."""
return self._release_fetcher
def solve(self, dependencies, graceful=True, all_versions=False):
"""Solve `dependencies` against upstream repository.
:param dependencies: List, List of dependencies in native format
:param graceful: bool, Print info output to stdout
:param all_versions: bool, Return all matched versions instead of the latest
:return: Dict[str, str], Matched versions
"""
solved = {}
for dep in self.dependency_parser.parse(dependencies):
logger.debug("Fetching releases for: {}".format(dep))
name, releases = self.release_fetcher.fetch_releases(dep.name)
if name in solved:
raise SolverException("Dependency: {} is listed multiple times".format(name))
if not releases:
if graceful:
logger.info("No releases found for: %s", dep.name)
else:
raise SolverException("No releases found for: {}".format(dep.name))
matching = sorted([release
for release in releases
if release in dep], key=cmp_to_key(compare_version))
logger.debug(" matching:\n {}".format(matching))
if all_versions:
solved[name] = matching
else:
if not matching:
solved[name] = None
else:
if self._highest_dependency_version:
solved[name] = matching[-1]
else:
solved[name] = matching[0]
return solved
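# Rough usage sketch (the ecosystem object and package name below are made up):
#   solver = get_ecosystem_solver(pypi_ecosystem)
#   solver.solve(['six >=1.9.0'])                     # -> {'six': '<newest matching version>'}
#   solver.solve(['six >=1.9.0'], all_versions=True)  # -> {'six': ['1.9.0', ..., '<newest>']}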
class PypiSolver(Solver):
"""Pypi dependencies solver."""
def __init__(self, ecosystem, parser=None, fetcher=None):
"""Initialize instance."""
super(PypiSolver, self).__init__(ecosystem,
parser or PypiDependencyParser(),
fetcher or PypiReleasesFetcher(ecosystem))
class NpmSolver(Solver):
"""Npm dependencies solver."""
def __init__(self, ecosystem, parser=None, fetcher=None):
"""Initialize instance."""
super(NpmSolver, self).__init__(ecosystem,
parser or NpmDependencyParser(),
fetcher or NpmReleasesFetcher(ecosystem))
class RubyGemsSolver(Solver):
"""Rubygems dependencies solver."""
def __init__(self, ecosystem, parser=None, fetcher=None):
"""Initialize instance."""
super(RubyGemsSolver, self).__init__(ecosystem,
parser or RubyGemsDependencyParser(),
fetcher or RubyGemsReleasesFetcher(ecosystem))
class NugetSolver(Solver):
"""Nuget dependencies solver.
    NuGet is a bit specific because, by default, it resolves version specs to the lowest possible version.
https://docs.microsoft.com/en-us/nuget/release-notes/nuget-2.8#-dependencyversion-switch
"""
def __init__(self, ecosystem, parser=None, fetcher=None):
"""Initialize instance."""
super(NugetSolver, self).__init__(ecosystem,
parser or NugetDependencyParser(),
fetcher or NugetReleasesFetcher(ecosystem),
highest_dependency_version=False)
class MavenManualSolver(Solver):
"""Use this only if you need to resolve all versions or use specific DependencyParser.
Otherwise use MavenSolver (below).
"""
def __init__(self, ecosystem, parser, fetcher=None):
"""Initialize instance."""
super().__init__(ecosystem,
parser,
fetcher or MavenReleasesFetcher(ecosystem))
class GolangSolver(Solver):
"""Golang dependencies solver."""
def __init__(self, ecosystem, parser=None, fetcher=None):
"""Initialize instance."""
super(GolangSolver, self).__init__(ecosystem,
parser or GolangDependencyParser(),
fetcher or GolangReleasesFetcher(ecosystem))
def solve(self, dependencies):
"""Solve `dependencies` against upstream repository."""
result = {}
for dependency in self.dependency_parser.parse(dependencies):
if dependency.spec:
result[dependency.name] = dependency.spec
else:
version = self.release_fetcher.fetch_releases(dependency.name)[1][0]
result[dependency.name] = version
return result
class MavenSolver(object):
"""Doesn't inherit from Solver, because we don't use its solve().
    We also don't need a DependencyParser or a ReleasesFetcher for Maven.
    'mvn versions:resolve-ranges' does all the dirty work for us.
    It resolves to only one version, so if you need solve(all_versions=True), use MavenManualSolver.
"""
@staticmethod
def _generate_pom_xml(to_solve):
"""Create pom.xml with dependencies from to_solve.
And run 'mvn versions:resolve-ranges',
which resolves the version ranges (overwrites the pom.xml).
:param to_solve: {"groupId:artifactId": "version-range"}
"""
project = etree.Element('project')
etree.SubElement(project, 'modelVersion').text = '4.0.0'
etree.SubElement(project, 'groupId').text = 'foo.bar.baz'
etree.SubElement(project, 'artifactId').text = 'testing'
etree.SubElement(project, 'version').text = '1.0.0'
dependencies = etree.SubElement(project, 'dependencies')
for name, version_range in to_solve.items():
group_id, artifact_id = name.rstrip(':').split(':')
dependency = etree.SubElement(dependencies, 'dependency')
etree.SubElement(dependency, 'groupId').text = group_id
etree.SubElement(dependency, 'artifactId').text = artifact_id
etree.SubElement(dependency, 'version').text = version_range
with open('pom.xml', 'wb') as pom:
pom.write(etree.tostring(project, xml_declaration=True, pretty_print=True))
TimedCommand.get_command_output(['mvn', 'versions:resolve-ranges'], graceful=False)
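    # The pom.xml written above looks roughly like this before mvn rewrites it
    # ("org.example:lib" is just a made-up coordinate):
    #   <project>
    #     <modelVersion>4.0.0</modelVersion>
    #     <groupId>foo.bar.baz</groupId>
    #     <artifactId>testing</artifactId>
    #     <version>1.0.0</version>
    #     <dependencies>
    #       <dependency>
    #         <groupId>org.example</groupId>
    #         <artifactId>lib</artifactId>
    #         <version>[1.0,2.0)</version>
    #       </dependency>
    #     </dependencies>
    #   </project>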
@staticmethod
def _dependencies_from_pom_xml():
"""Extract dependencies from pom.xml in current directory.
:return: {"groupId:artifactId": "version"}
"""
solved = {}
with open('pom.xml') as r:
pom_dict = anymarkup.parse(r.read())
dependencies = pom_dict.get('project', {}).get('dependencies', {}).get('dependency', [])
if not isinstance(dependencies, list):
dependencies = [dependencies]
for dependency in dependencies:
name = "{}:{}".format(dependency['groupId'], dependency['artifactId'])
solved[name] = str(dependency['version'])
return solved
@staticmethod
def _resolve_versions(to_solve):
"""Resolve version ranges in to_solve.
:param to_solve: {"groupId:artifactId": "version-range"}
:return: {"groupId:artifactId": "version"}
"""
if not to_solve:
return {}
with TemporaryDirectory() as tmpdir:
with cwd(tmpdir):
MavenSolver._generate_pom_xml(to_solve)
return MavenSolver._dependencies_from_pom_xml()
@staticmethod
def is_version_range(ver_spec):
"""Check whether ver_spec contains version range."""
# http://maven.apache.org/enforcer/enforcer-rules/versionRanges.html
return re.search(r'[,()\[\]]', ver_spec) is not None
def solve(self, dependencies):
"""Solve version ranges in dependencies."""
already_solved = {}
to_solve = {}
for dependency in dependencies:
name, ver_spec = dependency.split(' ', 1)
if not self.is_version_range(ver_spec):
already_solved[name] = ver_spec
else:
to_solve[name] = ver_spec
result = already_solved.copy()
result.update(self._resolve_versions(to_solve))
return result
def get_ecosystem_solver(ecosystem, with_parser=None, with_fetcher=None):
"""Get Solver subclass instance for particular ecosystem.
:param ecosystem: Ecosystem
:param with_parser: DependencyParser instance
:param with_fetcher: ReleasesFetcher instance
:return: Solver
"""
if ecosystem.is_backed_by(EcosystemBackend.maven):
if with_parser is None:
return MavenSolver()
else:
return MavenManualSolver(ecosystem, with_parser, with_fetcher)
elif ecosystem.is_backed_by(EcosystemBackend.npm):
return NpmSolver(ecosystem, with_parser, with_fetcher)
elif ecosystem.is_backed_by(EcosystemBackend.pypi):
return PypiSolver(ecosystem, with_parser, with_fetcher)
elif ecosystem.is_backed_by(EcosystemBackend.rubygems):
return RubyGemsSolver(ecosystem, with_parser, with_fetcher)
elif ecosystem.is_backed_by(EcosystemBackend.nuget):
return NugetSolver(ecosystem, with_parser, with_fetcher)
elif ecosystem.is_backed_by(EcosystemBackend.go):
return GolangSolver(ecosystem, with_parser, with_fetcher)
raise ValueError('Unknown ecosystem: {}'.format(ecosystem.name))
def get_ecosystem_parser(ecosystem):
"""Get DependencyParser subclass instance for particular ecosystem."""
if ecosystem.is_backed_by(EcosystemBackend.maven):
return NoOpDependencyParser()
elif ecosystem.is_backed_by(EcosystemBackend.npm):
return NpmDependencyParser()
elif ecosystem.is_backed_by(EcosystemBackend.pypi):
return PypiDependencyParser()
elif ecosystem.is_backed_by(EcosystemBackend.rubygems):
return RubyGemsDependencyParser()
elif ecosystem.is_backed_by(EcosystemBackend.nuget):
return NugetDependencyParser()
elif ecosystem.is_backed_by(EcosystemBackend.go):
return GolangDependencyParser()
raise ValueError('Unknown ecosystem: {}'.format(ecosystem.name))
|
gpl-3.0
| -6,675,852,524,338,168,000
| 35.320588
| 100
| 0.562799
| false
| 4.107206
| false
| false
| false
|
JohnLunzer/flexx
|
flexx/ui/widgets/_html.py
|
1
|
3211
|
"""
Simple example:
.. UIExample:: 75
from flexx import app, ui
class Example(ui.Widget):
def init(self):
with ui.html.UL():
ui.html.LI(text='foo')
ui.html.LI(text='bar')
.. UIExample:: 150
from flexx import app, ui, event
class Example(ui.Widget):
def init(self):
with ui.html.UL():
ui.html.LI(text='foo')
ui.html.LI(text='bar')
with ui.html.LI():
with ui.html.I():
self.now = ui.html.Span(text='0')
self.but = ui.html.Button(text='press me')
class JS:
@event.connect('but.mouse_down')
def on_click(self, *events):
self.now.text = window.Date.now()
"""
from ... import event
from . import Widget
class Div(Widget):
"""
    This class is the base class for "HTML widgets". These provide a
lower-level way of working with HTML content that can feel more
natural to users with a background in web development.
Via the ``flexx.ui.html`` factory object, it is possible to create *any*
    type of DOM element. E.g. ``ui.html.Table()`` creates a table and
``ui.html.b(text='foo')`` creates a piece of bold text.
Since this class inherits from ``Widget``, all base widget functionality
(e.g. mouse events) work as expected. However, the specific functionality
of each element (e.g. ``src`` for img elements) must be used in the
"JavaScript way".
In contrast to regular Flexx widgets, the css class name of the node only
consists of the name(s) provided via the ``css_class`` property.
Also see :ref:`this example <classic_web_dev.py>`.
"""
class Both:
@event.prop
def text(self, v=''):
""" The inner HTML for this element.
"""
return str(v)
class JS:
def __init__(self, *args):
super().__init__(*args)
self.node.className = ''
def _init_phosphor_and_node(self):
self.phosphor = self._create_phosphor_widget(self._class_name.lower())
self.node = self.phosphor.node
@event.connect('text')
def __on_inner_html(self, *events):
self.node.innerHTML = events[-1].new_value
def _add_child(self, widget):
self.node.appendChild(widget.node)
class HTMLElementFactory:
"""
This object can be used to generate a Flexx Widget class for any
HTML element that you'd like. These Widget classes inherit from ``Div``.
"""
def __getattr__(self, name):
name = name.lower()
cache = globals()
if name.startswith('_'):
return super().__getattr__(name)
if name not in cache:
# Create new class, put it in this module so that JSModule can find it
cls = type(name, (Div,), {})
cls.__module__ = cls.__jsmodule__ = __name__
cache[name] = cls
return cache[name]
html = HTMLElementFactory()
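# Any attribute access on `html` creates (and caches) a Div subclass named after
# the corresponding element, so e.g. html.Table() gives a table widget and
# html.b(text='foo') a piece of bold text, as described in the Div docstring above.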
|
bsd-2-clause
| -7,527,310,105,070,645,000
| 27.415929
| 82
| 0.540953
| false
| 4.111396
| false
| false
| false
|
oskyar/test-TFG
|
TFG/urls.py
|
1
|
2144
|
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.auth.views import login, logout_then_login
from django.views.static import serve
import logging
from importlib import import_module
from django.conf import settings
from TFG.apps.handlererrors.views import Error403, Error404, Error500
from TFG.apps.user.views import Index
from vanilla import TemplateView
from django.core import exceptions
from TFG.apps.user.views import ClientViewErrors
# from registration.views import RegistrationView
admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', 'TFG.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', Index.as_view(), name='index'),
url(r'^cookies$', TemplateView.as_view(template_name="cookies.html"), name='cookies'),
url(r'^$', login, {'template_name': 'user/login.html'},
name='login'),
url(r'^logout/$', logout_then_login, name='logout'),
# url(r'^', include('TFG.apps.handlererrors.urls')),
# url(r'^db', TFG.apps.index.views.db, name='db'),
url(r'^admin/', include(admin.site.urls)),
url(r'^chaining/', include('smart_selects.urls')),
url(r'^user/', include('TFG.apps.user.urls')),
url(r'^test/', include('TFG.apps.test.urls')),
url(r'^subject/', include('TFG.apps.subject.urls')),
url(r'^search/', include('TFG.apps.search.urls')),
url(r'^s3direct/', include('s3direct.urls')),
# url(r'^test/', include('TFG.apps.test.urls')),
url(r'^media/(?P<path>.*)$', serve,
{'document_root': settings.MEDIA_ROOT,}),
]
# import_module("TFG.apps.index.signals")
# This code is used to look up all the signals added to the apps
"""logger = logging.getLogger(__name__)
signal_modules = {}
for app in settings.INSTALLED_APPS:
signals_module = '%s.signals' % app
try:
logger.debug('loading "%s" ..' % signals_module)
signal_modules[app] = import_module(signals_module)
except ImportError as e:
logger.warning(
'failed to import "%s", reason: %s' % (signals_module, str(e)))
"""
|
gpl-2.0
| 830,453,397,419,805,600
| 34.7
| 90
| 0.668534
| false
| 3.346875
| false
| false
| false
|
dingzg/onepanel
|
lib/module/user.py
|
1
|
10650
|
#!/usr/bin/env python2.6
#-*- coding: utf-8 -*-
# Copyright [OnePanel]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package for user management.
"""
import os
if __name__ == '__main__':
import sys
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, root_path)
import pexpect
import shlex
import time
import pwd
import grp
import subprocess
from utils import b2h, ftime
#---------------------------------------------------------------------------------------------------
#Function Name : main_process
#Usage :
#Parameters : None
#
#Return value :
# 1
#---------------------------------------------------------------------------------------------------
def main_process(self):
action = self.get_argument('action', '')
if action == 'listuser':
fullinfo = self.get_argument('fullinfo', 'on')
self.write({'code': 0, 'msg': u'成功获取用户列表!', 'data': listuser(fullinfo=='on')})
elif action == 'listgroup':
fullinfo = self.get_argument('fullinfo', 'on')
self.write({'code': 0, 'msg': u'成功获取用户组列表!', 'data': listgroup(fullinfo=='on')})
elif action in ('useradd', 'usermod'):
if self.config.get('runtime', 'mode') == 'demo':
self.write({'code': -1, 'msg': u'DEMO状态不允许添加和修改用户!'})
return
pw_name = self.get_argument('pw_name', '')
pw_gecos = self.get_argument('pw_gecos', '')
pw_gname = self.get_argument('pw_gname', '')
pw_dir = self.get_argument('pw_dir', '')
pw_shell = self.get_argument('pw_shell', '')
pw_passwd = self.get_argument('pw_passwd', '')
pw_passwdc = self.get_argument('pw_passwdc', '')
lock = self.get_argument('lock', '')
lock = (lock == 'on') and True or False
if pw_passwd != pw_passwdc:
self.write({'code': -1, 'msg': u'两次输入的密码不一致!'})
return
options = {
'pw_gecos': _u(pw_gecos),
'pw_gname': _u(pw_gname),
'pw_dir': _u(pw_dir),
'pw_shell': _u(pw_shell),
'lock': lock
}
if len(pw_passwd)>0: options['pw_passwd'] = _u(pw_passwd)
if action == 'useradd':
createhome = self.get_argument('createhome', '')
createhome = (createhome == 'on') and True or False
options['createhome'] = createhome
if useradd(_u(pw_name), options):
self.write({'code': 0, 'msg': u'用户添加成功!'})
else:
self.write({'code': -1, 'msg': u'用户添加失败!'})
elif action == 'usermod':
if usermod(_u(pw_name), options):
self.write({'code': 0, 'msg': u'用户修改成功!'})
else:
self.write({'code': -1, 'msg': u'用户修改失败!'})
elif action == 'userdel':
if self.config.get('runtime', 'mode') == 'demo':
self.write({'code': -1, 'msg': u'DEMO状态不允许删除用户!'})
return
pw_name = self.get_argument('pw_name', '')
if userdel(_u(pw_name)):
self.write({'code': 0, 'msg': u'用户删除成功!'})
else:
self.write({'code': -1, 'msg': u'用户删除失败!'})
elif action in ('groupadd', 'groupmod', 'groupdel'):
if self.config.get('runtime', 'mode') == 'demo':
self.write({'code': -1, 'msg': u'DEMO状态不允许操作用户组!'})
return
gr_name = self.get_argument('gr_name', '')
gr_newname = self.get_argument('gr_newname', '')
actionstr = {'groupadd': u'添加', 'groupmod': u'修改', 'groupdel': u'删除'};
if action == 'groupmod':
rt = groupmod(_u(gr_name), _u(gr_newname))
else:
rt = getattr(user, action)(_u(gr_name))
if rt:
self.write({'code': 0, 'msg': u'用户组%s成功!' % actionstr[action]})
else:
self.write({'code': -1, 'msg': u'用户组%s失败!' % actionstr[action]})
elif action in ('groupmems_add', 'groupmems_del'):
if self.config.get('runtime', 'mode') == 'demo':
self.write({'code': -1, 'msg': u'DEMO状态不允许操作用户组成员!'})
return
gr_name = self.get_argument('gr_name', '')
mem = self.get_argument('mem', '')
option = action.split('_')[1]
optionstr = {'add': u'添加', 'del': u'删除'}
if groupmems(_u(gr_name), _u(option), _u(mem)):
self.write({'code': 0, 'msg': u'用户组成员%s成功!' % optionstr[option]})
else:
            self.write({'code': -1, 'msg': u'用户组成员%s失败!' % optionstr[option]})
def listuser(fullinfo=True):
if fullinfo:
# get lock status from /etc/shadow
locks = {}
with open('/etc/shadow') as f:
for line in f:
fields = line.split(':', 2)
locks[fields[0]] = fields[1].startswith('!')
users = pwd.getpwall()
for i, user in enumerate(users):
users[i] = dict((name, getattr(user, name))
for name in dir(user)
if not name.startswith('__'))
try:
gname = grp.getgrgid(user.pw_gid).gr_name
except:
gname = ''
users[i]['pw_gname'] = gname
users[i]['lock'] = locks[user.pw_name]
else:
users = [pw.pw_name for pw in pwd.getpwall()]
return users
def passwd(username, password):
try:
cmd = shlex.split('passwd \'%s\'' % username)
except:
return False
child = pexpect.spawn(cmd[0], cmd[1:])
i = child.expect(['New password', 'Unknown user name'])
if i == 1:
if child.isalive(): child.wait()
return False
child.sendline(password)
child.expect('Retype new password')
child.sendline(password)
i = child.expect(['updated successfully', pexpect.EOF])
if child.isalive(): child.wait()
return i == 0
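# Usage sketch (hypothetical account and password): passwd('deploy', 's3cret')
# drives the interactive `passwd deploy` prompts through pexpect and returns
# True only if the "updated successfully" message is seen.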
def useradd(username, options):
# command like: useradd -c 'New User' -g newgroup -s /bin/bash -m newuser
cmd = ['useradd']
if options.has_key('pw_gname') and options['pw_gname']:
cmd.extend(['-g', options['pw_gname']])
if options.has_key('pw_gecos'):
cmd.extend(['-c', options['pw_gecos']])
if options.has_key('pw_shell'):
cmd.extend(['-s', options['pw_shell']])
if options.has_key('createhome') and options['createhome']:
cmd.append('-m')
else:
cmd.append('-M')
cmd.append(username)
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
p.stdout.read()
p.stderr.read()
if p.wait() != 0: return False
# check if need to lock/unlock the new account
if options.has_key('lock') and options['lock']:
if not usermod(username, {'lock': options['lock']}): return False
# check if need to set passwd
if options.has_key('pw_passwd'):
if not passwd(username, options['pw_passwd']): return False
return True
def usermod(username, options):
user = pwd.getpwnam(username)
# command like: usermod -c 'I am root' -g root -d /root/ -s /bin/bash -U root
cmd = ['usermod']
if options.has_key('pw_gname'):
cmd.extend(['-g', options['pw_gname']])
if options.has_key('pw_gecos') and options['pw_gecos'] != user.pw_gecos:
cmd.extend(['-c', options['pw_gecos']])
if options.has_key('pw_dir') and options['pw_dir'] != user.pw_dir:
cmd.extend(['-d', options['pw_dir']])
if options.has_key('pw_shell') and options['pw_shell'] != user.pw_shell:
cmd.extend(['-s', options['pw_shell']])
if options.has_key('lock') and options['lock']:
cmd.append('-L')
else:
cmd.append('-U')
cmd.append(username)
if len(cmd) > 2:
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
p.stdout.read()
msg = p.stderr.read()
if p.wait() != 0:
if not 'no changes' in msg:
return False
# check if need to change passwd
if options.has_key('pw_passwd'):
if not passwd(username, options['pw_passwd']): return False
return True
def userdel(username):
p = subprocess.Popen(['userdel', username],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
p.stdout.read()
p.stderr.read()
return p.wait() == 0
def listgroup(fullinfo=True):
if fullinfo:
groups = grp.getgrall()
for i, group in enumerate(groups):
groups[i] = dict((name, getattr(group, name))
for name in dir(group)
if not name.startswith('__'))
else:
groups = [gr.gr_name for gr in grp.getgrall()]
return groups
def groupadd(groupname):
p = subprocess.Popen(['groupadd', groupname],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
p.stdout.read()
p.stderr.read()
return p.wait() == 0
def groupmod(groupname, newgroupname):
p = subprocess.Popen(['groupmod', '-n', newgroupname, groupname],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
p.stdout.read()
p.stderr.read()
return p.wait() == 0
def groupdel(groupname):
p = subprocess.Popen(['groupdel', groupname],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
p.stdout.read()
p.stderr.read()
return p.wait() == 0
def groupmems(groupname, option, mem):
cmd = ['groupmems', '-g', groupname]
if option == 'add':
cmd.extend(['-a', mem])
elif option == 'del':
cmd.extend(['-d', mem])
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
p.stdout.read()
p.stderr.read()
return p.wait() == 0
|
apache-2.0
| -5,547,206,471,437,395,000
| 33.801347
| 100
| 0.543731
| false
| 3.362394
| false
| false
| false
|
Fat-Zer/FreeCAD_sf_master
|
src/Tools/updatefromcrowdin.py
|
11
|
12203
|
#!/usr/bin/python
#***************************************************************************
#* *
#* Copyright (c) 2009 Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Library General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
from __future__ import print_function
'''
Usage:
updatefromcrowdin.py [options] [LANGCODE] [LANGCODE LANGCODE...]
Example:
./updatefromcrowdin.py [-d <directory>] fr nl pt_BR
Options:
-h or --help : prints this help text
-d or --directory : specifies a directory containing unzipped translation folders
-z or --zipfile : specifies a path to the freecad.zip file
-m or --module : specifies a single module name to be updated, instead of all modules
If no argument is specified, the command will try to find and use a freecad.zip file
located in the current src/Tools directory (such as the one obtained by running
updatecrowdin.py download) and will extract the default languages specified below
in this file.
This command must be run from its current source tree location (/src/Tools)
so it can find the correct places to put the translation files. If run with
no arguments, the latest translations from crowdin will be downloaded, unzipped
and put to the correct locations. The necessary renaming of files and .qm generation
will be taken care of. The qrc files will also be updated when new
translations are added.
NOTE! The crowdin site only allows downloading "builds" (zipped archives)
which must be built prior to downloading. This means a build might not
reflect the latest state of the translations. Better always make a build before
using this script!
You can specify a directory with the -d option if you already downloaded
and extracted the build, or you can specify a single module to update with -m.
You can also run the script without any language code, in which case all the
languages contained in the archive or directory will be added.
'''
import sys, os, shutil, tempfile, zipfile, getopt, StringIO, re
crowdinpath = "http://crowdin.net/download/project/freecad.zip"
# locations list contains Module name, relative path to translation folder and relative path to qrc file
locations = [["AddonManager","../Mod/AddonManager/Resources/translations","../Mod/AddonManager/Resources/AddonManager.qrc"],
["Arch","../Mod/Arch/Resources/translations","../Mod/Arch/Resources/Arch.qrc"],
["Assembly","../Mod/Assembly/Gui/Resources/translations","../Mod/Assembly/Gui/Resources/Assembly.qrc"],
["draft","../Mod/Draft/Resources/translations","../Mod/Draft/Resources/Draft.qrc"],
["Drawing","../Mod/Drawing/Gui/Resources/translations","../Mod/Drawing/Gui/Resources/Drawing.qrc"],
["Fem","../Mod/Fem/Gui/Resources/translations","../Mod/Fem/Gui/Resources/Fem.qrc"],
["FreeCAD","../Gui/Language","../Gui/Language/translation.qrc"],
["Image","../Mod/Image/Gui/Resources/translations","../Mod/Image/Gui/Resources/Image.qrc"],
["Mesh","../Mod/Mesh/Gui/Resources/translations","../Mod/Mesh/Gui/Resources/Mesh.qrc"],
["MeshPart","../Mod/MeshPart/Gui/Resources/translations","../Mod/MeshPart/Gui/Resources/MeshPart.qrc"],
["OpenSCAD","../Mod/OpenSCAD/Resources/translations","../Mod/OpenSCAD/Resources/OpenSCAD.qrc"],
["Part","../Mod/Part/Gui/Resources/translations","../Mod/Part/Gui/Resources/Part.qrc"],
["PartDesign","../Mod/PartDesign/Gui/Resources/translations","../Mod/PartDesign/Gui/Resources/PartDesign.qrc"],
["Points","../Mod/Points/Gui/Resources/translations","../Mod/Points/Gui/Resources/Points.qrc"],
["Raytracing","../Mod/Raytracing/Gui/Resources/translations","../Mod/Raytracing/Gui/Resources/Raytracing.qrc"],
["ReverseEngineering","../Mod/ReverseEngineering/Gui/Resources/translations","../Mod/ReverseEngineering/Gui/Resources/ReverseEngineering.qrc"],
["Robot","../Mod/Robot/Gui/Resources/translations","../Mod/Robot/Gui/Resources/Robot.qrc"],
["Sketcher","../Mod/Sketcher/Gui/Resources/translations","../Mod/Sketcher/Gui/Resources/Sketcher.qrc"],
["StartPage","../Mod/Start/Gui/Resources/translations","../Mod/Start/Gui/Resources/Start.qrc"],
["Test","../Mod/Test/Gui/Resources/translations","../Mod/Test/Gui/Resources/Test.qrc"],
["Ship","../Mod/Ship/resources/translations","../Mod/Ship/resources/Ship.qrc"],
["Plot","../Mod/Plot/resources/translations","../Mod/Plot/resources/Plot.qrc"],
["Web","../Mod/Web/Gui/Resources/translations","../Mod/Web/Gui/Resources/Web.qrc"],
["Spreadsheet","../Mod/Spreadsheet/Gui/Resources/translations","../Mod/Spreadsheet/Gui/Resources/Spreadsheet.qrc"],
["Path","../Mod/Path/Gui/Resources/translations","../Mod/Path/Gui/Resources/Path.qrc"],
["Tux","../Mod/Tux/Resources/translations","../Mod/Tux/Resources/Tux.qrc"],
["TechDraw","../Mod/TechDraw/Gui/Resources/translations","../Mod/TechDraw/Gui/Resources/TechDraw.qrc"],
]
default_languages = "af ar ca cs de el es-ES eu fi fil fr gl hr hu id it ja kab ko lt nl no pl pt-BR pt-PT ro ru sk sl sr sv-SE tr uk val-ES vi zh-CN zh-TW"
def updateqrc(qrcpath,lncode):
"updates a qrc file with the given translation entry"
print("opening " + qrcpath + "...")
# getting qrc file contents
if not os.path.exists(qrcpath):
print("ERROR: Resource file " + qrcpath + " doesn't exist")
sys.exit()
f = open(qrcpath,"ro")
resources = []
for l in f.readlines():
resources.append(l)
f.close()
# checking for existing entry
name = "_" + lncode + ".qm"
for r in resources:
if name in r:
print("language already exists in qrc file")
return
# find the latest qm line
pos = None
for i in range(len(resources)):
if ".qm" in resources[i]:
pos = i
if pos is None:
print("No existing .qm file in this resource. Appending to the end position")
for i in range(len(resources)):
if "</qresource>" in resources[i]:
pos = i-1
if pos is None:
print("ERROR: couldn't add qm files to this resource: " + qrcpath)
sys.exit()
# inserting new entry just after the last one
line = resources[pos]
if ".qm" in line:
line = re.sub("_.*\.qm","_"+lncode+".qm",line)
else:
modname = os.path.splitext(os.path.basename(qrcpath))[0]
line = " <file>translations/"+modname+"_"+lncode+".qm</file>\n"
#print "ERROR: no existing qm entry in this resource: Please add one manually " + qrcpath
#sys.exit()
print("inserting line: ",line)
resources.insert(pos+1,line)
# writing the file
f = open(qrcpath,"wb")
for r in resources:
f.write(r)
f.close()
print("successfully updated ",qrcpath)
def doFile(tsfilepath,targetpath,lncode,qrcpath):
"updates a single ts file, and creates a corresponding qm file"
basename = os.path.basename(tsfilepath)[:-3]
# special fix of the draft filename...
if basename == "draft": basename = "Draft"
newname = basename + "_" + lncode + ".ts"
newpath = targetpath + os.sep + newname
shutil.copyfile(tsfilepath, newpath)
os.system("lrelease " + newpath)
newqm = targetpath + os.sep + basename + "_" + lncode + ".qm"
if not os.path.exists(newqm):
print("ERROR: impossible to create " + newqm + ", aborting")
sys.exit()
updateqrc(qrcpath,lncode)
def doLanguage(lncode,fmodule=""):
" treats a single language"
if lncode == "en":
# never treat "english" translation... For now :)
return
mods = []
if fmodule:
for l in locations:
if l[0].upper() == fmodule.upper():
mods = [l]
else:
mods = locations
if not mods:
print("Error: Couldn't find module "+fmodule)
sys.exit()
for target in mods:
basefilepath = tempfolder + os.sep + lncode + os.sep + target[0] + ".ts"
targetpath = os.path.abspath(target[1])
qrcpath = os.path.abspath(target[2])
doFile(basefilepath,targetpath,lncode,qrcpath)
print(lncode + " done!")
if __name__ == "__main__":
inputdir = ""
inputzip = ""
fmodule = ""
args = sys.argv[1:]
if len(args) < 1:
inputzip = os.path.join(os.path.abspath(os.curdir),"freecad.zip")
if os.path.exists(inputzip):
print("Using zip file found at",inputzip)
else:
print(__doc__)
sys.exit()
else:
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:z:m:", ["help", "directory=","zipfile=", "module="])
except getopt.GetoptError:
print(__doc__)
sys.exit()
# checking on the options
for o, a in opts:
if o in ("-h", "--help"):
print(__doc__)
sys.exit()
if o in ("-d", "--directory"):
inputdir = a
if o in ("-z", "--zipfile"):
inputzip = a
if o in ("-m", "--module"):
fmodule = a
currentfolder = os.getcwd()
if inputdir:
tempfolder = os.path.realpath(inputdir)
if not os.path.exists(tempfolder):
print("ERROR: " + tempfolder + " not found")
sys.exit()
elif inputzip:
tempfolder = tempfile.mkdtemp()
print("creating temp folder " + tempfolder)
inputzip=os.path.realpath(inputzip)
if not os.path.exists(inputzip):
print("ERROR: " + inputzip + " not found")
sys.exit()
shutil.copy(inputzip,tempfolder)
os.chdir(tempfolder)
zfile=zipfile.ZipFile("freecad.zip")
print("extracting freecad.zip...")
zfile.extractall()
else:
tempfolder = tempfile.mkdtemp()
print("creating temp folder " + tempfolder)
os.chdir(tempfolder)
os.system("wget "+crowdinpath)
if not os.path.exists("freecad.zip"):
print("download failed!")
sys.exit()
zfile=zipfile.ZipFile("freecad.zip")
print("extracting freecad.zip...")
zfile.extractall()
os.chdir(currentfolder)
if not args:
#args = [o for o in os.listdir(tempfolder) if o != "freecad.zip"]
# do not treat all languages in the zip file. Some are not translated enough.
args = default_languages.split()
for ln in args:
if not os.path.exists(tempfolder + os.sep + ln):
print("ERROR: language path for " + ln + " not found!")
else:
doLanguage(ln,fmodule)
|
lgpl-2.1
| -2,449,099,759,645,174,300
| 44.87594
| 156
| 0.588462
| false
| 3.916239
| true
| false
| false
|
tlksio/tlksio
|
env/lib/python3.4/site-packages/logilab/astng/mixins.py
|
1
|
4348
|
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""This module contains some mixins for the different nodes.
"""
from logilab.astng.exceptions import (ASTNGBuildingException, InferenceError,
NotFoundError)
class BlockRangeMixIn(object):
"""override block range """
def set_line_info(self, lastchild):
self.fromlineno = self.lineno
self.tolineno = lastchild.tolineno
self.blockstart_tolineno = self._blockstart_toline()
def _elsed_block_range(self, lineno, orelse, last=None):
"""handle block line numbers range for try/finally, for, if and while
statements
"""
if lineno == self.fromlineno:
return lineno, lineno
if orelse:
if lineno >= orelse[0].fromlineno:
return lineno, orelse[-1].tolineno
return lineno, orelse[0].fromlineno - 1
return lineno, last or self.tolineno
class FilterStmtsMixin(object):
"""Mixin for statement filtering and assignment type"""
def _get_filtered_stmts(self, _, node, _stmts, mystmt):
"""method used in _filter_stmts to get statemtents and trigger break"""
if self.statement() is mystmt:
# original node's statement is the assignment, only keep
# current node (gen exp, list comp)
return [node], True
return _stmts, False
def ass_type(self):
return self
class AssignTypeMixin(object):
def ass_type(self):
return self
def _get_filtered_stmts(self, lookup_node, node, _stmts, mystmt):
"""method used in filter_stmts"""
if self is mystmt:
return _stmts, True
if self.statement() is mystmt:
# original node's statement is the assignment, only keep
# current node (gen exp, list comp)
return [node], True
return _stmts, False
class ParentAssignTypeMixin(AssignTypeMixin):
def ass_type(self):
return self.parent.ass_type()
class FromImportMixIn(FilterStmtsMixin):
"""MixIn for From and Import Nodes"""
def _infer_name(self, frame, name):
return name
def do_import_module(self, modname):
"""return the ast for a module whose name is <modname> imported by <self>
"""
# handle special case where we are on a package node importing a module
# using the same name as the package, which may end in an infinite loop
# on relative imports
# XXX: no more needed ?
mymodule = self.root()
        level = getattr(self, 'level', None) # Import has no level
# XXX we should investigate deeper if we really want to check
        # importing itself: modname and mymodule.name may be relative or absolute
if mymodule.relative_to_absolute_name(modname, level) == mymodule.name:
# FIXME: we used to raise InferenceError here, but why ?
return mymodule
try:
return mymodule.import_module(modname, level=level)
except ASTNGBuildingException:
raise InferenceError(modname)
except SyntaxError as ex:
raise InferenceError(str(ex))
def real_name(self, asname):
"""get name from 'as' name"""
for name, _asname in self.names:
if name == '*':
return asname
if not _asname:
name = name.split('.', 1)[0]
_asname = name
if asname == _asname:
return name
raise NotFoundError(asname)
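    # e.g. for "from os import path as p" self.names is [('path', 'p')], so
    # real_name('p') returns 'path'; for a plain "import os.path" (no alias) the
    # fallback above maps the asname 'os' back to 'os', the first dotted component.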
|
mit
| -2,813,748,875,574,029,000
| 34.639344
| 81
| 0.635005
| false
| 4.082629
| false
| false
| false
|
naturali/tensorflow
|
tensorflow/python/kernel_tests/variable_scope_test.py
|
1
|
35348
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
class VariableScopeTest(tf.test.TestCase):
def testGetVar(self):
vs = variable_scope._get_default_variable_store()
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
assert v == v1
def testNameExists(self):
vs = variable_scope._get_default_variable_store()
# No check by default, so we can both create and get existing names.
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
assert v == v1
# When reuse is False, we fail when variables are already there.
vs.get_variable("w", [1], reuse=False) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("v", [1], reuse=False) # That fails.
# When reuse is True, we fail when variables are new.
vs.get_variable("v", [1], reuse=True) # That's ok.
with self.assertRaises(ValueError):
vs.get_variable("u", [1], reuse=True) # That fails.
def testNamelessStore(self):
vs = variable_scope._get_default_variable_store()
vs.get_variable("v1", [2])
vs.get_variable("v2", [2])
expected_names = ["%s:0" % name for name in ["v1", "v2"]]
self.assertEqual(set(expected_names),
set([v.name for v in vs._vars.values()]))
def testVarScopeInitializer(self):
with self.test_session() as sess:
init = tf.constant_initializer(0.3)
with tf.variable_scope("tower") as tower:
with tf.variable_scope("foo", initializer=init):
v = tf.get_variable("v", [])
sess.run(tf.initialize_variables([v]))
self.assertAllClose(v.eval(), 0.3)
with tf.variable_scope(tower, initializer=init):
w = tf.get_variable("w", [])
sess.run(tf.initialize_variables([w]))
self.assertAllClose(w.eval(), 0.3)
def testVarScopeDType(self):
with self.test_session():
with tf.variable_scope("tower") as tower:
with tf.variable_scope("foo", dtype=tf.float16):
v = tf.get_variable("v", [])
self.assertEqual(v.dtype, tf.float16_ref)
with tf.variable_scope(tower, dtype=tf.float16):
w = tf.get_variable("w", [])
self.assertEqual(w.dtype, tf.float16_ref)
def testInitFromNonTensorValue(self):
with self.test_session() as sess:
v = tf.get_variable("v", initializer=4, dtype=tf.int32)
sess.run(tf.initialize_variables([v]))
self.assertAllClose(v.eval(), 4)
w = tf.get_variable("w",
initializer=numpy.array([1, 2, 3]),
dtype=tf.int64)
sess.run(tf.initialize_variables([w]))
self.assertAllClose(w.eval(), [1, 2, 3])
with self.assertRaises(TypeError):
tf.get_variable("x", initializer={})
def testVarScopeCachingDevice(self):
with self.test_session():
caching_device = "/job:moo"
with tf.variable_scope("tower"):
with tf.variable_scope("caching", caching_device=caching_device):
v = tf.get_variable("v", [])
self.assertTrue(v.value().device.startswith(caching_device))
with tf.variable_scope("child"):
v2 = tf.get_variable("v", [])
self.assertTrue(v2.value().device.startswith(caching_device))
with tf.variable_scope("not_cached", caching_device=""):
v2_not_cached = tf.get_variable("v", [])
self.assertFalse(
v2_not_cached.value().device.startswith(caching_device))
with tf.variable_scope(
"not_cached_identity_device",
caching_device=lambda op: op.device):
v2_identity_device = tf.get_variable("v", [])
self.assertFalse(
v2_identity_device.value().device.startswith(caching_device))
with tf.variable_scope("we_will_do_it_live") as vs_live:
vs_live.set_caching_device("/job:live")
v_live = tf.get_variable("v", [])
self.assertTrue(v_live.value().device.startswith("/job:live"))
v_tower = tf.get_variable("v", [])
self.assertFalse(v_tower.value().device.startswith(caching_device))
def testVarScopeRegularizer(self):
with self.test_session() as sess:
init = tf.constant_initializer(0.3)
def regularizer1(v):
return tf.reduce_mean(v) + 0.1
def regularizer2(v):
return tf.reduce_mean(v) + 0.2
with tf.variable_scope("tower", regularizer=regularizer1) as tower:
with tf.variable_scope("foo", initializer=init):
v = tf.get_variable("v", [])
sess.run(tf.initialize_variables([v]))
losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(1, len(losses))
self.assertAllClose(losses[0].eval(), 0.4)
with tf.variable_scope(tower, initializer=init) as vs:
u = tf.get_variable("u", [])
vs.set_regularizer(regularizer2)
w = tf.get_variable("w", [])
        # Next 3 variables not regularized, to test disabling regularization.
x = tf.get_variable("x", [], regularizer=tf.no_regularizer)
with tf.variable_scope("baz", regularizer=tf.no_regularizer):
y = tf.get_variable("y", [])
vs.set_regularizer(tf.no_regularizer)
z = tf.get_variable("z", [])
# Check results.
losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses))
sess.run(tf.initialize_variables([u, w, x, y, z]))
self.assertAllClose(losses[0].eval(), 0.4)
self.assertAllClose(losses[1].eval(), 0.4)
self.assertAllClose(losses[2].eval(), 0.5)
with tf.variable_scope("foo", reuse=True):
v = tf.get_variable("v", []) # "v" is alredy there, reused
losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(3, len(losses)) # No new loss added.
def testIntializeFromValue(self):
with self.test_session() as sess:
init = tf.constant(0.1)
w = tf.get_variable("v", initializer=init)
sess.run(tf.initialize_variables([w]))
self.assertAllClose(w.eval(), 0.1)
with self.assertRaisesRegexp(ValueError, "shape"):
# We disallow explicit shape specification when initializer is constant.
tf.get_variable("u", [1], initializer=init)
with tf.variable_scope("foo", initializer=init):
# Constant initializer can be passed through scopes if needed.
v = tf.get_variable("v")
sess.run(tf.initialize_variables([v]))
self.assertAllClose(v.eval(), 0.1)
# Check that non-float32 initializer creates a non-float32 variable.
init = tf.constant(1, dtype=tf.int32)
t = tf.get_variable("t", initializer=init)
self.assertEqual(t.dtype.base_dtype, tf.int32)
# Raise error if `initializer` dtype and `dtype` are not identical.
with self.assertRaisesRegexp(ValueError, "don't match"):
tf.get_variable("s", initializer=init, dtype=tf.float64)
def testControlDeps(self):
with self.test_session() as sess:
v0 = tf.get_variable("v0", [1], initializer=tf.constant_initializer(0))
with tf.control_dependencies([v0.value()]):
v1 = tf.get_variable("v1", [1], initializer=tf.constant_initializer(1))
add = v1 + v0
# v0 should be uninitialized.
with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
sess.run(v0)
# We should be able to initialize and run v1 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual(1, sess.run(v1))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
sess.run(v0)
with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
sess.run(add)
def testControlFlow(self):
with self.test_session() as sess:
v0 = tf.get_variable("v0", [], initializer=tf.constant_initializer(0))
var_dict = {}
# Call get_variable in each of the cond clauses.
def var_in_then_clause():
v1 = tf.get_variable("v1", [1], initializer=tf.constant_initializer(1))
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = tf.get_variable("v2", [1], initializer=tf.constant_initializer(2))
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(tf.less(v0, 10),
var_in_then_clause,
var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
# v0, even if the variable was created with a control dep on v0.
sess.run(v1.initializer)
self.assertEqual([1], sess.run(v1))
sess.run(v2.initializer)
self.assertEqual([2], sess.run(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
sess.run(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegexp(tf.OpError, "uninitialized"):
sess.run(add)
# If we initialize v0 we should be able to run 'add'.
sess.run(v0.initializer)
sess.run(add)
def testGetVariableScope(self):
# Test the get_variable_scope() function and setting properties of result.
with self.test_session() as sess:
init = tf.constant_initializer(0.3)
with tf.variable_scope("foo"):
new_init1 = tf.get_variable_scope().initializer
self.assertEqual(new_init1, None)
# Check that we can set initializer like this.
tf.get_variable_scope().set_initializer(init)
v = tf.get_variable("v", [])
sess.run(tf.initialize_variables([v]))
self.assertAllClose(v.eval(), 0.3)
# Check that we can set reuse.
tf.get_variable_scope().reuse_variables()
with self.assertRaises(ValueError): # Fail, w does not exist yet.
tf.get_variable("w", [1])
# Check that the set initializer goes away.
new_init = tf.get_variable_scope().initializer
self.assertEqual(new_init, None)
def testVarScope(self):
with self.test_session():
with tf.variable_scope("tower") as tower:
self.assertEqual(tower.name, "tower")
with tf.name_scope("scope") as sc:
self.assertEqual(sc, "tower/scope/")
with tf.variable_scope("foo"):
with tf.variable_scope("bar") as bar:
self.assertEqual(bar.name, "foo/bar")
with tf.name_scope("scope") as sc:
self.assertEqual(sc, "foo/bar/scope/")
with tf.variable_scope("foo"):
with tf.variable_scope(tower, reuse=True) as tower_shared:
self.assertEqual(tower_shared.name, "tower")
with tf.name_scope("scope") as sc:
self.assertEqual(sc, "foo_1/tower/scope/")
def testVarScopeNameScope(self):
with self.test_session():
with tf.name_scope("scope1"):
with tf.variable_scope("tower") as tower:
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
with tf.variable_scope(tower): # Re-entering acts like another "tower".
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower_1/scope2/")
with tf.variable_scope("tower"): # Re-entering by string acts the same.
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower_2/scope2/")
with tf.name_scope("scope3"):
with tf.variable_scope("tower"):
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope3/tower/scope2/")
with tf.variable_scope(tower):
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope3/tower_1/scope2/")
root_var_scope = tf.get_variable_scope()
with tf.name_scope("scope4"):
with tf.variable_scope(root_var_scope):
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope4/scope2/")
def testVarScopeOriginalNameScope(self):
with self.test_session():
with tf.name_scope("scope1"):
with tf.variable_scope("tower") as tower:
self.assertEqual(tower.original_name_scope, "scope1/tower/")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
with tf.name_scope("scope2"):
with tf.variable_scope(tower) as tower1:
# Re-entering preserves original name scope.
self.assertEqual(tower1.original_name_scope, "scope1/tower/")
with tf.name_scope("foo") as sc2:
self.assertEqual(sc2, "scope2/tower/foo/")
# Test re-entering original name scope.
with tf.name_scope(tower.original_name_scope):
with tf.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar/")
with tf.name_scope("scope2"):
with tf.variable_scope(tower):
with tf.name_scope(tower.original_name_scope):
with tf.name_scope("bar") as sc3:
self.assertEqual(sc3, "scope1/tower/bar_1/")
def testVarScopeObjectReuse(self):
with self.test_session():
vs = None
with tf.variable_scope("jump", reuse=True) as scope:
vs = scope
with tf.variable_scope(vs) as jump:
self.assertTrue(jump.reuse)
with tf.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with tf.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertFalse(jump_no_reuse.reuse)
with tf.variable_scope("jump", reuse=False) as scope:
vs = scope
with tf.variable_scope(vs) as jump:
self.assertFalse(jump.reuse)
with tf.variable_scope(vs, reuse=True) as jump_reuse:
self.assertTrue(jump_reuse.reuse)
with tf.variable_scope(vs, reuse=False) as jump_no_reuse:
self.assertFalse(jump_no_reuse.reuse)
def testVarOpScope(self):
with self.test_session():
with tf.name_scope("scope1"):
with tf.variable_scope("tower", "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"tower/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower/scope2/")
with tf.variable_scope("tower", "default", []):
with self.assertRaises(ValueError):
tf.get_variable("w", [])
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope1/tower_1/scope2/")
with tf.name_scope("scope2"):
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope2/default/scope2/")
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"default_1/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "scope2/default_1/scope2/")
def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
with self.test_session():
with tf.variable_scope(None, "defaultScope1"):
with tf.variable_scope(None, "layer"):
self.assertEqual(tf.get_variable("w", []).name,
"defaultScope1/layer/w:0")
with tf.variable_scope(None, "defaultScope1"):
with tf.variable_scope(None, "layer"):
self.assertEqual(tf.get_variable("w", []).name,
"defaultScope1_1/layer/w:0")
with tf.variable_scope(None, "defaultScope"):
with tf.variable_scope(None, "layer"):
self.assertEqual(tf.get_variable("w", []).name,
"defaultScope/layer/w:0")
with tf.variable_scope(None, "defaultScope1"):
with tf.variable_scope(None, "layer"):
self.assertEqual(tf.get_variable("w", []).name,
"defaultScope1_2/layer/w:0")
def testVarOpScopeReuse(self):
with self.test_session():
with tf.variable_scope("outer") as outer:
with tf.variable_scope("tower", "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/tower/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with tf.variable_scope(outer, reuse=True) as outer:
with tf.variable_scope("tower", "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/tower/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testVarScopeGetVar(self):
with self.test_session():
with tf.variable_scope("root"):
with tf.variable_scope("towerA") as tower_a:
va = tf.get_variable("v", [1])
self.assertEqual(va.name, "root/towerA/v:0")
with tf.variable_scope(tower_a, reuse=True):
va2 = tf.get_variable("v", [1])
self.assertEqual(va2, va)
with tf.variable_scope("towerB"):
vb = tf.get_variable("v", [1])
self.assertEqual(vb.name, "root/towerB/v:0")
with self.assertRaises(ValueError):
with tf.variable_scope("towerA"):
va2 = tf.get_variable("v", [1])
with tf.variable_scope("towerA", reuse=True):
va2 = tf.get_variable("v", [1])
self.assertEqual(va2, va)
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v = tf.get_variable("v", [1])
self.assertEqual(v.name, "root/foo/bar/v:0")
with tf.variable_scope(tower_a, reuse=True):
va3 = tf.get_variable("v", [1])
self.assertEqual(va, va3)
with self.assertRaises(ValueError):
with tf.variable_scope(tower_a, reuse=True):
with tf.variable_scope("baz"):
tf.get_variable("v", [1])
with self.assertRaises(ValueError) as exc:
with tf.variable_scope(tower_a, reuse=True):
tf.get_variable("v", [2]) # Different shape.
self.assertEqual("shape" in str(exc.exception), True)
with self.assertRaises(ValueError) as exc:
with tf.variable_scope(tower_a, reuse=True):
tf.get_variable("v", [1], dtype=tf.int32)
self.assertEqual("dtype" in str(exc.exception), True)
def testVarScopeOuterScope(self):
with self.test_session():
with tf.variable_scope("outer") as outer:
pass
with tf.variable_scope(outer):
self.assertEqual(tf.get_variable("w", []).name,
"outer/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with tf.variable_scope("default"):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with tf.variable_scope(outer, reuse=True):
self.assertEqual(tf.get_variable("w", []).name,
"outer/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
with tf.variable_scope("default", reuse=True):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
def testVarScopeNestedOuterScope(self):
with self.test_session():
with tf.variable_scope("outer") as outer:
with tf.variable_scope(outer):
self.assertEqual(tf.get_variable("w", []).name,
"outer/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with tf.variable_scope("default"):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with tf.variable_scope(outer, reuse=True):
self.assertEqual(tf.get_variable("w", []).name,
"outer/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer_1/scope2/")
with tf.variable_scope("default", reuse=True):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default_1/scope2/")
def testVarOpScopeReuseParam(self):
with self.test_session():
with tf.variable_scope("outer") as outer:
with tf.variable_scope("tower", "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/tower/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/tower/scope2/")
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with tf.variable_scope(outer) as outer:
with tf.variable_scope("tower", "default", reuse=True):
self.assertEqual(tf.get_variable("w", []).name,
"outer/tower/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/tower/scope2/")
outer.reuse_variables()
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testVarOpScopeReuseError(self):
with self.test_session():
with self.assertRaises(ValueError):
with tf.variable_scope(None, "default", reuse=True):
self.assertEqual(tf.get_variable("w", []).name,
"outer/tower/w:0")
def testVarOpScopeOuterScope(self):
with self.test_session():
with tf.variable_scope("outer") as outer:
pass
with tf.variable_scope(outer, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
with tf.variable_scope(outer, "default", reuse=True):
self.assertEqual(tf.get_variable("w", []).name,
"outer/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/scope2/")
outer.reuse_variables()
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_2/default/scope2/")
def testVarOpScopeNestedOuterScope(self):
with self.test_session():
with tf.variable_scope("outer") as outer:
with tf.variable_scope(outer, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/outer/scope2/")
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer/default/scope2/")
with tf.variable_scope(outer, "default", reuse=True):
self.assertEqual(tf.get_variable("w", []).name,
"outer/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/scope2/")
with tf.variable_scope(None, "default", []):
self.assertEqual(tf.get_variable("w", []).name,
"outer/default/w:0")
with tf.name_scope("scope2") as sc2:
self.assertEqual(sc2, "outer_1/default/scope2/")
def testGetLocalVar(self):
with self.test_session():
# Check that local variable respects naming.
with tf.variable_scope("outer") as outer:
with tf.variable_scope(outer, "default", []):
local_var = variable_scope.get_local_variable(
"w", [], collections=["foo"])
self.assertEqual(local_var.name, "outer/w:0")
# Since variable is local, it should be in the local variable collection
      # but not the trainable collection.
self.assertIn(local_var, tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES))
self.assertIn(local_var, tf.get_collection("foo"))
self.assertNotIn(
local_var, tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))
# Check that local variable respects `reuse`.
with tf.variable_scope(outer, "default", reuse=True):
self.assertEqual(variable_scope.get_local_variable("w", []).name,
"outer/w:0")
def axis0_into1_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
return part
def axis0_into2_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 2
return part
def axis0_into3_partitioner(shape=None, **unused_kwargs):
part = [1] * len(shape)
part[0] = 3
return part
class VariableScopeWithPartitioningTest(tf.test.TestCase):
def testResultNameMatchesRequested(self):
with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
v = tf.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v.name, "scope0/name0")
v_concat = v.as_tensor()
self.assertEqual(v_concat.name, "scope0/name0:0")
variables = tf.get_collection(tf.GraphKeys.VARIABLES)
self.assertTrue("scope0/name0/part_0:0" in [x.name for x in variables])
self.assertTrue("scope0/name0/part_1:0" in [x.name for x in variables])
self.assertFalse("scope0/name0/part_2:0" in [x.name for x in variables])
def testBreaksIfPartitioningChanges(self):
with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
tf.get_variable("name0", shape=(3, 1, 1))
with tf.variable_scope("scope0",
partitioner=axis0_into3_partitioner,
reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions .* "
"and found partitions .*"):
tf.get_variable("name0", shape=(3, 1, 1))
with tf.variable_scope("scope0",
partitioner=axis0_into1_partitioner,
reuse=True):
with self.assertRaisesRegexp(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions .* "
"and found partitions .*"):
tf.get_variable("name0", shape=(3, 1, 1))
def testReturnsExistingConcatenatedValueIfReuse(self):
with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
v_concat = tf.get_variable("name0", shape=(3, 1, 1))
tf.get_variable_scope().reuse_variables()
v_concat_2 = tf.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v_concat, v_concat_2)
def testAllowsReuseWithoutPartitioner(self):
with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner):
v = tf.get_variable("name0", shape=(3, 1, 1))
with tf.variable_scope("scope0", reuse=True):
v_reused = tf.get_variable("name0")
self.assertEqual(v, v_reused)
def testPropagatePartitionerOnReopening(self):
with tf.variable_scope("scope0", partitioner=axis0_into2_partitioner) as vs:
self.assertEqual(axis0_into2_partitioner, vs.partitioner)
with tf.variable_scope(vs) as vs1:
self.assertEqual(axis0_into2_partitioner, vs1.partitioner)
def testPartitionConcatenatesAlongCorrectAxis(self):
def _part_axis_0(**unused_kwargs):
return (2, 1, 1)
def _part_axis_1(**unused_kwargs):
return (1, 2, 1)
with tf.variable_scope("root"):
v0 = tf.get_variable("n0", shape=(2, 2, 2), partitioner=_part_axis_0)
v1 = tf.get_variable("n1", shape=(2, 2, 2), partitioner=_part_axis_1)
self.assertEqual(v0.get_shape(), (2, 2, 2))
self.assertEqual(v1.get_shape(), (2, 2, 2))
n0_0 = tf.get_default_graph().get_tensor_by_name("root/n0/part_0:0")
n0_1 = tf.get_default_graph().get_tensor_by_name("root/n0/part_1:0")
self.assertEqual(n0_0.get_shape(), (1, 2, 2))
self.assertEqual(n0_1.get_shape(), (1, 2, 2))
n1_0 = tf.get_default_graph().get_tensor_by_name("root/n1/part_0:0")
n1_1 = tf.get_default_graph().get_tensor_by_name("root/n1/part_1:0")
self.assertEqual(n1_0.get_shape(), (2, 1, 2))
self.assertEqual(n1_1.get_shape(), (2, 1, 2))
class VariableScopeWithCustomGetterTest(tf.test.TestCase):
def testNonCallableGetterFails(self):
with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"):
with tf.variable_scope("scope0", custom_getter=3):
tf.get_variable("name0")
with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"):
tf.get_variable("name0", custom_getter=3)
def testNoSideEffectsWithIdentityCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with tf.variable_scope("scope", custom_getter=custom_getter) as scope:
v = tf.get_variable("v", [1])
with tf.variable_scope(scope, reuse=True):
v2 = tf.get_variable("v", [1])
with tf.variable_scope("new_scope") as new_scope:
v3 = tf.get_variable("v3", [1])
with tf.variable_scope(new_scope, reuse=True, custom_getter=custom_getter):
v4 = tf.get_variable("v3", [1])
self.assertEqual(v, v2)
self.assertEqual(v3, v4)
self.assertEqual(3, called[0]) # skipped one in the first new_scope
def testGetterThatCreatesTwoVariablesAndSumsThem(self):
def custom_getter(getter, name, *args, **kwargs):
g_0 = getter("%s/0" % name, *args, **kwargs)
g_1 = getter("%s/1" % name, *args, **kwargs)
with tf.name_scope("custom_getter"):
return g_0 + g_1
with tf.variable_scope("scope", custom_getter=custom_getter):
v = tf.get_variable("v", [1, 2, 3])
self.assertEqual([1, 2, 3], v.get_shape())
true_vars = tf.trainable_variables()
self.assertEqual(2, len(true_vars))
self.assertEqual("scope/v/0:0", true_vars[0].name)
self.assertEqual("scope/v/1:0", true_vars[1].name)
self.assertEqual("custom_getter/add:0", v.name)
with self.test_session() as sess:
tf.initialize_all_variables().run()
np_vars, np_v = sess.run([true_vars, v])
self.assertAllClose(np_v, sum(np_vars))
class PartitionInfoTest(tf.test.TestCase):
def testConstructorChecks(self):
# Invalid arg types.
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=None, var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=None)
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape="foo", var_offset=[0, 1])
with self.assertRaises(TypeError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset="foo")
# full_shape and var_offset must have same length.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=[0])
# Offset must always be less than shape.
with self.assertRaises(ValueError):
variable_scope._PartitionInfo(full_shape=[1, 1], var_offset=[0, 1])
def testSingleOffset(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(4, partition_info.single_offset([1, 3]))
# Tests when the variable isn't partitioned at all.
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(0, partition_info.single_offset([9, 3]))
def testSingleSliceDim(self):
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
# Invalid shape.
with self.assertRaises(TypeError):
partition_info.single_slice_dim(None)
# Rank of shape differs from full_shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 2, 3])
# Shape is too large given var_offset (4+6 > 9).
with self.assertRaises(ValueError):
partition_info.single_slice_dim([6, 3])
# Multiple possible slice dim from shape.
with self.assertRaises(ValueError):
partition_info.single_slice_dim([1, 1])
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[0, 0])
self.assertEqual(1, partition_info.single_slice_dim([9, 2]))
partition_info = variable_scope._PartitionInfo(
full_shape=[9, 3], var_offset=[4, 0])
self.assertEqual(0, partition_info.single_slice_dim([2, 3]))
if __name__ == "__main__":
tf.test.main()
|
apache-2.0
| -7,995,408,519,929,527,000
| 40.931198
| 80
| 0.606286
| false
| 3.542239
| true
| false
| false
|
madedotcom/photon-pump
|
test/conversations/test_read_all_events_stream_conversation.py
|
1
|
4703
|
from asyncio import Queue
from uuid import uuid4
import pytest
from photonpump import messages as msg, exceptions
from photonpump import messages_pb2 as proto
from photonpump.conversations import ReadAllEvents
@pytest.mark.asyncio
async def test_read_all_request():
output = Queue()
convo = ReadAllEvents(msg.Position(10, 11))
await convo.start(output)
request = await output.get()
body = proto.ReadAllEvents()
body.ParseFromString(request.payload)
assert request.command is msg.TcpCommand.ReadAllEventsForward
assert body.commit_position == 10
assert body.prepare_position == 11
assert body.resolve_link_tos is True
assert body.require_master is False
assert body.max_count == 100
@pytest.mark.asyncio
async def test_read_all_backward():
output = Queue()
convo = ReadAllEvents(
from_position=msg.Position(10, 11),
direction=msg.StreamDirection.Backward,
max_count=20,
)
await convo.start(output)
request = await output.get()
body = proto.ReadAllEvents()
body.ParseFromString(request.payload)
assert request.command is msg.TcpCommand.ReadAllEventsBackward
assert body.commit_position == 10
assert body.prepare_position == 11
assert body.resolve_link_tos is True
assert body.require_master is False
assert body.max_count == 20
@pytest.mark.asyncio
async def test_read_all_success():
event_1_id = uuid4()
event_2_id = uuid4()
convo = ReadAllEvents()
response = proto.ReadAllEventsCompleted()
response.result = msg.ReadEventResult.Success
response.next_commit_position = 10
response.next_prepare_position = 10
response.commit_position = 9
response.prepare_position = 9
event_1 = proto.ResolvedEvent()
event_1.commit_position = 8
event_1.prepare_position = 8
event_1.event.event_stream_id = "stream-123"
event_1.event.event_number = 32
event_1.event.event_id = event_1_id.bytes_le
event_1.event.event_type = "event-type"
event_1.event.data_content_type = msg.ContentType.Json
event_1.event.metadata_content_type = msg.ContentType.Binary
event_1.event.data = """
{
'color': 'red',
'winner': true
}
""".encode(
"UTF-8"
)
event_2 = proto.ResolvedEvent()
event_2.CopyFrom(event_1)
event_2.event.event_stream_id = "stream-456"
event_2.event.event_type = "event-2-type"
event_2.event.event_id = event_2_id.bytes_le
event_2.event.event_number = 32
response.events.extend([event_1, event_2])
await convo.respond_to(
msg.InboundMessage(
uuid4(),
msg.TcpCommand.ReadAllEventsForwardCompleted,
response.SerializeToString(),
),
None,
)
result = await convo.result
assert isinstance(result, msg.AllStreamSlice)
[event_1, event_2] = result.events
assert event_1.stream == "stream-123"
assert event_1.id == event_1_id
assert event_1.type == "event-type"
assert event_1.event_number == 32
assert event_2.stream == "stream-456"
assert event_2.id == event_2_id
assert event_2.type == "event-2-type"
assert event_2.event_number == 32
@pytest.mark.asyncio
async def test_all_events_error():
convo = ReadAllEvents()
response = proto.ReadAllEventsCompleted()
response.result = msg.ReadAllResult.Error
response.next_commit_position = 10
response.next_prepare_position = 10
response.commit_position = 9
response.prepare_position = 9
response.error = "Something really weird just happened"
await convo.respond_to(
msg.InboundMessage(
uuid4(),
msg.TcpCommand.ReadAllEventsForwardCompleted,
response.SerializeToString(),
),
None,
)
with pytest.raises(exceptions.ReadError) as exn:
await convo.result
    assert exn.value.stream == "$all"
    assert exn.value.conversation_id == convo.conversation_id
@pytest.mark.asyncio
async def test_all_events_access_denied():
convo = ReadAllEvents()
response = proto.ReadAllEventsCompleted()
response.result = msg.ReadAllResult.AccessDenied
response.next_commit_position = 10
response.next_prepare_position = 10
response.commit_position = 9
response.prepare_position = 9
await convo.respond_to(
msg.InboundMessage(
uuid4(), msg.TcpCommand.ReadAllEventsForward, response.SerializeToString()
),
None,
)
with pytest.raises(exceptions.AccessDenied) as exn:
await convo.result
    assert exn.value.conversation_id == convo.conversation_id
    assert exn.value.conversation_type == "ReadAllEvents"
|
mit
| 23,669,084,093,015,520
| 26.828402
| 86
| 0.672549
| false
| 3.651398
| true
| false
| false
|
unixunion/python-libsolace
|
bin/solace-list-clients.py
|
1
|
4986
|
#!/usr/bin/env python
"""
Show solace clients and counts, optionally pump all stats into influxdb
"""
import logging
import sys
logging.basicConfig(format='[%(module)s] %(filename)s:%(lineno)s %(asctime)s %(levelname)s %(message)s',
stream=sys.stderr)
import libsolace.settingsloader as settings
from libsolace.SolaceAPI import SolaceAPI
from libsolace.SolaceXMLBuilder import SolaceXMLBuilder
from optparse import OptionParser
import simplejson as json
import sys
import pprint
import demjson
from time import gmtime, strftime
import time
pp = pprint.PrettyPrinter(indent=4, width=20)
if __name__ == '__main__':
""" parse opts, read site.xml, start provisioning vpns. """
usage = "list all vpns in an environment"
parser = OptionParser(usage=usage)
parser.add_option("-e", "--env", "--environment", action="store", type="string", dest="env",
help="environment to run job in eg:[ dev | ci1 | si1 | qa1 | pt1 | prod ]")
parser.add_option("-d", "--debug", action="store_true", dest="debug",
default=False, help="toggles solace debug mode")
parser.add_option("--details", action="store_true", dest="details", help="Show client details", default=False)
parser.add_option("--stats", action="store_true", dest="stats", help="Show client stats", default=False)
parser.add_option("--client", action="store", type="string", dest="client", help="client filter e.g. 'dev_*'",
default="*")
parser.add_option("--influxdb", action="store_true", dest="influxdb", help="influxdb url and port", default=False)
parser.add_option("--influxdb-host", action="store", type="string", dest="influxdb_host", help="influxdb hostname", default="defiant")
parser.add_option("--influxdb-port", action="store", type="int", dest="influxdb_port", help="influxdb port", default=8086)
parser.add_option("--influxdb-user", action="store", type="string", dest="influxdb_user", help="influxdb user", default="root")
parser.add_option("--influxdb-pass", action="store", type="string", dest="influxdb_pass", help="influxdb pass", default="root")
parser.add_option("--influxdb-db", action="store", type="string", dest="influxdb_db", help="influxdb db name", default="solace-clients")
(options, args) = parser.parse_args()
if not options.env:
parser.print_help()
sys.exit()
if options.debug:
logging.getLogger().setLevel(logging.DEBUG)
if options.influxdb:
logging.info("Connecting to influxdb")
from influxdb import InfluxDBClient
try:
client = InfluxDBClient(options.influxdb_host, options.influxdb_port, options.influxdb_user,
options.influxdb_pass, options.influxdb_db)
try:
client.create_database(options.influxdb_db)
except Exception, e:
logging.warn("Unable to create database, does it already exist?")
except Exception, e:
logging.error("Unable to connect to influxdb")
sys.exit(1)
# forces read-only
options.testmode = True
settings.env = options.env.lower()
logging.info("Connecting to appliance in %s, testmode:%s" % (settings.env, options.testmode))
connection = SolaceAPI(settings.env, testmode=options.testmode)
if options.details:
connection.x = SolaceXMLBuilder("show clients details")
connection.x.show.client.name = options.client
        connection.x.show.client.details
elif options.stats:
connection.x = SolaceXMLBuilder("show clients stats")
connection.x.show.client.name = options.client
connection.x.show.client.stats
# get the clients
clients = connection.rpc(str(connection.x), primaryOnly=True)
count = 0
# print clients[0]
timeNow = strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
startTime = time.time()
for c in clients[0]['rpc-reply']['rpc']['show']['client']['primary-virtual-router']['client']:
        count += 1
        j = demjson.encode(c)
p = json.loads(j)
if options.stats:
t = {}
for k in p["stats"]:
logging.debug("Key: %s value %s" % (k, p["stats"][k]))
try:
t[k] = long(p["stats"][k])
except Exception, ve:
logging.debug("skipping")
pass
json_body = [{
"measurement": "client-stats",
"tags": {
"message-vpn": p['message-vpn'],
"name": p['name']
},
"fields": t,
"time": timeNow
}]
# print json.dumps(json_body)
# print json.dumps(json_body, sort_keys=False, indent=4, separators=(',', ': '))
client.write_points(json_body)
logging.info("Total Clients: %s" % count)
logging.info("Time Taken: %s" % (time.time()-startTime) )
|
mit
| -2,089,378,427,571,660,000
| 37.953125
| 140
| 0.609306
| false
| 3.783005
| false
| false
| false
|
espensirnes/paneltime
|
paneltime/system/system_arguments.py
|
1
|
8361
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#This module contains the arguments class used to handle regression arguments
import numpy as np
import functions as fu
import stat_functions as stat
class arguments:
"""Sets initial arguments and stores static properties of the arguments"""
def __init__(self,panel, args_d_old):
p,q,d,k,m=panel.pqdkm
self.args_d_old=args_d_old
self.categories=['beta','rho','lambda','gamma','psi','omega','z']
self.args_init,self.args_d_OLS, self.args_d_restricted=dict(),dict(),dict()
self.panel=panel
self.equations=[]
self.n_equations=len(panel.X)
self.n_args=[]
self.positions=dict()
self.positions_map=dict()
self.name_positions_map=dict()
arg_count=0
self.names_v=[]
self.eq_number_v=[]
if args_d_old is None:
args_d_old=[None]*self.n_equations
for i in range(self.n_equations):
e=equation(panel.X[i],panel.Y[i],panel.W,self, args_d_old[i],i,arg_count,panel.X_names[i])
self.equations.append(e)
self.n_args.append(e.n_args)
self.args_init[i]=e.args_init
self.args_d_OLS[i]=e.args_d_OLS
self.args_d_restricted[i]=e.args_d_restricted
arg_count+=e.n_args
self.args_init['rho']=np.diag(np.ones(self.n_equations))
self.args_d_OLS['rho']=np.diag(np.ones(self.n_equations))
self.args_d_restricted['rho']=np.diag(np.ones(self.n_equations))
self.n_args_eq=arg_count
self.n_args_tot=int(np.sum(self.n_args)+(self.n_equations-1)*self.n_equations/2)
add_rho_names(self.names_v,arg_count)
self.eq_number_v.extend([None]*(self.n_args_tot-arg_count))
def system_conv_to_dicts(self,args):
args_d=dict()
if type(args[0])==dict:
return args
for eq in self.equations:
d=dict()
for category in self.categories:
rng=eq.positions[category]
s=self.args_init[eq.id][category].shape
d[category]=args[rng].reshape(s)
args_d[eq.id]=d
args_d['rho']=rho_list_to_matrix(args[self.n_args_eq:],self.n_equations)
return args_d
def system_conv_to_vector(self,args):
args_v=[]
if type(args[0])!=dict:
return args
n=0
for i in range(self.n_equations):
for name in self.categories:
args_v.extend(args[i][name].flatten())
n+=len(args[i][name])
args_v.extend(rho_matrix_to_list(args['rho'],self.n_equations))
args_v=np.array(args_v)
return args_v
def rho_definitions(self):
n=self.n_args_eq
self.rho_position_list=[]
r=range(n)
x=[[[min((i,j)),max((i,j))] for i in r] for j in r]
		self.rho_position_matrix=np.array([[str(x[j][i]) for i in r] for j in r])
for i in range(n):
for j in range(i,n):
self.names_v.append('System reg. rho(%s,%s)' %(i,j))
				self.rho_position_list.append(x[i][j])
def rho_list_to_matrix(self,lst):
n=len(self.rho_position_list)
m=np.zeros((n,n))
for k in range(n):
i,j=self.rho_position_list[k]
m[i,j]=lst[k]
m[j,i]=lst[k]
return m
def rho_matrix_to_list(self,m):
n=len(self.rho_position_list)
lst=np.zeros(n)
for k in range(n):
i,j=self.rho_position_list[k]
lst[k]=m[i,j]
return lst
class equation:
def __init__(self,X,Y,W,arguments,args_d_old,i,arg_count,X_names):
a=arguments
self.id=i
		p,q,d,k,m=a.panel.pqdkm
self.args_init,self.args_d_OLS, self.args_d_restricted=set_init_args(X,Y,W,args_d_old,p, d, q, m, k,a.panel)
self.names_d=get_namevector(a.panel,p, q, m, k,X_names,a,i)
self.position_defs(a,arg_count,X_names)
self.args_v=conv_to_vector(self.args_init,a.categories)
self.n_args=len(self.args_v)
self.args_rng=range(arg_count,arg_count+self.n_args)
a.eq_number_v.extend([i]*self.n_args)
def position_defs(self,system,arg_count,X_names):
"""Defines positions in vector argument in each equation for the system args_v vector"""
		self.positions_map=dict()#a dictionary of indices containing the string name and sub-position of index within the category
self.positions=dict()#a dictionary of category strings containing the index range of the category
self.beta_map=dict()
k=arg_count
for category in system.categories:
n=len(self.args_init[category])
rng=range(k,k+n)
self.positions[category]=rng#self.positions[<category>]=range(<system position start>,<system position end>)
if category in system.positions:
system.positions[category].append(rng)
else:
system.positions[category]=[rng]
for j in rng:
self.positions_map[j]=[category,j-k]#self.positions_map[<system position>]=<category>,<equation position>
system.positions_map[j]=[self.id,category,j-k]#system.positions_map[<system position>]=<equation number>,<category>,<equation position>
k+=n
for i in range(len(X_names)):
self.beta_map[X_names[i]]=self.positions['beta'][i]
def initargs(X,Y,W,args_old,p,d,q,m,k,panel):
N,T,k=X.shape
if args_old is None:
armacoefs=0
else:
armacoefs=0
args=dict()
args['beta']=np.zeros((k,1))
args['omega']=np.zeros((W.shape[2],1))
args['rho']=np.ones(p)*armacoefs
args['lambda']=np.ones(q)*armacoefs
args['psi']=np.ones(m)*armacoefs
args['gamma']=np.ones(k)*armacoefs
args['z']=np.array([])
if m>0 and N>1:
args['omega'][0][0]=0
if m>0:
args['psi'][0]=0.00001
args['z']=np.array([0.00001])
return args
def set_init_args(X,Y,W,args_old,p,d,q,m,k,panel):
args=initargs(X,Y,W,args_old,p, d, q, m, k, panel)
args_restricted=fu.copy_array_dict(args)
if panel.has_intercept:
args_restricted['beta'][0][0]=panel.mean(Y)
args_restricted['omega'][0][0]=np.log(panel.var(Y))
else:
args_restricted['omega'][0][0]=np.log(panel.var(Y,k=0,mean=0))
beta,e=stat.OLS(panel,X,Y,return_e=True)
args['beta']=beta
args['omega'][0]=np.log(np.sum(e**2*panel.included)/np.sum(panel.included))
args_OLS=fu.copy_array_dict(args)
if panel.m_zero:
args['omega'][0]=0
if not args_old is None:
args['beta']=insert_arg(args['beta'],args_old['beta'])
args['omega']=insert_arg(args['omega'],args_old['omega'])
args['rho']=insert_arg(args['rho'],args_old['rho'])
args['lambda']=insert_arg(args['lambda'],args_old['lambda'])
args['psi']=insert_arg(args['psi'],args_old['psi'])
args['gamma']=insert_arg(args['gamma'],args_old['gamma'])
args['z']=insert_arg(args['z'],args_old['z'])
return args,args_OLS, args_restricted
def conv_to_dict(args,categories,positions):
"""Converts a vector argument args to a dictionary argument. If args is a dict, it is returned unchanged"""
if type(args)==dict:
return args
else:
d=dict()
k=0
for i in categories:
n=len(positions[i])
rng=range(k,k+n)
d[i]=args[rng]
if i=='beta' or i=='omega':
d[i]=d[i].reshape((n,1))
k+=n
return d
def conv_to_vector(args,categories):
"""Converts a dict argument args to vector argument. if args is a vector, it is returned unchanged.\n
If args=None, the vector of self.args is returned"""
if type(args)==list or type(args)==np.ndarray:
return args
v=np.array([])
for category in categories:
s=args[category]
if type(s)==np.ndarray:
s=s.flatten()
v=np.concatenate((v,s))
return v
def get_namevector(panel,p, q, m, k,X_names,system,eq_num):
"""Creates a vector of the names of all regression varaibles,
including variables, ARIMA and GARCH terms. This defines the positions
of the variables througout the estimation."""
names_d=dict()
#sequence must match definition of categories in arguments.__init__:
#self.categories=['beta','rho','lambda','gamma','psi','omega','z']
eq_prefix='%02d|' %(eq_num,)
names_v=[eq_prefix+i for i in X_names]#copy variable names
names_d['beta']=names_v
add_names(p,eq_prefix+'AR term %s (p)','rho',names_d,names_v)
add_names(q,eq_prefix+'MA term %s (q)','lambda',names_d,names_v)
add_names(m,eq_prefix+'MACH term %s (m)','psi',names_d,names_v)
add_names(k,eq_prefix+'ARCH term %s (k)','gamma',names_d,names_v)
names_d['omega']=[eq_prefix+i for i in panel.heteroscedasticity_factors]#copy variable names
names_v.extend(names_d['omega'])
if m>0:
names_d['z']=[eq_prefix+'z in h(e,z)']
names_v.extend(names_d['z'])
n=len(system.names_v)
for i in range(len(names_v)):
system.name_positions_map[names_v[i]]=n+i
system.names_v.extend(names_v)
return names_d
def add_names(T,namesstr,category,d,names):
a=[]
for i in range(T):
a.append(namesstr %(i,))
names.extend(a)
d[category]=a
def insert_arg(arg,add):
n=min((len(arg),len(add)))
arg[:n]=add[:n]
return arg
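#The following block is an illustrative sketch added for clarity and is not
#part of the original module. It shows a minimal round trip through
#conv_to_vector and conv_to_dict, assuming the module's own imports
#(functions, stat_functions) resolve. The toy categories, array shapes and
#hand-built positions mapping are invented purely for demonstration.
if __name__ == "__main__":
	toy_categories=['beta','rho']
	toy_args={'beta':np.array([[1.0],[2.0]]),'rho':np.array([0.5])}
	v=conv_to_vector(toy_args,toy_categories)#flattens to array([1.0, 2.0, 0.5])
	toy_positions={'beta':range(0,2),'rho':range(2,3)}#category -> index range in v
	back=conv_to_dict(v,toy_categories,toy_positions)
	assert np.allclose(back['beta'],toy_args['beta'])#'beta' is reshaped back to (n,1)
	assert np.allclose(back['rho'],toy_args['rho'])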
|
gpl-3.0
| 7,984,326,016,556,555,000
| 28.234266
| 139
| 0.668222
| false
| 2.527509
| false
| false
| false
|
praekelt/panya-event
|
event/models.py
|
1
|
1514
|
from django.db import models
from django.core.urlresolvers import reverse
from ckeditor.fields import RichTextField
from panya.models import ModelBase
PROVINCES = (
('Eastern Cape', 'Eastern Cape'),
('Free State', 'Free State'),
('Gauteng', 'Gauteng'),
('KwaZulu-Natal', 'KwaZulu-Natal'),
('Limpopo', 'Limpopo'),
('Mpumalanga', 'Mpumalanga'),
('Northern Cape', 'Northern Cape'),
('North-West', 'North-West'),
('Western Cape', 'Western Cape'),
)
class Location(models.Model):
city = models.CharField(max_length=255, help_text='Name of the city.')
province = models.CharField(
choices=PROVINCES,
max_length=255,
help_text='Name of the province.'
)
def __unicode__(self):
return "%s, %s" % (self.city, self.province)
class Venue(models.Model):
name = models.CharField(max_length=255, help_text='A short descriptive name.')
address = models.CharField(max_length=512, help_text='Physical venue address.')
location = models.ForeignKey(
Location,
blank=True,
null=True,
help_text='Location of the venue.'
)
def __unicode__(self):
return self.name
class Event(ModelBase):
venue = models.ForeignKey(
Venue,
help_text='Venue where the event will take place.'
)
content = RichTextField(help_text='Full article detailing this event.')
def get_absolute_url(self):
return reverse('event_object_detail', kwargs={'slug': self.slug})
|
bsd-3-clause
| 7,660,587,681,568,411,000
| 28.115385
| 83
| 0.637384
| false
| 3.356984
| false
| false
| false
|
xindiguo/pythonSynapseUtils
|
pythonSynapseUtils/synutils.py
|
1
|
4081
|
#!/usr/bin/env python
import argparse
import os
import sys
import synapseclient
import hashlib
import string
script_path = os.path.dirname(__file__)
local_module_path = os.path.abspath(os.path.join(script_path,'lib'))
sys.path.append(local_module_path)
import s3
STATIC_BUCKET = "static.synapse.org"
def create_html_file(html_link):
#get a unique file name from txt/link
html_file_name = str(hashlib.md5(html_link).hexdigest()) + '.html'
f = open(html_file_name, 'w')
html_template = string.Template("""
<!DOCTYPE html>
<html>
<body>
<iframe src="$HTML_LINK" width="1500" height="1000" allowfullscreen="true" mozallowfullscreen="true" webkitallowfullscreen="true"></iframe>
</body>
</html>
""")
html_content = html_template.substitute(HTML_LINK=html_link)
f.write(html_content)
f.close()
os.chmod(html_file_name, 0755) #make the file web readable before upload
return(html_file_name)
def s3manage(args):
"""
    Utilities for managing S3 buckets
"""
#establish a connection to S3
bucket = s3.bucketManager(STATIC_BUCKET, args.aws_key, args.aws_secret, rememberMe=args.rememberMe)
#if user specifies an html link
if args.html_link is not None:
html_file = create_html_file(args.html_link)
args.upload_path = html_file
if os.path.isdir(args.upload_path) is True:
url = bucket.uploadDir(args.upload_path,args.upload_prefix)
else:
url = bucket.uploadFiles(args.upload_path,args.upload_prefix)
if args.synapse_wikiID is not None:
embed_url_in_synapse_wiki(url,args.synapse_wikiID)
def embed_url_in_synapse_wiki(url, wikiID):
import synapseclient
syn = synapseclient.login()
wiki = syn.getWiki(wikiID)
markdown = wiki['markdown']
#complete hack
if len(url) > 1:
url = [url[x] for x in url if x.endswith('index.html')]
url = url[0]
else:
url = url.values()[0]
#percent encoded URL
import urllib
url = urllib.quote(url, safe='')
link_markdown = '${iframe?site=' + url + '&height=1000}'
wiki['markdown'] = link_markdown
wiki = syn.store(wiki)
syn.onweb(wikiID)
def build_parser():
"""Builds the argument parser and returns the result."""
parser = argparse.ArgumentParser(description='Synapse Python Utilities')
parser.add_argument('--debug', dest='debug', action='store_true')
subparsers = parser.add_subparsers(title='commands',
description='The following commands are available:',
help='For additional help: "synutils.py <COMMAND> -h"')
parser_s3 = subparsers.add_parser('s3',help='utilities to manage data on static.synapse.org')
parser_s3.add_argument('-k' , '--aws_key', dest='aws_key', help='AWS Key', default=None)
parser_s3.add_argument('-s' , '--aws_secret', dest='aws_secret', help='AWS secret key', default=None)
parser_s3.add_argument('-up', '--upload', dest='upload_path', type=str, default=None)
parser_s3.add_argument('-l', '--link', dest='html_link', type=str, default=None,
help = "html link to embed in a synapse wiki")
parser_s3.add_argument('-w', '--wikiID', dest='synapse_wikiID', type=str, default=None,
help = "synapse wiki id to embed the link in")
parser_s3.add_argument('-p', '--prefix', dest='upload_prefix', type=str, default='scratch/',
help = 'prefix adds the sub dir structure on S3 eg. test/ will add the file under test/ folder on s3 bucket')
parser_s3.add_argument('--rememberMe', '--remember-me', dest='rememberMe', action='store_true', default=False,
help='Cache credentials for automatic authentication for future interactions')
parser_s3.set_defaults(func=s3manage)
return parser
def perform_main(args):
if 'func' in args:
try:
args.func(args)
except Exception as ex:
raise
def main():
args = build_parser().parse_args()
perform_main(args)
if __name__ == "__main__":
main()
|
apache-2.0
| -1,364,867,809,172,800,000
| 33.008333
| 144
| 0.645675
| false
| 3.435185
| false
| false
| false
|
dddomodossola/remi
|
examples/onclose_window_app.py
|
1
|
1513
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import remi.gui as gui
from remi import start, App
class MyApp(App):
def main(self, name='world'):
# margin 0px auto allows to center the app to the screen
wid = gui.VBox(width=300, height=200, margin='0px auto')
lbl = gui.Label("Close or reload the page, the console thread will stop automatically.")
wid.append(lbl)
# add the following 3 lines to your app and the on_window_close method to make the console close automatically
tag = gui.Tag(_type='script')
tag.add_child("javascript", """window.onunload=function(e){remi.sendCallback('%s','%s');return "close?";};""" % (
str(id(self)), "on_window_close"))
wid.add_child("onunloadevent", tag)
# returning the root widget
return wid
def on_window_close(self):
# here you can handle the unload
print("app closing")
self.close()
if __name__ == "__main__":
start(MyApp)
|
apache-2.0
| -3,865,904,934,796,648,400
| 34.186047
| 121
| 0.662921
| false
| 3.992084
| false
| false
| false
|
opentripplanner/OTPQA
|
hreport.py
|
1
|
2481
|
import simplejson as json
import numpy as np
def parsetime(aa):
if aa is None:
return None
return float( aa.split()[0] )
def main(filenames):
if len(filenames)==0:
return
yield "<html>"
yield """<head><style>table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
text-align: left;
}</style></head>"""
datasets = []
shas = []
for fn in filenames:
blob = json.load( open(fn) )
shas.append( blob['git_sha1'] )
dataset = dict( [(response["id_tuple"], response) for response in blob['responses']] )
datasets.append( dataset )
id_tuples = datasets[0].keys()
yield """<table border="1">"""
dataset_total_times = dict(zip( range(len(datasets)),[[] for x in range(len(datasets))]) )
dataset_avg_times = dict(zip(range(len(datasets)),[[] for x in range(len(datasets))]) )
dataset_fails = dict(zip(range(len(datasets)), [0]*len(datasets)))
yield "<tr><td>request id</td>"
for fn,sha in zip(filenames,shas):
yield "<td>%s (commit:%s)</td>"%(fn,sha)
yield "</tr>"
for id_tuple in id_tuples:
yield """<tr><td rowspan="2"><a href="%s">%s</a></td>"""%(datasets[0][id_tuple]['url'], id_tuple)
for i, dataset in enumerate( datasets ):
response = dataset[id_tuple]
dataset_total_times[i].append( parsetime( response['total_time'] ) )
dataset_avg_times[i].append( parsetime( response['avg_time'] ) )
yield "<td>%s total, %s avg</td>"%(response['total_time'],response['avg_time'])
yield "</tr>"
for i, dataset in enumerate( datasets ):
yield "<td>"
response = dataset[id_tuple]
yield "<table border=1 width=100%><tr>"
if len(response['itins']) == 0:
dataset_fails[i] += 1
yield "<td style=\"background-color:#EDA1A1\">NONE</td>"
for itin in response['itins']:
filling = itin['routes']
if filling=="{}":
color = "#EDECA1"
else:
color = "#AEEDA1"
yield "<td style=\"background-color:%s\">%s</td>"%(color,filling)
yield "</tr></table>"
yield "</td>"
yield "</tr>"
yield "<tr><td>stats</td>"
for i in range(len(datasets)):
yield "<td>fails: %s (%.2f%%). total time: median:%.2fs mean:%.2fs</td>"%(dataset_fails[i], 100*dataset_fails[i]/float(len(id_tuples)), np.median(dataset_total_times[i]),np.mean(dataset_total_times[i]))
yield "</tr>"
yield "</table>"
yield "</html>"
if __name__=='__main__':
import sys
if len(sys.argv)<2:
print "usage: cmd fn1 [fn2 [fn3 ...]]"
exit()
for line in main(sys.argv[1:]):
print line
|
gpl-3.0
| -7,302,979,014,916,302,000
| 24.854167
| 204
| 0.614672
| false
| 2.868208
| false
| false
| false
|
pdamodaran/yellowbrick
|
yellowbrick/text/dispersion.py
|
1
|
10916
|
# yellowbrick.text.dispersion
# Implementations of lexical dispersions for text visualization.
#
# Author: Larry Gray
# Created: 2018-06-21 10:06
#
# Copyright (C) 2018 District Data Labs
# For license information, see LICENSE.txt
#
# ID: dispersion.py [] lwgray@gmail.com $
"""
Implementation of lexical dispersion for text visualization
"""
##########################################################################
## Imports
##########################################################################
from collections import defaultdict
import itertools
from yellowbrick.text.base import TextVisualizer
from yellowbrick.style.colors import resolve_colors
from yellowbrick.exceptions import YellowbrickValueError
import numpy as np
##########################################################################
## Dispersion Plot Visualizer
##########################################################################
class DispersionPlot(TextVisualizer):
"""
DispersionPlotVisualizer allows for visualization of the lexical dispersion
    of words in a corpus. Lexical dispersion is a measure of a word's
    homogeneity across the parts of a corpus. This plot notes the occurrences
    of a word and how far from the beginning of the corpus each one appears.
Parameters
----------
target_words : list
A list of target words whose dispersion across a corpus passed at fit
will be visualized.
ax : matplotlib axes, default: None
The axes to plot the figure on.
labels : list of strings
The names of the classes in the target, used to create a legend.
Labels must match names of classes in sorted order.
colors : list or tuple of colors
Specify the colors for each individual class
colormap : string or matplotlib cmap
Qualitative colormap for discrete target
ignore_case : boolean, default: False
        If True, matching of target words is case-insensitive.
annotate_docs : boolean, default: False
Specify whether document boundaries will be displayed. Vertical lines
are positioned at the end of each document.
kwargs : dict
Pass any additional keyword arguments to the super class.
These parameters can be influenced later on in the visualization
process, but can and should be set as early as possible.
"""
# NOTE: cannot be np.nan
NULL_CLASS = None
def __init__(self, target_words, ax=None, colors=None, ignore_case=False,
annotate_docs=False, labels=None, colormap=None, **kwargs):
super(DispersionPlot, self).__init__(ax=ax, **kwargs)
self.labels = labels
self.colors = colors
self.colormap = colormap
self.target_words = target_words
self.ignore_case = ignore_case
self.annotate_docs = annotate_docs
def _compute_dispersion(self, text, y):
self.boundaries_ = []
offset = 0
if y is None:
y = itertools.repeat(None)
for doc, target in zip(text, y):
for word in doc:
if self.ignore_case:
word = word.lower()
# NOTE: this will find all indices if duplicate words are supplied
                # In the case that word is not in target words, an empty list is
# returned and no data will be yielded
offset += 1
for y_coord in (self.indexed_words_ == word).nonzero()[0]:
y_coord = int(y_coord)
yield (offset, y_coord, target)
if self.annotate_docs:
self.boundaries_.append(offset)
self.boundaries_ = np.array(self.boundaries_, dtype=int)
def _check_missing_words(self, points):
for index in range(len(self.indexed_words_)):
if index in points[:,1]:
pass
else:
raise YellowbrickValueError((
"The indexed word '{}' is not found in "
"this corpus"
).format(self.indexed_words_[index]))
def fit(self, X, y=None, **kwargs):
"""
The fit method is the primary drawing input for the dispersion
visualization.
Parameters
----------
X : list or generator
Should be provided as a list of documents or a generator
that yields a list of documents that contain a list of
words in the order they appear in the document.
y : ndarray or Series of length n
An optional array or series of target or class values for
instances. If this is specified, then the points will be colored
according to their class.
kwargs : dict
Pass generic arguments to the drawing method
Returns
-------
self : instance
Returns the instance of the transformer/visualizer
"""
if y is not None:
self.classes_ = np.unique(y)
elif y is None and self.labels is not None:
self.classes_ = np.array([self.labels[0]])
else:
self.classes_ = np.array([self.NULL_CLASS])
# Create an index (e.g. the y position) for the target words
self.indexed_words_ = np.flip(self.target_words, axis=0)
if self.ignore_case:
self.indexed_words_ = np.array([w.lower() for w in self.indexed_words_])
# Stack is used to create a 2D array from the generator
try:
points_target = np.stack(self._compute_dispersion(X, y))
except ValueError:
raise YellowbrickValueError((
"No indexed words were found in the corpus"
))
points = np.stack(zip(points_target[:,0].astype(int),
points_target[:,1].astype(int)))
self.target = points_target[:,2]
self._check_missing_words(points)
self.draw(points, self.target)
return self
def draw(self, points, target=None, **kwargs):
"""
Called from the fit method, this method creates the canvas and
draws the plot on it.
Parameters
----------
kwargs: generic keyword arguments.
"""
# Resolve the labels with the classes
labels = self.labels if self.labels is not None else self.classes_
if len(labels) != len(self.classes_):
raise YellowbrickValueError((
"number of supplied labels ({}) does not "
"match the number of classes ({})"
).format(len(labels), len(self.classes_)))
# Create the color mapping for the labels.
color_values = resolve_colors(
n_colors=len(labels), colormap=self.colormap, colors=self.color)
colors = dict(zip(labels, color_values))
# Transform labels into a map of class to label
labels = dict(zip(self.classes_, labels))
# Define boundaries with a vertical line
if self.annotate_docs:
for xcoords in self.boundaries_:
self.ax.axvline(x=xcoords, color='lightgray', linestyle='dashed')
series = defaultdict(lambda: {'x':[], 'y':[]})
if target is not None:
for point, t in zip(points, target):
label = labels[t]
series[label]['x'].append(point[0])
series[label]['y'].append(point[1])
else:
label = self.classes_[0]
for x, y in points:
series[label]['x'].append(x)
series[label]['y'].append(y)
for label, points in series.items():
self.ax.scatter(points['x'], points['y'], marker='|',
c=colors[label], zorder=100, label=label)
self.ax.set_yticks(list(range(len(self.indexed_words_))))
self.ax.set_yticklabels(self.indexed_words_)
def finalize(self, **kwargs):
"""
The finalize method executes any subclass-specific axes
finalization steps. The user calls poof & poof calls finalize.
Parameters
----------
kwargs: generic keyword arguments.
"""
self.ax.set_ylim(-1, len(self.indexed_words_))
self.ax.set_title("Lexical Dispersion Plot")
self.ax.set_xlabel("Word Offset")
self.ax.grid(False)
# Add the legend outside of the figure box.
if not all(self.classes_ == np.array([self.NULL_CLASS])):
box = self.ax.get_position()
self.ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
self.ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
##########################################################################
## Quick Method
##########################################################################
def dispersion(words, corpus, y=None, ax=None, colors=None, colormap=None,
labels=None, annotate_docs=False, ignore_case=False, **kwargs):
""" Displays lexical dispersion plot for words in a corpus
    This helper function is a quick wrapper to utilize the DispersionPlot
    Visualizer for one-off analysis.
Parameters
----------
words : list
A list of words whose dispersion will be examined within a corpus
y : ndarray or Series of length n
An optional array or series of target or class values for
instances. If this is specified, then the points will be colored
according to their class.
corpus : list
Should be provided as a list of documents that contain
a list of words in the order they appear in the document.
ax : matplotlib axes, default: None
The axes to plot the figure on.
labels : list of strings
The names of the classes in the target, used to create a legend.
Labels must match names of classes in sorted order.
colors : list or tuple of colors
Specify the colors for each individual class
colormap : string or matplotlib cmap
Qualitative colormap for discrete target
annotate_docs : boolean, default: False
Specify whether document boundaries will be displayed. Vertical lines
are positioned at the end of each document.
ignore_case : boolean, default: False
        If True, matching of target words is case-insensitive.
kwargs : dict
Pass any additional keyword arguments to the super class.
Returns
-------
ax: matplotlib axes
Returns the axes that the plot was drawn on
"""
# Instantiate the visualizer
visualizer = DispersionPlot(
words, ax=ax, colors=colors, colormap=colormap,
ignore_case=ignore_case, labels=labels,
annotate_docs=annotate_docs, **kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(corpus, y, **kwargs)
# Return the axes object on the visualizer
return visualizer.ax
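##########################################################################
## Illustrative Usage (added example)
##########################################################################
# Illustrative usage sketch added for clarity; it is not part of the
# original module. It assumes a working yellowbrick and matplotlib
# installation so this module's imports resolve. The two-document corpus
# below is invented and only demonstrates the expected input shape: a
# list of documents, each a list of tokens in the order they appear.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    corpus = [
        "the cat sat on the mat".split(),
        "the dog chased the cat around the yard".split(),
    ]
    # plot the occurrences of "cat" and "dog" and mark document boundaries
    dispersion(["cat", "dog"], corpus, annotate_docs=True)
    plt.show()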
|
apache-2.0
| -2,299,755,290,369,907,200
| 33.764331
| 84
| 0.588402
| false
| 4.501443
| false
| false
| false
|
lucasberti/telegrao-py
|
plugins/apex.py
|
1
|
2106
|
import requests
import json
from api import send_message
ENDPOINT = "https://public-api.tracker.gg/apex/v1/standard/profile/5/"
PLAYERS = {
14160874: "bertoncio",
16631085: "beartz",
85867003: "R3TCH4",
52451934: "xisteaga",
10549434: "Axasdas123",
123123862: "MeroFabio",
569341881: "burnovisk",
299335806: "Springl3s",
77547673: "BeDabul"
}
def get_stats(username):
headers = {
"TRN-Api-Key": "987c5b41-5649-4b4e-9d3f-4d58cc904584"
}
return requests.get(ENDPOINT + username, headers=headers).json()
def get_string(data):
data = data["data"]
legend_on_menu = data["children"][0]["metadata"]["legend_name"]
username = data["metadata"]["platformUserHandle"]
hero_stats = ""
for legend in data["children"]:
hero_stats += f"{legend['metadata']['legend_name']}\n"
for stat in legend["stats"]:
name = stat["metadata"]["name"]
value = stat["displayValue"]
percentile = stat["percentile"] if "percentile" in stat.keys() else "desconecidi"
rank = stat["rank"] if "rank" in stat.keys() else "desconecidi"
hero_stats += f"{name}: {value} (top {percentile}% rank {rank})\n"
hero_stats += "\n"
global_stats = ""
for stat in data["stats"]:
global_stats += f"{stat['metadata']['name']}: {stat['displayValue']}\n"
return f"""{username} mt noob rs
ta c {legend_on_menu} selelessiondn
{hero_stats}
globau:
{global_stats}"""
def on_msg_received(msg, matches):
chat = msg["chat"]["id"]
user = msg["from"]["id"]
player = None
if matches.group(1):
player = matches.group(1)
else:
if user in PLAYERS:
player = PLAYERS[user]
if player is not None:
try:
data = get_stats(player)
stats = get_string(data)
print(stats)
send_message(chat, stats)
except Exception as e:
send_message(chat, f"vish deu merda..... {e}")
|
mit
| 2,123,689,757,657,363,200
| 25
| 93
| 0.561254
| false
| 3.337559
| false
| false
| false
|
vik001ind/RSAExploits
|
RSAExploits/exploits/hastad.py
|
1
|
3524
|
""" Class defintion for simple hastad broadcast exploit """
from RSAExploits import util
from RSAExploits.exploits.exploit import Exploit
class Hastad(Exploit):
""" Class providing a run interface to hastad broadcast exploit"""
def run(self, rsadata_list, info_dict = None):
""" Attempts to recover plaintext using Hastad's broadcast attack
This attack works when we have a list of RSA_Data objects such
that the length of the list is greater than or equal to e,
unique and coprime moduli are used for each encryption, and the
same plaintext message is encrypted to generate all ciphertexts.
Args:
rsadata_list: A list of RSA_Data objects on which to attempt
Hastad's exploit
info_dict: Not currently used
Assumptions:
            No two RSA_Data objects in rsadata_list share the same public
            exponent e, id number, and modulus N at once. This
should be prevented by calling the remove_duplicates() function
in RSA_Data.py
Side Effects:
If a message is recovered, the corresponding RSA_Data objects
will be updated with this information
Return:
True if at least one message was recovered
"""
print ("Hastad: Running Attack...")
success = False
e_id_dict = self.group_by_e_and_id(rsadata_list)
for group in e_id_dict:
msg = self.hastad_broadcast_exploit(e_id_dict[group])
if msg != None:
success = True
for rsadata in e_id_dict[group]:
rsadata.set_m(msg)
if success:
print("Hastad: Success, message found.")
else:
print("Hastad: Failure, message not found.")
return success
@staticmethod
def group_by_e_and_id(rsadata_list):
""" Group the RSA_Data objects by public exponent and id """
e_id_dict = {}
for rsadata in rsadata_list:
# Only consider entries with an ID number
if rsadata.get_id() == None:
continue
# Only consider entries with a ciphertext
if rsadata.get_c() == None:
continue
# If the (e, idnum) tuple already exists in the dictionary, just
# append the new entry to the already existing list
if (rsadata.get_e(), rsadata.get_id()) in e_id_dict:
e_id_dict[(rsadata.get_e(), rsadata.get_id())].append(rsadata)
# Otherwise, create a new list for the new tuple
else:
e_id_dict[(rsadata.get_e(), rsadata.get_id())] = [rsadata]
return e_id_dict
@staticmethod
def hastad_broadcast_exploit(rsadata_list):
""" Recover the plaintext message using chinese remainder theorem """
e = rsadata_list[0].get_e()
if len(rsadata_list) < e:
return None
ns = []
cs = []
for index in range(e):
ns.append(rsadata_list[index].get_n())
cs.append(rsadata_list[index].get_c())
s = util.crt(ns, cs)
pt = util.int_nthroot(s, e)
if pt is not None:
return pt
else:
return None
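# ----------------------------------------------------------------------
# Illustrative, self-contained sketch added for clarity; it is not part of
# the original module. It re-derives the e = 3 broadcast recovery with a
# hand-rolled CRT and integer cube root instead of RSAExploits.util, and it
# assumes the package's own imports above still resolve. The toy moduli and
# plaintext are made up for demonstration only.
if __name__ == "__main__":
    def _modinv(a, n):
        """ Modular inverse of a mod n via the extended Euclidean algorithm """
        t, new_t, r, new_r = 0, 1, n, a % n
        while new_r:
            q = r // new_r
            t, new_t = new_t, t - q * new_t
            r, new_r = new_r, r - q * new_r
        return t % n
    def _crt(moduli, residues):
        """ Combine x = r_i (mod n_i) into a single residue mod prod(n_i) """
        prod = 1
        for n in moduli:
            prod *= n
        total = 0
        for n, r in zip(moduli, residues):
            p = prod // n
            total += r * p * _modinv(p, n)
        return total % prod
    def _icbrt(x):
        """ Integer cube root by binary search """
        lo, hi = 0, 1 << (x.bit_length() // 3 + 2)
        while lo < hi:
            mid = (lo + hi) // 2
            if mid ** 3 < x:
                lo = mid + 1
            else:
                hi = mid
        return lo
    toy_moduli = [101 * 103, 107 * 109, 113 * 127]  # pairwise coprime
    toy_message = 42                                # toy_message ** 3 < prod(moduli)
    ciphertexts = [pow(toy_message, 3, n) for n in toy_moduli]
    recovered = _icbrt(_crt(toy_moduli, ciphertexts))
    assert recovered == toy_message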
|
mit
| -132,503,831,933,218,540
| 33.891089
| 80
| 0.542849
| false
| 4.308068
| false
| false
| false
|
aschleg/mathpy
|
mathpy/special/gamma.py
|
1
|
1555
|
# encoding=utf8
import numpy as np
def k_function(n):
r"""
Returns the K-function up to a given integer n.
Parameters
----------
n : int
The length of the returned K-function as in :math:`K(n)`.
Returns
-------
array-like
numpy array of the computed integers returned by the K-function.
Notes
-----
The K-function generalizes the hyperfactorial for complex numbers and is defined for positive
integers as:
.. math::
K(n) \equiv 1^1 2^2 3^3 \cdots (n - 1)^{n - 1}
The K-function can also be expressed as a hyperfactorial, :math:`H`:
.. math::
K(n) = H(n - 1)
The Gamma function and Barnes G-Function are also closely related by:
.. math::
K(n) = \frac{[\Gamma{n}]^{n - 1}}{G(n)}
Examples
--------
>>> k_function(3)
array([1., 1., 4.])
>>> k_function(5).astype(int)
array([ 1, 1, 4, 108, 27648])
References
----------
Sloane, N. J. A. Sequence A002109/M3706 in "The On-Line Encyclopedia of Integer Sequences."
Weisstein, Eric W. "K-Function." From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/K-Function.html
Wikipedia contributors. (2015, December 5). K-function. In Wikipedia, The Free Encyclopedia.
Retrieved 13:56, March 1, 2018, from https://en.wikipedia.org/w/index.php?title=K-function&oldid=693891074
"""
kn = np.empty(n)
k = 1
for i in np.arange(0, n):
k *= (i) ** (i)
kn[i] = k
return kn
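# Quick sanity-check sketch (illustrative only): compare k_function against a
# direct evaluation of the defining product 1^1 * 2^2 * ... * (n - 1)^(n - 1).
if __name__ == '__main__':
    n = 5
    direct = []
    for m in range(1, n + 1):
        prod = 1
        for j in range(1, m):
            prod *= j ** j
        direct.append(prod)
    assert [int(v) for v in k_function(n)] == direct   # [1, 1, 4, 108, 27648]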
|
mit
| -6,329,161,334,717,358,000
| 21.867647
| 114
| 0.573633
| false
| 3.246347
| false
| false
| false
|
maarteninja/ml2014
|
three/minimize.py
|
1
|
8935
|
#This program is distributed WITHOUT ANY WARRANTY; without even the implied
#warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
#
#This file contains a Python version of Carl Rasmussen's Matlab-function
#minimize.m
#
#minimize.m is copyright (C) 1999 - 2006, Carl Edward Rasmussen.
#Python adaptation by Roland Memisevic 2008.
#
#
#The following is the original copyright notice that comes with the
#function minimize.m
#(from http://www.kyb.tuebingen.mpg.de/bs/people/carl/code/minimize/Copyright):
#
#
#"(C) Copyright 1999 - 2006, Carl Edward Rasmussen
#
#Permission is granted for anyone to copy, use, or modify these
#programs and accompanying documents for purposes of research or
#education, provided this copyright notice is retained, and note is
#made of any changes that have been made.
#
#These programs and documents are distributed without any warranty,
#express or implied. As the programs were written for research
#purposes only, they have not been tested to the degree that would be
#advisable in any important application. All use of these programs is
#entirely at the user's own risk."
"""minimize.py
This module contains a function 'minimize' that performs unconstrained
gradient based optimization using nonlinear conjugate gradients.
The function is a straightforward Python-translation of Carl Rasmussen's
Matlab-function minimize.m
"""
from numpy import dot, isinf, isnan, any, sqrt, isreal, real, nan, inf
def minimize(X, f, grad, args, maxnumlinesearch=None, maxnumfuneval=None, red=1.0, verbose=True):
INT = 0.1;# don't reevaluate within 0.1 of the limit of the current bracket
EXT = 3.0; # extrapolate maximum 3 times the current step-size
MAX = 20; # max 20 function evaluations per line search
RATIO = 10; # maximum allowed slope ratio
SIG = 0.1;RHO = SIG/2;# SIG and RHO are the constants controlling the Wolfe-
#Powell conditions. SIG is the maximum allowed absolute ratio between
#previous and new slopes (derivatives in the search direction), thus setting
#SIG to low (positive) values forces higher precision in the line-searches.
#RHO is the minimum allowed fraction of the expected (from the slope at the
#initial point in the linesearch). Constants must satisfy 0 < RHO < SIG < 1.
#Tuning of SIG (depending on the nature of the function to be optimized) may
#speed up the minimization; it is probably not worth playing much with RHO.
SMALL = 10.**-16 #minimize.m uses matlab's realmin
    if maxnumlinesearch is None:
        if maxnumfuneval is None:
            raise ValueError("Specify maxnumlinesearch or maxnumfuneval")
else:
S = 'Function evaluation'
length = maxnumfuneval
else:
        if maxnumfuneval is not None:
            raise ValueError("Specify either maxnumlinesearch or maxnumfuneval (not both)")
else:
S = 'Linesearch'
length = maxnumlinesearch
i = 0 # zero the run length counter
ls_failed = 0 # no previous line search has failed
f0 = f(X, *args) # get function value and gradient
df0 = grad(X, *args)
fX = [f0]
i = i + (length<0) # count epochs?!
s = -df0; d0 = -dot(s,s) # initial search direction (steepest) and slope
x3 = red/(1.0-d0) # initial step is red/(|s|+1)
while i < abs(length): # while not finished
i = i + (length>0) # count iterations?!
X0 = X; F0 = f0; dF0 = df0 # make a copy of current values
if length>0:
M = MAX
else:
M = min(MAX, -length-i)
while 1: # keep extrapolating as long as necessary
x2 = 0; f2 = f0; d2 = d0; f3 = f0; df3 = df0
success = 0
while (not success) and (M > 0):
try:
M = M - 1; i = i + (length<0) # count epochs?!
f3 = f(X+x3*s, *args)
df3 = grad(X+x3*s, *args)
if isnan(f3) or isinf(f3) or any(isnan(df3)+isinf(df3)):
print "an error in minimize error"
print "f3 = ", f3
print "df3 = ", df3
return
success = 1
                except: # catch any error which occurred in f
x3 = (x2+x3)/2 # bisect and try again
if f3 < F0:
X0 = X+x3*s; F0 = f3; dF0 = df3 # keep best values
d3 = dot(df3,s) # new slope
if d3 > SIG*d0 or f3 > f0+x3*RHO*d0 or M == 0:
# are we done extrapolating?
break
x1 = x2; f1 = f2; d1 = d2 # move point 2 to point 1
x2 = x3; f2 = f3; d2 = d3 # move point 3 to point 2
A = 6*(f1-f2)+3*(d2+d1)*(x2-x1) # make cubic extrapolation
B = 3*(f2-f1)-(2*d1+d2)*(x2-x1)
Z = B+sqrt(complex(B*B-A*d1*(x2-x1)))
if Z != 0.0:
x3 = x1-d1*(x2-x1)**2/Z # num. error possible, ok!
else:
x3 = inf
if (not isreal(x3)) or isnan(x3) or isinf(x3) or (x3 < 0):
# num prob | wrong sign?
x3 = x2*EXT # extrapolate maximum amount
elif x3 > x2*EXT: # new point beyond extrapolation limit?
x3 = x2*EXT # extrapolate maximum amount
elif x3 < x2+INT*(x2-x1): # new point too close to previous point?
x3 = x2+INT*(x2-x1)
x3 = real(x3)
while (abs(d3) > -SIG*d0 or f3 > f0+x3*RHO*d0) and M > 0:
# keep interpolating
if (d3 > 0) or (f3 > f0+x3*RHO*d0): # choose subinterval
x4 = x3; f4 = f3; d4 = d3 # move point 3 to point 4
else:
x2 = x3; f2 = f3; d2 = d3 # move point 3 to point 2
if f4 > f0:
x3 = x2-(0.5*d2*(x4-x2)**2)/(f4-f2-d2*(x4-x2))
# quadratic interpolation
else:
A = 6*(f2-f4)/(x4-x2)+3*(d4+d2) # cubic interpolation
B = 3*(f4-f2)-(2*d2+d4)*(x4-x2)
if A != 0:
x3=x2+(sqrt(B*B-A*d2*(x4-x2)**2)-B)/A
# num. error possible, ok!
else:
x3 = inf
if isnan(x3) or isinf(x3):
x3 = (x2+x4)/2 # if we had a numerical problem then bisect
x3 = max(min(x3, x4-INT*(x4-x2)),x2+INT*(x4-x2))
# don't accept too close
f3 = f(X+x3*s, *args)
df3 = grad(X+x3*s, *args)
if f3 < F0:
X0 = X+x3*s; F0 = f3; dF0 = df3 # keep best values
M = M - 1; i = i + (length<0) # count epochs?!
d3 = dot(df3,s) # new slope
if abs(d3) < -SIG*d0 and f3 < f0+x3*RHO*d0: # if line search succeeded
X = X+x3*s; f0 = f3; fX.append(f0) # update variables
if verbose: print '%s %6i; Value %4.6e\r' % (S, i, f0)
s = (dot(df3,df3)-dot(df0,df3))/dot(df0,df0)*s - df3
# Polack-Ribiere CG direction
df0 = df3 # swap derivatives
d3 = d0; d0 = dot(df0,s)
if d0 > 0: # new slope must be negative
s = -df0; d0 = -dot(s,s) # otherwise use steepest direction
x3 = x3 * min(RATIO, d3/(d0-SMALL)) # slope ratio but max RATIO
ls_failed = 0 # this line search did not fail
else:
X = X0; f0 = F0; df0 = dF0 # restore best point so far
if ls_failed or (i>abs(length)):# line search failed twice in a row
break # or we ran out of time, so we give up
s = -df0; d0 = -dot(s,s) # try steepest
x3 = 1/(1-d0)
ls_failed = 1 # this line search failed
if verbose: print "\n"
return X, fX, i
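# Hedged usage sketch: minimize the convex quadratic 0.5*x'Ax - b'x, whose
# unique minimizer solves A x = b. The objective and gradient take extra
# arguments exactly as minimize() expects, i.e. f(X, *args) and grad(X, *args).
if __name__ == "__main__":
    from numpy import array
    def fquad(x, A, b):
        return 0.5 * dot(x, dot(A, x)) - dot(b, x)
    def gquad(x, A, b):
        return dot(A, x) - b
    A = array([[3.0, 0.5], [0.5, 2.0]])
    b = array([1.0, -1.0])
    x0 = array([5.0, 5.0])
    xmin, fvals, nls = minimize(x0, fquad, gquad, (A, b),
                                maxnumlinesearch=50, verbose=False)
    # xmin should be close to the solution of A x = b, roughly
    # [0.4348, -0.6087]; fvals holds the objective after each line search.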
|
gpl-2.0
| -1,338,693,259,614,311,000
| 49.196629
| 97
| 0.484835
| false
| 3.643964
| false
| false
| false
|
GiulioRossetti/ndlib
|
ndlib/models/epidemics/IndependentCascadesModel.py
|
1
|
3628
|
from ..DiffusionModel import DiffusionModel
import numpy as np
import future.utils
__author__ = 'Giulio Rossetti'
__license__ = "BSD-2-Clause"
__email__ = "giulio.rossetti@gmail.com"
class IndependentCascadesModel(DiffusionModel):
"""
Edge Parameters to be specified via ModelConfig
:param threshold: The edge threshold. As default a value of 0.1 is assumed for all edges.
"""
def __init__(self, graph, seed=None):
"""
Model Constructor
:param graph: A networkx graph object
"""
super(self.__class__, self).__init__(graph, seed)
self.available_statuses = {
"Susceptible": 0,
"Infected": 1,
"Removed": 2
}
self.parameters = {
"model": {},
"nodes": {},
"edges": {
"threshold": {
"descr": "Edge threshold",
"range": [0, 1],
"optional": True,
"default": 0.1
}
},
}
self.name = "Independent Cascades"
def iteration(self, node_status=True):
"""
Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status)
"""
self.clean_initial_status(self.available_statuses.values())
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(actual_status)
if node_status:
return {"iteration": 0, "status": actual_status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
for u in self.graph.nodes:
if self.status[u] != 1:
continue
neighbors = list(self.graph.neighbors(u)) # neighbors and successors (in DiGraph) produce the same result
# Standard threshold
if len(neighbors) > 0:
threshold = 1.0/len(neighbors)
for v in neighbors:
if actual_status[v] == 0:
key = (u, v)
# Individual specified thresholds
if 'threshold' in self.params['edges']:
if key in self.params['edges']['threshold']:
threshold = self.params['edges']['threshold'][key]
elif (v, u) in self.params['edges']['threshold'] and not self.graph.directed:
threshold = self.params['edges']['threshold'][(v, u)]
flip = np.random.random_sample()
if flip <= threshold:
actual_status[v] = 1
actual_status[u] = 2
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
|
bsd-2-clause
| -6,692,687,738,186,138,000
| 35.28
| 118
| 0.507993
| false
| 4.435208
| false
| false
| false
|
Northeaster/TargetSentimentAnalysis
|
lib/rnn_cells/gru_cell.py
|
1
|
1575
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from lib.rnn_cells.base_cell import BaseCell
from lib import linalg
#***************************************************************
class GRUCell(BaseCell):
""""""
#=============================================================
def __call__(self, inputs, state, scope=None):
""""""
with tf.variable_scope(scope or type(self).__name__):
with tf.variable_scope('Gates'):
linear = linalg.linear([inputs, state],
self.output_size,
add_bias=True,
n_splits=2,
moving_params=self.moving_params)
update_act, reset_act = linear
update_gate = linalg.sigmoid(update_act-self.forget_bias)
reset_gate = linalg.sigmoid(reset_act)
reset_state = reset_gate * state
with tf.variable_scope('Candidate'):
hidden_act = linalg.linear([inputs, reset_state],
self.output_size,
add_bias=False,
moving_params=self.moving_params)
hidden_tilde = self.recur_func(hidden_act)
hidden = update_gate * state + (1-update_gate) * hidden_tilde
return hidden, hidden
#=============================================================
@property
def state_size(self):
return self.output_size
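# Framework-free sketch of the same gating arithmetic for a single timestep
# (plain numpy, biases omitted; the weight names below are made up for the
# illustration, and np.tanh stands in for the cell's configurable recur_func).
if __name__ == '__main__':
    import numpy as np
    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))
    def gru_step(x, h, W_update, W_reset, W_cand, forget_bias=0.0):
        xh = np.concatenate([x, h])
        update_gate = sigmoid(xh.dot(W_update) - forget_bias)
        reset_gate = sigmoid(xh.dot(W_reset))
        candidate = np.tanh(np.concatenate([x, reset_gate * h]).dot(W_cand))
        return update_gate * h + (1.0 - update_gate) * candidate
    rng = np.random.RandomState(0)
    x, h = rng.randn(4), np.zeros(3)
    W_update, W_reset, W_cand = (rng.randn(7, 3) for _ in range(3))
    print(gru_step(x, h, W_update, W_reset, W_cand))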
|
apache-2.0
| -7,597,540,596,920,359,000
| 34.795455
| 68
| 0.486984
| false
| 4.578488
| false
| false
| false
|
cbenhagen/kivy
|
kivy/uix/textinput.py
|
1
|
108858
|
# -*- encoding: utf-8 -*-
'''
Text Input
==========
.. versionadded:: 1.0.4
.. image:: images/textinput-mono.jpg
.. image:: images/textinput-multi.jpg
The :class:`TextInput` widget provides a box for editable plain text.
Unicode, multiline, cursor navigation, selection and clipboard features
are supported.
The :class:`TextInput` uses two different coordinate systems:
* (x, y) - coordinates in pixels, mostly used for rendering on screen.
* (row, col) - cursor index in characters / lines, used for selection
and cursor movement.
Usage example
-------------
To create a multiline :class:`TextInput` (the 'enter' key adds a new line)::
from kivy.uix.textinput import TextInput
textinput = TextInput(text='Hello world')
To create a singleline :class:`TextInput`, set the :class:`TextInput.multiline`
property to False (the 'enter' key will defocus the TextInput and emit an
'on_text_validate' event)::
def on_enter(instance, value):
print('User pressed enter in', instance)
textinput = TextInput(text='Hello world', multiline=False)
textinput.bind(on_text_validate=on_enter)
The textinput's text is stored in its :attr:`TextInput.text` property. To run a
callback when the text changes::
def on_text(instance, value):
print('The widget', instance, 'have:', value)
textinput = TextInput()
textinput.bind(text=on_text)
You can set the :class:`focus <kivy.uix.behaviors.FocusBehavior>` to a
Textinput, meaning that the input box will be highlighted and keyboard focus
will be requested::
textinput = TextInput(focus=True)
The textinput is defocused if the 'escape' key is pressed, or if another
widget requests the keyboard. You can bind a callback to the focus property to
get notified of focus changes::
def on_focus(instance, value):
if value:
print('User focused', instance)
else:
print('User defocused', instance)
textinput = TextInput()
textinput.bind(focus=on_focus)
See :class:`~kivy.uix.behaviors.FocusBehavior`, from which the
:class:`TextInput` inherits, for more details.
Selection
---------
The selection is automatically updated when the cursor position changes.
You can get the currently selected text from the
:attr:`TextInput.selection_text` property.
Filtering
---------
You can control which text can be added to the :class:`TextInput` by
overwriting :meth:`TextInput.insert_text`. Every string that is typed, pasted
or inserted by any other means into the :class:`TextInput` is passed through
this function. By overwriting it you can reject or change unwanted characters.
For example, to write only in capitalized characters::
class CapitalInput(TextInput):
def insert_text(self, substring, from_undo=False):
s = substring.upper()
return super(CapitalInput, self).insert_text(s,\
from_undo=from_undo)
Or to only allow floats (0 - 9 and a single period)::
class FloatInput(TextInput):
pat = re.compile('[^0-9]')
def insert_text(self, substring, from_undo=False):
pat = self.pat
if '.' in self.text:
s = re.sub(pat, '', substring)
else:
s = '.'.join([re.sub(pat, '', s) for s in\
substring.split('.', 1)])
return super(FloatInput, self).insert_text(s, from_undo=from_undo)
Default shortcuts
-----------------
=============== ========================================================
Shortcuts Description
--------------- --------------------------------------------------------
Left            Move cursor to the left
Right           Move cursor to the right
Up              Move cursor up
Down            Move cursor down
Home            Move cursor to the beginning of the line
End             Move cursor to the end of the line
PageUp          Move cursor 3 lines up
PageDown        Move cursor 3 lines down
Backspace       Delete the selection or character before the cursor
Del             Delete the selection or character after the cursor
Shift + <dir> Start a text selection. Dir can be Up, Down, Left or
Right
Control + c Copy selection
Control + x Cut selection
Control + p Paste selection
Control + a Select all the content
Control + z undo
Control + r redo
=============== ========================================================
.. note::
To enable Emacs-style keyboard shortcuts, you can use
:class:`~kivy.uix.behaviors.emacs.EmacsBehavior`.
'''
__all__ = ('TextInput', )
import re
import sys
import string
from functools import partial
from os import environ
from weakref import ref
from kivy.animation import Animation
from kivy.base import EventLoop
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.config import Config
from kivy.compat import PY2
from kivy.logger import Logger
from kivy.metrics import inch
from kivy.utils import boundary, platform
from kivy.uix.behaviors import FocusBehavior
from kivy.core.text import Label
from kivy.graphics import Color, Rectangle, PushMatrix, PopMatrix, Callback
from kivy.graphics.context_instructions import Transform
from kivy.graphics.texture import Texture
from kivy.uix.widget import Widget
from kivy.uix.bubble import Bubble
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.image import Image
from kivy.properties import StringProperty, NumericProperty, \
BooleanProperty, AliasProperty, \
ListProperty, ObjectProperty, VariableListProperty, OptionProperty
Cache_register = Cache.register
Cache_append = Cache.append
Cache_get = Cache.get
Cache_remove = Cache.remove
Cache_register('textinput.label', timeout=60.)
Cache_register('textinput.width', timeout=60.)
FL_IS_NEWLINE = 0x01
# late binding
Clipboard = None
CutBuffer = None
MarkupLabel = None
_platform = platform
# for reloading, we need to keep a list of textinput to retrigger the rendering
_textinput_list = []
# cache the result
_is_osx = sys.platform == 'darwin'
# When we are generating documentation, Config doesn't exist
_is_desktop = False
if Config:
_is_desktop = Config.getboolean('kivy', 'desktop')
# register an observer to clear the textinput cache when OpenGL will reload
if 'KIVY_DOC' not in environ:
def _textinput_clear_cache(*l):
Cache_remove('textinput.label')
Cache_remove('textinput.width')
for wr in _textinput_list[:]:
textinput = wr()
if textinput is None:
_textinput_list.remove(wr)
else:
textinput._trigger_refresh_text()
textinput._refresh_hint_text()
from kivy.graphics.context import get_context
get_context().add_reload_observer(_textinput_clear_cache, True)
class Selector(ButtonBehavior, Image):
# Internal class for managing the selection Handles.
window = ObjectProperty()
target = ObjectProperty()
matrix = ObjectProperty()
def __init__(self, **kwargs):
super(Selector, self).__init__(**kwargs)
self.window.bind(on_touch_down=self.on_window_touch_down)
self.matrix = self.target.get_window_matrix()
with self.canvas.before:
Callback(self.update_transform)
PushMatrix()
self.transform = Transform()
with self.canvas.after:
PopMatrix()
def update_transform(self, cb):
m = self.target.get_window_matrix()
if self.matrix != m:
self.matrix = m
self.transform.identity()
self.transform.transform(self.matrix)
def transform_touch(self, touch):
matrix = self.matrix.inverse()
touch.apply_transform_2d(
lambda x, y: matrix.transform_point(x, y, 0)[:2])
def on_window_touch_down(self, win, touch):
if self.parent is not win:
return
try:
touch.push()
self.transform_touch(touch)
self._touch_diff = self.top - touch.y
if self.collide_point(*touch.pos):
FocusBehavior.ignored_touch.append(touch)
return super(Selector, self).on_touch_down(touch)
finally:
touch.pop()
class TextInputCutCopyPaste(Bubble):
# Internal class used for showing the little bubble popup when
# copy/cut/paste happen.
textinput = ObjectProperty(None)
''' Holds a reference to the TextInput this Bubble belongs to.
'''
but_cut = ObjectProperty(None)
but_copy = ObjectProperty(None)
but_paste = ObjectProperty(None)
but_selectall = ObjectProperty(None)
matrix = ObjectProperty(None)
def __init__(self, **kwargs):
self.mode = 'normal'
super(TextInputCutCopyPaste, self).__init__(**kwargs)
Clock.schedule_interval(self._check_parent, .5)
self.matrix = self.textinput.get_window_matrix()
with self.canvas.before:
Callback(self.update_transform)
PushMatrix()
self.transform = Transform()
with self.canvas.after:
PopMatrix()
def update_transform(self, cb):
m = self.textinput.get_window_matrix()
if self.matrix != m:
self.matrix = m
self.transform.identity()
self.transform.transform(self.matrix)
def transform_touch(self, touch):
matrix = self.matrix.inverse()
touch.apply_transform_2d(
lambda x, y: matrix.transform_point(x, y, 0)[:2])
def on_touch_down(self, touch):
try:
touch.push()
self.transform_touch(touch)
if self.collide_point(*touch.pos):
FocusBehavior.ignored_touch.append(touch)
return super(TextInputCutCopyPaste, self).on_touch_down(touch)
finally:
touch.pop()
def on_textinput(self, instance, value):
global Clipboard
if value and not Clipboard and not _is_desktop:
value._ensure_clipboard()
def _check_parent(self, dt):
        # this prevents the Bubble from staying on the screen if the
        # attached textinput is no longer on the screen.
parent = self.textinput
while parent is not None:
if parent == parent.parent:
break
parent = parent.parent
if parent is None:
Clock.unschedule(self._check_parent)
if self.textinput:
self.textinput._hide_cut_copy_paste()
def on_parent(self, instance, value):
parent = self.textinput
mode = self.mode
if parent:
self.clear_widgets()
if mode == 'paste':
# show only paste on long touch
self.but_selectall.opacity = 1
widget_list = [self.but_selectall, ]
if not parent.readonly:
widget_list.append(self.but_paste)
elif parent.readonly:
# show only copy for read only text input
widget_list = (self.but_copy, )
else:
# normal mode
widget_list = (self.but_cut, self.but_copy, self.but_paste)
for widget in widget_list:
self.add_widget(widget)
def do(self, action):
textinput = self.textinput
if action == 'cut':
textinput._cut(textinput.selection_text)
elif action == 'copy':
textinput.copy()
elif action == 'paste':
textinput.paste()
elif action == 'selectall':
textinput.select_all()
self.mode = ''
anim = Animation(opacity=0, d=.333)
anim.bind(on_complete=lambda *args:
self.on_parent(self, self.parent))
anim.start(self.but_selectall)
return
self.hide()
def hide(self):
parent = self.parent
if not parent:
return
anim = Animation(opacity=0, d=.225)
anim.bind(on_complete=lambda *args: parent.remove_widget(self))
anim.start(self)
class TextInput(FocusBehavior, Widget):
'''TextInput class. See module documentation for more information.
:Events:
`on_text_validate`
Fired only in multiline=False mode when the user hits 'enter'.
This will also unfocus the textinput.
`on_double_tap`
Fired when a double tap happens in the text input. The default
behavior selects the text around the cursor position. More info at
:meth:`on_double_tap`.
`on_triple_tap`
Fired when a triple tap happens in the text input. The default
behavior selects the line around the cursor position. More info at
:meth:`on_triple_tap`.
`on_quad_touch`
Fired when four fingers are touching the text input. The default
behavior selects the whole text. More info at
:meth:`on_quad_touch`.
.. warning::
When changing a :class:`TextInput` property that requires re-drawing,
e.g. modifying the :attr:`text`, the updates occur on the next
clock cycle and not instantly. This might cause any changes to the
:class:`TextInput` that occur between the modification and the next
cycle to be ignored, or to use previous values. For example, after
        an update to the :attr:`text`, changing the cursor in the same clock
frame will move it using the previous text and will likely end up in an
incorrect position. The solution is to schedule any updates to occur
on the next clock cycle using
:meth:`~kivy.clock.ClockBase.schedule_once`.
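        For example (``ti`` names an already-created TextInput and ``Clock``
        is ``kivy.clock.Clock``)::
            ti.text = 'new text'
            Clock.schedule_once(lambda dt: setattr(ti, 'cursor', (0, 0)))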
.. Note::
Selection is cancelled when TextInput is focused. If you need to
show selection when TextInput is focused, you should delay
(use Clock.schedule) the call to the functions for selecting
text (select_all, select_text).
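        A minimal sketch of this pattern (``ti`` names an already-created
        TextInput)::
            from kivy.clock import Clock
            ti.focus = True
            Clock.schedule_once(lambda dt: ti.select_all())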
.. versionchanged:: 1.9.0
:class:`TextInput` now inherits from
:class:`~kivy.uix.behaviors.FocusBehavior`.
:attr:`~kivy.uix.behaviors.FocusBehavior.keyboard_mode`,
:meth:`~kivy.uix.behaviors.FocusBehavior.show_keyboard`,
:meth:`~kivy.uix.behaviors.FocusBehavior.hide_keyboard`,
:meth:`~kivy.uix.behaviors.FocusBehavior.focus`,
and :attr:`~kivy.uix.behaviors.FocusBehavior.input_type`
have been removed since they are now inherited
from :class:`~kivy.uix.behaviors.FocusBehavior`.
.. versionchanged:: 1.7.0
`on_double_tap`, `on_triple_tap` and `on_quad_touch` events added.
'''
__events__ = ('on_text_validate', 'on_double_tap', 'on_triple_tap',
'on_quad_touch')
def __init__(self, **kwargs):
self.is_focusable = kwargs.get('is_focusable', True)
self._cursor_blink_time = Clock.get_time()
self._cursor = [0, 0]
self._selection = False
self._selection_finished = True
self._selection_touch = None
self.selection_text = u''
self._selection_from = None
self._selection_to = None
self._selection_callback = None
self._handle_left = None
self._handle_right = None
self._handle_middle = None
self._bubble = None
self._lines_flags = []
self._lines_labels = []
self._lines_rects = []
self._hint_text_flags = []
self._hint_text_labels = []
self._hint_text_rects = []
self._label_cached = None
self._line_options = None
self._keyboard_mode = Config.get('kivy', 'keyboard_mode')
self._command_mode = False
self._command = ''
self.reset_undo()
self._touch_count = 0
self._ctrl_l = False
self._ctrl_r = False
self._alt_l = False
self._alt_r = False
self.interesting_keys = {
8: 'backspace',
13: 'enter',
127: 'del',
271: 'enter',
273: 'cursor_up',
274: 'cursor_down',
275: 'cursor_right',
276: 'cursor_left',
278: 'cursor_home',
279: 'cursor_end',
280: 'cursor_pgup',
281: 'cursor_pgdown',
303: 'shift_L',
304: 'shift_R',
305: 'ctrl_L',
306: 'ctrl_R',
308: 'alt_L',
307: 'alt_R'}
super(TextInput, self).__init__(**kwargs)
fbind = self.fbind
refresh_line_options = self._trigger_refresh_line_options
update_text_options = self._update_text_options
fbind('font_size', refresh_line_options)
fbind('font_name', refresh_line_options)
def handle_readonly(instance, value):
if value and (not _is_desktop or not self.allow_copy):
self.is_focusable = False
fbind('padding', update_text_options)
fbind('tab_width', update_text_options)
fbind('font_size', update_text_options)
fbind('font_name', update_text_options)
fbind('size', update_text_options)
fbind('password', update_text_options)
fbind('password_mask', update_text_options)
fbind('pos', self._trigger_update_graphics)
fbind('readonly', handle_readonly)
fbind('focus', self._on_textinput_focused)
handle_readonly(self, self.readonly)
handles = self._trigger_position_handles = Clock.create_trigger(
self._position_handles)
self._trigger_show_handles = Clock.create_trigger(
self._show_handles, .05)
self._trigger_update_cutbuffer = Clock.create_trigger(
self._update_cutbuffer)
refresh_line_options()
self._trigger_refresh_text()
fbind('pos', handles)
fbind('size', handles)
# when the gl context is reloaded, trigger the text rendering again.
_textinput_list.append(ref(self, TextInput._reload_remove_observer))
if platform == 'linux':
self._ensure_clipboard()
def on_text_validate(self):
pass
def cursor_index(self, cursor=None):
'''Return the cursor index in the text/value.
'''
if not cursor:
cursor = self.cursor
try:
l = self._lines
if len(l) == 0:
return 0
lf = self._lines_flags
index, cr = cursor
for row in range(cr):
if row >= len(l):
continue
index += len(l[row])
if lf[row] & FL_IS_NEWLINE:
index += 1
if lf[cr] & FL_IS_NEWLINE:
index += 1
return index
except IndexError:
return 0
def cursor_offset(self):
'''Get the cursor x offset on the current line.
'''
offset = 0
row = self.cursor_row
col = self.cursor_col
_lines = self._lines
if col and row < len(_lines):
offset = self._get_text_width(
_lines[row][:col], self.tab_width,
self._label_cached)
return offset
def get_cursor_from_index(self, index):
        '''Return the (col, row) of the cursor from the text index.
'''
index = boundary(index, 0, len(self.text))
if index <= 0:
return 0, 0
lf = self._lines_flags
l = self._lines
i = 0
for row in range(len(l)):
ni = i + len(l[row])
if lf[row] & FL_IS_NEWLINE:
ni += 1
i += 1
if ni >= index:
return index - i, row
i = ni
return index, row
def select_text(self, start, end):
''' Select a portion of text displayed in this TextInput.
.. versionadded:: 1.4.0
:Parameters:
`start`
Index of textinput.text from where to start selection
`end`
Index of textinput.text till which the selection should be
displayed
'''
if end < start:
raise Exception('end must be superior to start')
m = len(self.text)
self._selection_from = boundary(start, 0, m)
self._selection_to = boundary(end, 0, m)
self._selection_finished = True
self._update_selection(True)
self._update_graphics_selection()
def select_all(self):
''' Select all of the text displayed in this TextInput.
.. versionadded:: 1.4.0
'''
self.select_text(0, len(self.text))
re_indent = re.compile('^(\s*|)')
def _auto_indent(self, substring):
index = self.cursor_index()
_text = self._get_text(encode=False)
if index > 0:
line_start = _text.rfind('\n', 0, index)
if line_start > -1:
line = _text[line_start + 1:index]
indent = self.re_indent.match(line).group()
substring += indent
return substring
def insert_text(self, substring, from_undo=False):
'''Insert new text at the current cursor position. Override this
function in order to pre-process text for input validation.
'''
if self.readonly or not substring:
return
if isinstance(substring, bytes):
substring = substring.decode('utf8')
if self.replace_crlf:
substring = substring.replace(u'\r\n', u'\n')
mode = self.input_filter
if mode is not None:
chr = type(substring)
if chr is bytes:
int_pat = self._insert_int_patb
else:
int_pat = self._insert_int_patu
if mode == 'int':
substring = re.sub(int_pat, chr(''), substring)
elif mode == 'float':
if '.' in self.text:
substring = re.sub(int_pat, chr(''), substring)
else:
substring = '.'.join([re.sub(int_pat, chr(''), k) for k
in substring.split(chr('.'), 1)])
else:
substring = mode(substring, from_undo)
if not substring:
return
self._hide_handles(EventLoop.window)
if not from_undo and self.multiline and self.auto_indent \
and substring == u'\n':
substring = self._auto_indent(substring)
cc, cr = self.cursor
sci = self.cursor_index
ci = sci()
text = self._lines[cr]
len_str = len(substring)
new_text = text[:cc] + substring + text[cc:]
self._set_line_text(cr, new_text)
wrap = (self._get_text_width(
new_text,
self.tab_width,
self._label_cached) > self.width)
if len_str > 1 or substring == u'\n' or wrap:
# Avoid refreshing text on every keystroke.
# Allows for faster typing of text when the amount of text in
# TextInput gets large.
start, finish, lines,\
lineflags, len_lines = self._get_line_from_cursor(cr, new_text)
# calling trigger here could lead to wrong cursor positioning
            # and repeating of text when keys are added rapidly in an automated
# fashion. From Android Keyboard for example.
self._refresh_text_from_property('insert', start, finish, lines,
lineflags, len_lines)
self.cursor = self.get_cursor_from_index(ci + len_str)
# handle undo and redo
self._set_unredo_insert(ci, ci + len_str, substring, from_undo)
def _get_line_from_cursor(self, start, new_text):
# get current paragraph from cursor position
finish = start
lines = self._lines
linesflags = self._lines_flags
if start and not linesflags[start]:
start -= 1
new_text = u''.join((lines[start], new_text))
try:
while not linesflags[finish + 1]:
new_text = u''.join((new_text, lines[finish + 1]))
finish += 1
except IndexError:
pass
lines, lineflags = self._split_smart(new_text)
len_lines = max(1, len(lines))
return start, finish, lines, lineflags, len_lines
def _set_unredo_insert(self, ci, sci, substring, from_undo):
# handle undo and redo
if from_undo:
return
self._undo.append({'undo_command': ('insert', ci, sci),
'redo_command': (ci, substring)})
# reset redo when undo is appended to
self._redo = []
def reset_undo(self):
'''Reset undo and redo lists from memory.
.. versionadded:: 1.3.0
'''
self._redo = self._undo = []
def do_redo(self):
'''Do redo operation.
.. versionadded:: 1.3.0
This action re-does any command that has been un-done by
do_undo/ctrl+z. This function is automatically called when
`ctrl+r` keys are pressed.
'''
try:
x_item = self._redo.pop()
undo_type = x_item['undo_command'][0]
_get_cusror_from_index = self.get_cursor_from_index
if undo_type == 'insert':
ci, substring = x_item['redo_command']
self.cursor = _get_cusror_from_index(ci)
self.insert_text(substring, True)
elif undo_type == 'bkspc':
self.cursor = _get_cusror_from_index(x_item['redo_command'])
self.do_backspace(from_undo=True)
elif undo_type == 'shiftln':
direction, rows, cursor = x_item['redo_command'][1:]
self._shift_lines(direction, rows, cursor, True)
else:
# delsel
ci, sci = x_item['redo_command']
self._selection_from = ci
self._selection_to = sci
self._selection = True
self.delete_selection(True)
self.cursor = _get_cusror_from_index(ci)
self._undo.append(x_item)
except IndexError:
# reached at top of undo list
pass
def do_undo(self):
'''Do undo operation.
.. versionadded:: 1.3.0
This action un-does any edits that have been made since the last
call to reset_undo().
This function is automatically called when `ctrl+z` keys are pressed.
'''
try:
x_item = self._undo.pop()
undo_type = x_item['undo_command'][0]
self.cursor = self.get_cursor_from_index(x_item['undo_command'][1])
if undo_type == 'insert':
ci, sci = x_item['undo_command'][1:]
self._selection_from = ci
self._selection_to = sci
self._selection = True
self.delete_selection(True)
elif undo_type == 'bkspc':
substring = x_item['undo_command'][2:][0]
self.insert_text(substring, True)
elif undo_type == 'shiftln':
direction, rows, cursor = x_item['undo_command'][1:]
self._shift_lines(direction, rows, cursor, True)
else:
# delsel
substring = x_item['undo_command'][2:][0]
self.insert_text(substring, True)
self._redo.append(x_item)
except IndexError:
# reached at top of undo list
pass
def do_backspace(self, from_undo=False, mode='bkspc'):
'''Do backspace operation from the current cursor position.
This action might do several things:
- removing the current selection if available.
- removing the previous char and move the cursor back.
- do nothing, if we are at the start.
'''
if self.readonly:
return
cc, cr = self.cursor
_lines = self._lines
text = _lines[cr]
cursor_index = self.cursor_index()
text_last_line = _lines[cr - 1]
if cc == 0 and cr == 0:
return
_lines_flags = self._lines_flags
start = cr
if cc == 0:
substring = u'\n' if _lines_flags[cr] else u' '
new_text = text_last_line + text
self._set_line_text(cr - 1, new_text)
self._delete_line(cr)
start = cr - 1
else:
#ch = text[cc-1]
substring = text[cc - 1]
new_text = text[:cc - 1] + text[cc:]
self._set_line_text(cr, new_text)
# refresh just the current line instead of the whole text
start, finish, lines, lineflags, len_lines =\
self._get_line_from_cursor(start, new_text)
        # avoid triggering a refresh; it leads to issues when keys/text
        # are sent rapidly through code.
self._refresh_text_from_property('del', start, finish, lines,
lineflags, len_lines)
self.cursor = self.get_cursor_from_index(cursor_index - 1)
# handle undo and redo
self._set_undo_redo_bkspc(
cursor_index,
cursor_index - 1,
substring, from_undo)
def _set_undo_redo_bkspc(self, ol_index, new_index, substring, from_undo):
# handle undo and redo for backspace
if from_undo:
return
self._undo.append({
'undo_command': ('bkspc', new_index, substring),
'redo_command': ol_index})
#reset redo when undo is appended to
self._redo = []
_re_whitespace = re.compile(r'\s+')
def _move_cursor_word_left(self, index=None):
pos = index or self.cursor_index()
if pos == 0:
return self.cursor
lines = self._lines
col, row = self.get_cursor_from_index(pos)
if col == 0:
row -= 1
col = len(lines[row])
while True:
matches = list(self._re_whitespace.finditer(lines[row], 0, col))
if not matches:
if col == 0:
if row == 0:
return 0, 0
row -= 1
col = len(lines[row])
continue
return 0, row
match = matches[-1]
mpos = match.end()
if mpos == col:
if len(matches) > 1:
match = matches[-2]
mpos = match.end()
else:
if match.start() == 0:
if row == 0:
return 0, 0
row -= 1
col = len(lines[row])
continue
return 0, row
col = mpos
return col, row
def _move_cursor_word_right(self, index=None):
pos = index or self.cursor_index()
col, row = self.get_cursor_from_index(pos)
lines = self._lines
mrow = len(lines) - 1
if row == mrow and col == len(lines[row]):
return col, row
if col == len(lines[row]):
row += 1
col = 0
while True:
matches = list(self._re_whitespace.finditer(lines[row], col))
if not matches:
if col == len(lines[row]):
if row == mrow:
return col, row
row += 1
col = 0
continue
return len(lines[row]), row
match = matches[0]
mpos = match.start()
if mpos == col:
if len(matches) > 1:
match = matches[1]
mpos = match.start()
else:
if match.end() == len(lines[row]):
if row == mrow:
return col, row
row += 1
col = 0
continue
return len(lines[row]), row
col = mpos
return col, row
def _expand_range(self, ifrom, ito=None):
if ito is None:
ito = ifrom
rfrom = self.get_cursor_from_index(ifrom)[1]
rtcol, rto = self.get_cursor_from_index(ito)
rfrom, rto = self._expand_rows(rfrom, rto + 1 if rtcol else rto)
return (self.cursor_index((0, rfrom)),
self.cursor_index((0, rto)))
def _expand_rows(self, rfrom, rto=None):
if rto is None or rto == rfrom:
rto = rfrom + 1
lines = self._lines
flags = list(reversed(self._lines_flags))
while rfrom > 0 and not (flags[rfrom - 1] & FL_IS_NEWLINE):
rfrom -= 1
rmax = len(lines) - 1
while 0 < rto < rmax and not (flags[rto - 1] & FL_IS_NEWLINE):
rto += 1
return max(0, rfrom), min(rmax, rto)
def _shift_lines(self, direction, rows=None, old_cursor=None,
from_undo=False):
if self._selection_callback:
if from_undo:
self._selection_callback.cancel()
else:
return
lines = self._lines
flags = list(reversed(self._lines_flags))
labels = self._lines_labels
rects = self._lines_rects
orig_cursor = self.cursor
sel = None
if old_cursor is not None:
self.cursor = old_cursor
if not rows:
sindex = self.selection_from
eindex = self.selection_to
if (sindex or eindex) and sindex != eindex:
sindex, eindex = tuple(sorted((sindex, eindex)))
sindex, eindex = self._expand_range(sindex, eindex)
else:
sindex, eindex = self._expand_range(self.cursor_index())
srow = self.get_cursor_from_index(sindex)[1]
erow = self.get_cursor_from_index(eindex)[1]
sel = sindex, eindex
if direction < 0 and srow > 0:
psrow, perow = self._expand_rows(srow - 1)
rows = ((srow, erow), (psrow, perow))
elif direction > 0 and erow < len(lines) - 1:
psrow, perow = self._expand_rows(erow)
rows = ((srow, erow), (psrow, perow))
if rows:
(srow, erow), (psrow, perow) = rows
if direction < 0:
m1srow, m1erow = psrow, perow
m2srow, m2erow = srow, erow
cdiff = psrow - perow
xdiff = srow - erow
else:
m1srow, m1erow = srow, erow
m2srow, m2erow = psrow, perow
cdiff = perow - psrow
xdiff = erow - srow
self._lines_flags = list(reversed(
flags[:m1srow] + flags[m2srow:m2erow] + flags[m1srow:m1erow] +
flags[m2erow:]))
self._lines = (lines[:m1srow] + lines[m2srow:m2erow] +
lines[m1srow:m1erow] + lines[m2erow:])
self._lines_labels = (labels[:m1srow] + labels[m2srow:m2erow] +
labels[m1srow:m1erow] + labels[m2erow:])
self._lines_rects = (rects[:m1srow] + rects[m2srow:m2erow] +
rects[m1srow:m1erow] + rects[m2erow:])
self._trigger_update_graphics()
csrow = srow + cdiff
cerow = erow + cdiff
sel = (self.cursor_index((0, csrow)),
self.cursor_index((0, cerow)))
self.cursor = self.cursor_col, self.cursor_row + cdiff
if not from_undo:
undo_rows = ((srow + cdiff, erow + cdiff),
(psrow - xdiff, perow - xdiff))
self._undo.append({
'undo_command': ('shiftln', direction * -1, undo_rows,
self.cursor),
'redo_command': ('shiftln', direction, rows, orig_cursor),
})
self._redo = []
if sel:
def cb(dt):
self.select_text(*sel)
self._selection_callback = None
self._selection_callback = Clock.schedule_once(cb)
def do_cursor_movement(self, action, control=False, alt=False):
        '''Move the cursor relative to its current position.
Action can be one of :
- cursor_left: move the cursor to the left
- cursor_right: move the cursor to the right
- cursor_up: move the cursor on the previous line
- cursor_down: move the cursor on the next line
- cursor_home: move the cursor at the start of the current line
- cursor_end: move the cursor at the end of current line
- cursor_pgup: move one "page" before
- cursor_pgdown: move one "page" after
In addition, the behavior of certain actions can be modified:
- control + cursor_left: move the cursor one word to the left
- control + cursor_right: move the cursor one word to the right
- control + cursor_up: scroll up one line
- control + cursor_down: scroll down one line
- control + cursor_home: go to beginning of text
- control + cursor_end: go to end of text
- alt + cursor_up: shift line(s) up
- alt + cursor_down: shift line(s) down
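        For example, ``ti.do_cursor_movement('cursor_end', control=True)``
        moves the cursor to the very end of the text (``ti`` being an
        existing TextInput instance).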
.. versionchanged:: 1.9.1
'''
pgmove_speed = int(self.height /
(self.line_height + self.line_spacing) - 1)
col, row = self.cursor
if action == 'cursor_up':
if self.multiline and control:
self.scroll_y = max(0, self.scroll_y - self.line_height)
elif not self.readonly and self.multiline and alt:
self._shift_lines(-1)
return
else:
row = max(row - 1, 0)
col = min(len(self._lines[row]), col)
elif action == 'cursor_down':
if self.multiline and control:
maxy = self.minimum_height - self.height
self.scroll_y = max(0, min(maxy,
self.scroll_y + self.line_height))
elif not self.readonly and self.multiline and alt:
self._shift_lines(1)
return
else:
row = min(row + 1, len(self._lines) - 1)
col = min(len(self._lines[row]), col)
elif action == 'cursor_left':
if not self.password and control:
col, row = self._move_cursor_word_left()
else:
if col == 0:
if row:
row -= 1
col = len(self._lines[row])
else:
col, row = col - 1, row
elif action == 'cursor_right':
if not self.password and control:
col, row = self._move_cursor_word_right()
else:
if col == len(self._lines[row]):
if row < len(self._lines) - 1:
col = 0
row += 1
else:
col, row = col + 1, row
elif action == 'cursor_home':
col = 0
if control:
row = 0
elif action == 'cursor_end':
if control:
row = len(self._lines) - 1
col = len(self._lines[row])
elif action == 'cursor_pgup':
row = max(0, row - pgmove_speed)
col = min(len(self._lines[row]), col)
elif action == 'cursor_pgdown':
row = min(row + pgmove_speed, len(self._lines) - 1)
col = min(len(self._lines[row]), col)
self.cursor = (col, row)
def get_cursor_from_xy(self, x, y):
        '''Return the (col, row) of the cursor from an (x, y) position.
'''
padding_left = self.padding[0]
padding_top = self.padding[1]
l = self._lines
dy = self.line_height + self.line_spacing
cx = x - self.x
scrl_y = self.scroll_y
scrl_x = self.scroll_x
scrl_y = scrl_y / dy if scrl_y > 0 else 0
cy = (self.top - padding_top + scrl_y * dy) - y
cy = int(boundary(round(cy / dy - 0.5), 0, len(l) - 1))
_get_text_width = self._get_text_width
_tab_width = self.tab_width
_label_cached = self._label_cached
for i in range(0, len(l[cy])):
if _get_text_width(l[cy][:i], _tab_width, _label_cached) + \
_get_text_width(l[cy][i], _tab_width, _label_cached)*0.6 + \
padding_left > cx + scrl_x:
cx = i
break
return cx, cy
#
# Selection control
#
def cancel_selection(self):
'''Cancel current selection (if any).
'''
self._selection_from = self._selection_to = self.cursor_index()
self._selection = False
self._selection_finished = True
self._selection_touch = None
self._trigger_update_graphics()
def delete_selection(self, from_undo=False):
'''Delete the current text selection (if any).
'''
if self.readonly:
return
self._hide_handles(EventLoop.window)
scrl_x = self.scroll_x
scrl_y = self.scroll_y
cc, cr = self.cursor
if not self._selection:
return
v = self._get_text(encode=False)
a, b = self._selection_from, self._selection_to
if a > b:
a, b = b, a
self.cursor = cursor = self.get_cursor_from_index(a)
start = cursor
finish = self.get_cursor_from_index(b)
cur_line = self._lines[start[1]][:start[0]] +\
self._lines[finish[1]][finish[0]:]
lines, lineflags = self._split_smart(cur_line)
len_lines = len(lines)
if start[1] == finish[1]:
self._set_line_text(start[1], cur_line)
else:
self._refresh_text_from_property('del', start[1], finish[1], lines,
lineflags, len_lines)
self.scroll_x = scrl_x
self.scroll_y = scrl_y
        # handle undo and redo for delete selection
self._set_unredo_delsel(a, b, v[a:b], from_undo)
self.cancel_selection()
def _set_unredo_delsel(self, a, b, substring, from_undo):
        # handle undo and redo for delete selection
if from_undo:
return
self._undo.append({
'undo_command': ('delsel', a, substring),
'redo_command': (a, b)})
# reset redo when undo is appended to
self._redo = []
def _update_selection(self, finished=False):
'''Update selection text and order of from/to if finished is True.
Can be called multiple times until finished is True.
'''
a, b = self._selection_from, self._selection_to
if a > b:
a, b = b, a
self._selection_finished = finished
_selection_text = self._get_text(encode=False)[a:b]
self.selection_text = ("" if not self.allow_copy else
((self.password_mask * (b - a)) if
self.password else _selection_text))
if not finished:
self._selection = True
else:
self._selection = bool(len(_selection_text))
self._selection_touch = None
if a == 0:
# update graphics only on new line
# allows smoother scrolling, noticeably
# faster when dealing with large text.
self._update_graphics_selection()
#self._trigger_update_graphics()
#
# Touch control
#
def long_touch(self, dt):
if self._selection_to == self._selection_from:
pos = self.to_local(*self._long_touch_pos, relative=True)
self._show_cut_copy_paste(
pos, EventLoop.window, mode='paste')
def on_double_tap(self):
'''This event is dispatched when a double tap happens
inside TextInput. The default behavior is to select the
word around the current cursor position. Override this to provide
different behavior. Alternatively, you can bind to this
event to provide additional functionality.
'''
ci = self.cursor_index()
cc = self.cursor_col
line = self._lines[self.cursor_row]
len_line = len(line)
start = max(0, len(line[:cc]) - line[:cc].rfind(u' ') - 1)
end = line[cc:].find(u' ')
end = end if end > - 1 else (len_line - cc)
Clock.schedule_once(lambda dt: self.select_text(ci - start, ci + end))
def on_triple_tap(self):
'''This event is dispatched when a triple tap happens
inside TextInput. The default behavior is to select the
line around current cursor position. Override this to provide
different behavior. Alternatively, you can bind to this
event to provide additional functionality.
'''
ci = self.cursor_index()
sindex, eindex = self._expand_range(ci)
Clock.schedule_once(lambda dt: self.select_text(sindex, eindex))
def on_quad_touch(self):
'''This event is dispatched when four fingers are touching
inside TextInput. The default behavior is to select all text.
Override this to provide different behavior. Alternatively,
you can bind to this event to provide additional functionality.
'''
Clock.schedule_once(lambda dt: self.select_all())
def on_touch_down(self, touch):
if self.disabled:
return
touch_pos = touch.pos
if not self.collide_point(*touch_pos):
return False
if super(TextInput, self).on_touch_down(touch):
return True
# Check for scroll wheel
if 'button' in touch.profile and touch.button.startswith('scroll'):
scroll_type = touch.button[6:]
if scroll_type == 'down':
if self.multiline:
if self.scroll_y <= 0:
return
self.scroll_y -= self.line_height
else:
if self.scroll_x <= 0:
return
self.scroll_x -= self.line_height
if scroll_type == 'up':
if self.multiline:
if (self._lines_rects[-1].pos[1] > self.y +
self.line_height):
return
self.scroll_y += self.line_height
else:
if (self.scroll_x + self.width >=
self._lines_rects[-1].texture.size[0]):
return
self.scroll_x += self.line_height
touch.grab(self)
self._touch_count += 1
if touch.is_double_tap:
self.dispatch('on_double_tap')
if touch.is_triple_tap:
self.dispatch('on_triple_tap')
if self._touch_count == 4:
self.dispatch('on_quad_touch')
self._hide_cut_copy_paste(EventLoop.window)
# schedule long touch for paste
self._long_touch_pos = touch.pos
Clock.schedule_once(self.long_touch, .5)
self.cursor = self.get_cursor_from_xy(*touch_pos)
if not self._selection_touch:
self.cancel_selection()
self._selection_touch = touch
self._selection_from = self._selection_to = self.cursor_index()
self._update_selection()
if CutBuffer and 'button' in touch.profile and touch.button == 'middle':
self.insert_text(CutBuffer.get_cutbuffer())
return True
return False
def on_touch_move(self, touch):
if touch.grab_current is not self:
return
if not self.focus:
touch.ungrab(self)
if self._selection_touch is touch:
self._selection_touch = None
return False
if self._selection_touch is touch:
self.cursor = self.get_cursor_from_xy(touch.x, touch.y)
self._selection_to = self.cursor_index()
self._update_selection()
return True
def on_touch_up(self, touch):
if touch.grab_current is not self:
return
touch.ungrab(self)
self._touch_count -= 1
# schedule long touch for paste
Clock.unschedule(self.long_touch)
if not self.focus:
return False
if self._selection_touch is touch:
self._selection_to = self.cursor_index()
self._update_selection(True)
# show Bubble
win = EventLoop.window
if self._selection_to != self._selection_from:
self._show_cut_copy_paste(touch.pos, win)
elif self.use_handles:
self._hide_handles()
handle_middle = self._handle_middle
if handle_middle is None:
self._handle_middle = handle_middle = Selector(
source=self.handle_image_middle,
window=win,
target=self,
size_hint=(None, None),
size=('45dp', '45dp'))
handle_middle.bind(on_press=self._handle_pressed,
on_touch_move=self._handle_move,
on_release=self._handle_released)
if not self._handle_middle.parent and self.text:
EventLoop.window.add_widget(handle_middle, canvas='after')
self._position_handles(mode='middle')
return True
def _handle_pressed(self, instance):
self._hide_cut_copy_paste()
sf, st = self._selection_from, self.selection_to
if sf > st:
self._selection_from, self._selection_to = st, sf
def _handle_released(self, instance):
sf, st = self._selection_from, self.selection_to
if sf == st:
return
self._update_selection()
self._show_cut_copy_paste(
(instance.right if instance is self._handle_left else instance.x,
instance.top + self.line_height),
EventLoop.window)
def _handle_move(self, instance, touch):
if touch.grab_current != instance:
return
get_cursor = self.get_cursor_from_xy
handle_right = self._handle_right
handle_left = self._handle_left
handle_middle = self._handle_middle
try:
touch.push()
touch.apply_transform_2d(self.to_widget)
x, y = touch.pos
finally:
touch.pop()
cursor = get_cursor(
x,
y + instance._touch_diff + (self.line_height / 2))
if instance != touch.grab_current:
return
if instance == handle_middle:
self.cursor = cursor
self._position_handles(mode='middle')
return
ci = self.cursor_index(cursor=cursor)
sf, st = self._selection_from, self.selection_to
if instance == handle_left:
self._selection_from = ci
elif instance == handle_right:
self._selection_to = ci
self._trigger_update_graphics()
self._trigger_position_handles()
def _position_handles(self, *args, **kwargs):
if not self.text:
return
mode = kwargs.get('mode', 'both')
lh = self.line_height
handle_middle = self._handle_middle
if handle_middle:
hp_mid = self.cursor_pos
pos = self.to_local(*hp_mid, relative=True)
handle_middle.x = pos[0] - handle_middle.width / 2
handle_middle.top = pos[1] - lh
if mode[0] == 'm':
return
group = self.canvas.get_group('selection')
if not group:
return
EventLoop.window.remove_widget(self._handle_middle)
handle_left = self._handle_left
if not handle_left:
return
hp_left = group[2].pos
handle_left.pos = self.to_local(*hp_left, relative=True)
handle_left.x -= handle_left.width
handle_left.y -= handle_left.height
handle_right = self._handle_right
last_rect = group[-1]
hp_right = last_rect.pos[0], last_rect.pos[1]
x, y = self.to_local(*hp_right, relative=True)
handle_right.x = x + last_rect.size[0]
handle_right.y = y - handle_right.height
def _hide_handles(self, win=None):
win = win or EventLoop.window
if win is None:
return
win.remove_widget(self._handle_right)
win.remove_widget(self._handle_left)
win.remove_widget(self._handle_middle)
def _show_handles(self, dt):
if not self.use_handles or not self.text:
return
win = EventLoop.window
handle_right = self._handle_right
handle_left = self._handle_left
if self._handle_left is None:
self._handle_left = handle_left = Selector(
source=self.handle_image_left,
target=self,
window=win,
size_hint=(None, None),
size=('45dp', '45dp'))
handle_left.bind(on_press=self._handle_pressed,
on_touch_move=self._handle_move,
on_release=self._handle_released)
self._handle_right = handle_right = Selector(
source=self.handle_image_right,
target=self,
window=win,
size_hint=(None, None),
size=('45dp', '45dp'))
handle_right.bind(on_press=self._handle_pressed,
on_touch_move=self._handle_move,
on_release=self._handle_released)
else:
if self._handle_left.parent:
self._position_handles()
return
if not self.parent:
return
self._trigger_position_handles()
if self.selection_from != self.selection_to:
self._handle_left.opacity = self._handle_right.opacity = 0
win.add_widget(self._handle_left, canvas='after')
win.add_widget(self._handle_right, canvas='after')
anim = Animation(opacity=1, d=.4)
anim.start(self._handle_right)
anim.start(self._handle_left)
def _show_cut_copy_paste(self, pos, win, parent_changed=False,
mode='', pos_in_window=False, *l):
# Show a bubble with cut copy and paste buttons
if not self.use_bubble:
return
bubble = self._bubble
if bubble is None:
self._bubble = bubble = TextInputCutCopyPaste(textinput=self)
self.fbind('parent', self._show_cut_copy_paste, pos, win, True)
win.bind(
size=lambda *args: self._hide_cut_copy_paste(win))
self.bind(cursor_pos=lambda *args: self._hide_cut_copy_paste(win))
else:
win.remove_widget(bubble)
if not self.parent:
return
if parent_changed:
return
# Search the position from the touch to the window
lh, ls = self.line_height, self.line_spacing
x, y = pos
t_pos = (x, y) if pos_in_window else self.to_window(x, y)
bubble_size = bubble.size
bubble_hw = bubble_size[0] / 2.
win_size = win.size
bubble_pos = (t_pos[0], t_pos[1] + inch(.25))
if (bubble_pos[0] - bubble_hw) < 0:
# bubble beyond left of window
if bubble_pos[1] > (win_size[1] - bubble_size[1]):
# bubble above window height
bubble_pos = (bubble_hw, (t_pos[1]) - (lh + ls + inch(.25)))
bubble.arrow_pos = 'top_left'
else:
bubble_pos = (bubble_hw, bubble_pos[1])
bubble.arrow_pos = 'bottom_left'
elif (bubble_pos[0] + bubble_hw) > win_size[0]:
# bubble beyond right of window
if bubble_pos[1] > (win_size[1] - bubble_size[1]):
# bubble above window height
bubble_pos = (win_size[0] - bubble_hw,
(t_pos[1]) - (lh + ls + inch(.25)))
bubble.arrow_pos = 'top_right'
else:
bubble_pos = (win_size[0] - bubble_hw, bubble_pos[1])
bubble.arrow_pos = 'bottom_right'
else:
if bubble_pos[1] > (win_size[1] - bubble_size[1]):
# bubble above window height
bubble_pos = (bubble_pos[0],
(t_pos[1]) - (lh + ls + inch(.25)))
bubble.arrow_pos = 'top_mid'
else:
bubble.arrow_pos = 'bottom_mid'
bubble_pos = self.to_widget(*bubble_pos)
bubble.center_x = bubble_pos[0]
if bubble.arrow_pos[0] == 't':
bubble.top = bubble_pos[1]
else:
bubble.y = bubble_pos[1]
bubble.mode = mode
Animation.cancel_all(bubble)
bubble.opacity = 0
win.add_widget(bubble, canvas='after')
Animation(opacity=1, d=.225).start(bubble)
def _hide_cut_copy_paste(self, win=None):
bubble = self._bubble
if not bubble:
return
bubble.hide()
#
# Private
#
@staticmethod
def _reload_remove_observer(wr):
# called when the textinput is deleted
if wr in _textinput_list:
_textinput_list.remove(wr)
def _on_textinput_focused(self, instance, value, *largs):
self.focus = value
win = EventLoop.window
self.cancel_selection()
self._hide_cut_copy_paste(win)
if value:
if (not (self.readonly or self.disabled) or _is_desktop and
self._keyboard_mode == 'system'):
Clock.schedule_interval(self._do_blink_cursor, 1 / 2.)
self._editable = True
else:
self._editable = False
else:
Clock.unschedule(self._do_blink_cursor)
self._hide_handles(win)
def _ensure_clipboard(self):
global Clipboard, CutBuffer
if not Clipboard:
from kivy.core.clipboard import Clipboard, CutBuffer
def cut(self):
''' Copy current selection to clipboard then delete it from TextInput.
.. versionadded:: 1.8.0
'''
self._cut(self.selection_text)
def _cut(self, data):
self._ensure_clipboard()
Clipboard.copy(data)
self.delete_selection()
def copy(self, data=''):
''' Copy the value provided in argument `data` into current clipboard.
If data is not of type string it will be converted to string.
If no data is provided then current selection if present is copied.
.. versionadded:: 1.8.0
'''
self._ensure_clipboard()
if data:
return Clipboard.copy(data)
if self.selection_text:
return Clipboard.copy(self.selection_text)
def paste(self):
''' Insert text from system :class:`~kivy.core.clipboard.Clipboard`
into the :class:`~kivy.uix.textinput.TextInput` at current cursor
position.
.. versionadded:: 1.8.0
'''
self._ensure_clipboard()
data = Clipboard.paste()
self.delete_selection()
self.insert_text(data)
def _update_cutbuffer(self, *args):
CutBuffer.set_cutbuffer(self.selection_text)
def _get_text_width(self, text, tab_width, _label_cached):
# Return the width of a text, according to the current line options
kw = self._get_line_options()
try:
cid = u'{}\0{}\0{}'.format(text, self.password, kw)
except UnicodeDecodeError:
cid = '{}\0{}\0{}'.format(text, self.password, kw)
width = Cache_get('textinput.width', cid)
if width:
return width
if not _label_cached:
_label_cached = self._label_cached
text = text.replace('\t', ' ' * tab_width)
if not self.password:
width = _label_cached.get_extents(text)[0]
else:
width = _label_cached.get_extents(
self.password_mask * len(text))[0]
Cache_append('textinput.width', cid, width)
return width
def _do_blink_cursor(self, dt):
# Callback called by the timer to blink the cursor, according to the
# last activity in the widget
b = (Clock.get_time() - self._cursor_blink_time)
self.cursor_blink = int(b * 2) % 2
def on_cursor(self, instance, value):
# When the cursor is moved, reset the activity timer, and update all
# the graphics.
self._cursor_blink_time = Clock.get_time()
self._trigger_update_graphics()
def _delete_line(self, idx):
# Delete current line, and fix cursor position
assert(idx < len(self._lines))
self._lines_flags.pop(idx)
self._lines_labels.pop(idx)
self._lines.pop(idx)
self.cursor = self.cursor
def _set_line_text(self, line_num, text):
# Set current line with other text than the default one.
self._lines_labels[line_num] = self._create_line_label(text)
self._lines[line_num] = text
def _trigger_refresh_line_options(self, *largs):
Clock.unschedule(self._refresh_line_options)
Clock.schedule_once(self._refresh_line_options, 0)
def _refresh_line_options(self, *largs):
self._line_options = None
self._get_line_options()
self._refresh_text_from_property()
self._refresh_hint_text()
self.cursor = self.get_cursor_from_index(len(self.text))
def _trigger_refresh_text(self, *largs):
if len(largs) and largs[0] == self:
largs = ()
Clock.unschedule(lambda dt: self._refresh_text_from_property(*largs))
Clock.schedule_once(lambda dt:
self._refresh_text_from_property(*largs))
def _update_text_options(self, *largs):
Cache_remove('textinput.width')
self._trigger_refresh_text()
def _refresh_text_from_trigger(self, dt, *largs):
self._refresh_text_from_property(*largs)
def _refresh_text_from_property(self, *largs):
self._refresh_text(self._get_text(encode=False), *largs)
def _refresh_text(self, text, *largs):
# Refresh all the lines from a new text.
# By using cache in internal functions, this method should be fast.
mode = 'all'
if len(largs) > 1:
mode, start, finish, _lines, _lines_flags, len_lines = largs
#start = max(0, start)
cursor = None
else:
cursor = self.cursor_index()
_lines, self._lines_flags = self._split_smart(text)
_lines_labels = []
_line_rects = []
_create_label = self._create_line_label
for x in _lines:
lbl = _create_label(x)
_lines_labels.append(lbl)
_line_rects.append(Rectangle(size=lbl.size))
if mode == 'all':
self._lines_labels = _lines_labels
self._lines_rects = _line_rects
self._lines = _lines
elif mode == 'del':
if finish > start:
self._insert_lines(start,
finish if start == finish else (finish + 1),
len_lines, _lines_flags,
_lines, _lines_labels, _line_rects)
elif mode == 'insert':
self._insert_lines(
start,
finish if (start == finish and not len_lines)
else (finish + 1),
len_lines, _lines_flags, _lines, _lines_labels,
_line_rects)
min_line_ht = self._label_cached.get_extents('_')[1]
# with markup texture can be of height `1`
self.line_height = max(_lines_labels[0].height, min_line_ht)
#self.line_spacing = 2
# now, if the text changed, the cursor may not be at the same place as
# before, so try to restore the cursor to the right place
row = self.cursor_row
self.cursor = self.get_cursor_from_index(self.cursor_index()
if cursor is None else cursor)
# if we moved back to a new line, reset the scroll, otherwise the effect
# is ugly
if self.cursor_row != row:
self.scroll_x = 0
# with the new text don't forget to update graphics again
self._trigger_update_graphics()
def _insert_lines(self, start, finish, len_lines, _lines_flags,
_lines, _lines_labels, _line_rects):
self_lines_flags = self._lines_flags
_lins_flags = []
_lins_flags.extend(self_lines_flags[:start])
if len_lines:
# if not inserting at first line then
if start:
# make sure line flags restored for first line
# _split_smart assumes first line to be not a new line
_lines_flags[0] = self_lines_flags[start]
_lins_flags.extend(_lines_flags)
_lins_flags.extend(self_lines_flags[finish:])
self._lines_flags = _lins_flags
_lins_lbls = []
_lins_lbls.extend(self._lines_labels[:start])
if len_lines:
_lins_lbls.extend(_lines_labels)
_lins_lbls.extend(self._lines_labels[finish:])
self._lines_labels = _lins_lbls
_lins_rcts = []
_lins_rcts.extend(self._lines_rects[:start])
if len_lines:
_lins_rcts.extend(_line_rects)
_lins_rcts.extend(self._lines_rects[finish:])
self._lines_rects = _lins_rcts
_lins = []
_lins.extend(self._lines[:start])
if len_lines:
_lins.extend(_lines)
_lins.extend(self._lines[finish:])
self._lines = _lins
def _trigger_update_graphics(self, *largs):
Clock.unschedule(self._update_graphics)
Clock.schedule_once(self._update_graphics, -1)
def _update_graphics(self, *largs):
# Update all the graphics according to the current internal values.
#
# This is a little bit complex, because we have to:
# - handle scroll_x
# - handle padding
# - create rectangle for the lines matching the viewport
# - crop the texture coordinates to match the viewport
#
# This is the first step of graphics, the second is the selection.
self.canvas.clear()
add = self.canvas.add
lh = self.line_height
dy = lh + self.line_spacing
# adjust view if the cursor is going outside the bounds
sx = self.scroll_x
sy = self.scroll_y
# draw labels
if not self._lines or (
not self._lines[0] and len(self._lines) == 1):
rects = self._hint_text_rects
labels = self._hint_text_labels
lines = self._hint_text_lines
else:
rects = self._lines_rects
labels = self._lines_labels
lines = self._lines
padding_left, padding_top, padding_right, padding_bottom = self.padding
x = self.x + padding_left
y = self.top - padding_top + sy
miny = self.y + padding_bottom
maxy = self.top - padding_top
for line_num, value in enumerate(lines):
if miny <= y <= maxy + dy:
texture = labels[line_num]
size = list(texture.size)
texc = texture.tex_coords[:]
# calculate coordinates
viewport_pos = sx, 0
vw = self.width - padding_left - padding_right
vh = self.height - padding_top - padding_bottom
tw, th = list(map(float, size))
oh, ow = tch, tcw = texc[1:3]
tcx, tcy = 0, 0
# adjust size/texcoord according to viewport
if viewport_pos:
tcx, tcy = viewport_pos
tcx = tcx / tw * (ow)
tcy = tcy / th * oh
if tw - viewport_pos[0] < vw:
tcw = tcw - tcx
size[0] = tcw * size[0]
elif vw < tw:
tcw = (vw / tw) * tcw
size[0] = vw
if vh < th:
tch = (vh / th) * tch
size[1] = vh
# cropping
mlh = lh
if y > maxy:
vh = (maxy - y + lh)
tch = (vh / float(lh)) * oh
tcy = oh - tch
size[1] = vh
if y - lh < miny:
diff = miny - (y - lh)
y += diff
vh = lh - diff
tch = (vh / float(lh)) * oh
size[1] = vh
texc = (
tcx,
tcy + tch,
tcx + tcw,
tcy + tch,
tcx + tcw,
tcy,
tcx,
tcy)
# add rectangle.
r = rects[line_num]
r.pos = int(x), int(y - mlh)
r.size = size
r.texture = texture
r.tex_coords = texc
add(r)
y -= dy
self._update_graphics_selection()
def _update_graphics_selection(self):
if not self._selection:
return
self.canvas.remove_group('selection')
dy = self.line_height + self.line_spacing
rects = self._lines_rects
padding_top = self.padding[1]
padding_bottom = self.padding[3]
_top = self.top
y = _top - padding_top + self.scroll_y
miny = self.y + padding_bottom
maxy = _top - padding_top
draw_selection = self._draw_selection
a, b = self._selection_from, self._selection_to
if a > b:
a, b = b, a
get_cursor_from_index = self.get_cursor_from_index
s1c, s1r = get_cursor_from_index(a)
s2c, s2r = get_cursor_from_index(b)
s2r += 1
# pass only the selection lines[]
# passing all the lines can get slow when dealing with a lot of text
y -= s1r * dy
_lines = self._lines
_get_text_width = self._get_text_width
tab_width = self.tab_width
_label_cached = self._label_cached
width = self.width
padding_left = self.padding[0]
padding_right = self.padding[2]
x = self.x
canvas_add = self.canvas.add
selection_color = self.selection_color
for line_num, value in enumerate(_lines[s1r:s2r], start=s1r):
if miny <= y <= maxy + dy:
r = rects[line_num]
draw_selection(r.pos, r.size, line_num, (s1c, s1r),
(s2c, s2r - 1), _lines, _get_text_width,
tab_width, _label_cached, width,
padding_left, padding_right, x,
canvas_add, selection_color)
y -= dy
self._position_handles('both')
def _draw_selection(self, *largs):
pos, size, line_num, (s1c, s1r), (s2c, s2r),\
_lines, _get_text_width, tab_width, _label_cached, width,\
padding_left, padding_right, x, canvas_add, selection_color = largs
# Draw the current selection on the widget.
if line_num < s1r or line_num > s2r:
return
x, y = pos
w, h = size
x1 = x
x2 = x + w
if line_num == s1r:
lines = _lines[line_num]
x1 -= self.scroll_x
x1 += _get_text_width(lines[:s1c], tab_width, _label_cached)
if line_num == s2r:
lines = _lines[line_num]
x2 = (x - self.scroll_x) + _get_text_width(lines[:s2c],
tab_width,
_label_cached)
width_minus_padding = width - (padding_right + padding_left)
maxx = x + width_minus_padding
if x1 > maxx:
return
x1 = max(x1, x)
x2 = min(x2, x + width_minus_padding)
canvas_add(Color(*selection_color, group='selection'))
canvas_add(Rectangle(
pos=(x1, pos[1]), size=(x2 - x1, size[1]), group='selection'))
def on_size(self, instance, value):
# if the size changes, we might end up with invalid scrolling / text
# splitting; the text may only be sized after size_hint has been resolved.
self._trigger_refresh_text()
self._refresh_hint_text()
self.scroll_x = self.scroll_y = 0
def _get_cursor_pos(self):
# return the current cursor x/y from the row/col
dy = self.line_height + self.line_spacing
padding_left = self.padding[0]
padding_top = self.padding[1]
left = self.x + padding_left
top = self.top - padding_top
y = top + self.scroll_y
y -= self.cursor_row * dy
x, y = left + self.cursor_offset() - self.scroll_x, y
if x < left:
self.scroll_x = 0
x = left
if y > top:
y = top
self.scroll_y = 0
return x, y
def _get_line_options(self):
# Get or create line options, to be used for Label creation
if self._line_options is None:
self._line_options = kw = {
'font_size': self.font_size,
'font_name': self.font_name,
'anchor_x': 'left',
'anchor_y': 'top',
'padding_x': 0,
'padding_y': 0,
'padding': (0, 0)}
self._label_cached = Label(**kw)
return self._line_options
def _create_line_label(self, text, hint=False):
# Create a label from a text, using line options
ntext = text.replace(u'\n', u'').replace(u'\t', u' ' * self.tab_width)
if self.password and not hint: # Don't replace hint_text with *
ntext = self.password_mask * len(ntext)
kw = self._get_line_options()
cid = '%s\0%s' % (ntext, str(kw))
texture = Cache_get('textinput.label', cid)
if texture is None:
# FIXME right now, we can't render very long lines...
# if we move to a "VBO" version as fallback, we won't need to
# do this. Try to find the maximum text we can handle.
label = None
label_len = len(ntext)
ld = None
# check for blank line
if not ntext:
texture = Texture.create(size=(1, 1))
Cache_append('textinput.label', cid, texture)
return texture
while True:
try:
label = Label(text=ntext[:label_len], **kw)
label.refresh()
if ld is not None and ld > 2:
ld = int(ld / 2)
label_len += ld
else:
break
except:
# an exception happens when we try to render the text;
# reduce it...
if ld is None:
ld = len(ntext)
ld = int(ld / 2)
if ld < 2 and label_len:
label_len -= 1
label_len -= ld
continue
# ok, we found it.
texture = label.texture
Cache_append('textinput.label', cid, texture)
return texture
def _tokenize(self, text):
# Tokenize a text string from some delimiters
if text is None:
return
delimiters = u' ,\'".;:\n\r\t'
oldindex = 0
for index, char in enumerate(text):
if char not in delimiters:
continue
if oldindex != index:
yield text[oldindex:index]
yield text[index:index + 1]
oldindex = index + 1
yield text[oldindex:]
def _split_smart(self, text):
# Do a "smart" split. If autowidth or autosize is set,
# we are not doing smart split, just a split on line break.
# Otherwise, we are trying to split as soon as possible, to prevent
# overflow on the widget.
# depending on the options, split the text on lines or on words
if not self.multiline:
lines = text.split(u'\n')
lines_flags = [0] + [FL_IS_NEWLINE] * (len(lines) - 1)
return lines, lines_flags
# no autosize, do wordwrap.
x = flags = 0
line = []
lines = []
lines_flags = []
_join = u''.join
lines_append, lines_flags_append = lines.append, lines_flags.append
padding_left = self.padding[0]
padding_right = self.padding[2]
width = self.width - padding_left - padding_right
text_width = self._get_text_width
_tab_width, _label_cached = self.tab_width, self._label_cached
# try to add each word on current line.
for word in self._tokenize(text):
is_newline = (word == u'\n')
w = text_width(word, _tab_width, _label_cached)
# if we have more than the width, or if it's a newline,
# push the current line, and create a new one
if (x + w > width and line) or is_newline:
lines_append(_join(line))
lines_flags_append(flags)
flags = 0
line = []
x = 0
if is_newline:
flags |= FL_IS_NEWLINE
else:
x += w
line.append(word)
if line or flags & FL_IS_NEWLINE:
lines_append(_join(line))
lines_flags_append(flags)
return lines, lines_flags
def _key_down(self, key, repeat=False):
displayed_str, internal_str, internal_action, scale = key
if internal_action is None:
if self._selection:
self.delete_selection()
self.insert_text(displayed_str)
elif internal_action in ('shift', 'shift_L', 'shift_R'):
if not self._selection:
self._selection_from = self._selection_to = self.cursor_index()
self._selection = True
self._selection_finished = False
elif internal_action == 'ctrl_L':
self._ctrl_l = True
elif internal_action == 'ctrl_R':
self._ctrl_r = True
elif internal_action == 'alt_L':
self._alt_l = True
elif internal_action == 'alt_R':
self._alt_r = True
elif internal_action.startswith('cursor_'):
cc, cr = self.cursor
self.do_cursor_movement(internal_action,
self._ctrl_l or self._ctrl_r,
self._alt_l or self._alt_r)
if self._selection and not self._selection_finished:
self._selection_to = self.cursor_index()
self._update_selection()
else:
self.cancel_selection()
elif self._selection and internal_action in ('del', 'backspace'):
self.delete_selection()
elif internal_action == 'del':
# Move cursor one char to the right. If that was successful,
# do a backspace (effectively deleting char right of cursor)
cursor = self.cursor
self.do_cursor_movement('cursor_right')
if cursor != self.cursor:
self.do_backspace(mode='del')
elif internal_action == 'backspace':
self.do_backspace()
elif internal_action == 'enter':
if self.multiline:
self.insert_text(u'\n')
else:
self.dispatch('on_text_validate')
self.focus = False
elif internal_action == 'escape':
self.focus = False
if internal_action != 'escape':
#self._recalc_size()
pass
def _key_up(self, key, repeat=False):
displayed_str, internal_str, internal_action, scale = key
if internal_action in ('shift', 'shift_L', 'shift_R'):
if self._selection:
self._update_selection(True)
elif internal_action == 'ctrl_L':
self._ctrl_l = False
elif internal_action == 'ctrl_R':
self._ctrl_r = False
elif internal_action == 'alt_L':
self._alt_l = False
elif internal_action == 'alt_R':
self._alt_r = False
def keyboard_on_key_down(self, window, keycode, text, modifiers):
# Keycodes on OS X:
ctrl, cmd = 64, 1024
key, key_str = keycode
win = EventLoop.window
# This allows *either* ctrl *or* cmd, but not both.
is_shortcut = (modifiers == ['ctrl'] or (
_is_osx and modifiers == ['meta']))
is_interesting_key = key in (list(self.interesting_keys.keys()) + [27])
if not self.write_tab and super(TextInput,
self).keyboard_on_key_down(window, keycode, text, modifiers):
return True
if not self._editable:
# duplicated but faster testing for non-editable keys
if text and not is_interesting_key:
if is_shortcut and key == ord('c'):
self.copy()
elif key == 27:
self.focus = False
return True
if text and not is_interesting_key:
self._hide_handles(win)
self._hide_cut_copy_paste(win)
win.remove_widget(self._handle_middle)
# check for command modes
# we use \x01INFO\x02 to get info from IME on mobiles
# pygame seems to pass \x01 as the unicode for ctrl+a
# checking for modifiers ensures conflict resolution.
first_char = ord(text[0])
if not modifiers and first_char == 1:
self._command_mode = True
self._command = ''
if not modifiers and first_char == 2:
self._command_mode = False
self._command = self._command[1:]
if self._command_mode:
self._command += text
return
_command = self._command
if _command and first_char == 2:
from_undo = True
_command, data = _command.split(':')
self._command = ''
if self._selection:
self.delete_selection()
if _command == 'DEL':
count = int(data)
if not count:
self.delete_selection(from_undo=True)
end = self.cursor_index()
self._selection_from = max(end - count, 0)
self._selection_to = end
self._selection = True
self.delete_selection(from_undo=True)
return
elif _command == 'INSERT':
self.insert_text(data, from_undo)
elif _command == 'INSERTN':
from_undo = False
self.insert_text(data, from_undo)
elif _command == 'SELWORD':
self.dispatch('on_double_tap')
elif _command == 'SEL':
if data == '0':
Clock.schedule_once(lambda dt: self.cancel_selection())
elif _command == 'CURCOL':
self.cursor = int(data), self.cursor_row
return
if is_shortcut:
if key == ord('x'): # cut selection
self._cut(self.selection_text)
elif key == ord('c'): # copy selection
self.copy()
elif key == ord('v'): # paste selection
self.paste()
elif key == ord('a'): # select all
self.select_all()
elif key == ord('z'): # undo
self.do_undo()
elif key == ord('r'): # redo
self.do_redo()
else:
if EventLoop.window.__class__.__module__ == \
'kivy.core.window.window_sdl2':
return
if self._selection:
self.delete_selection()
self.insert_text(text)
#self._recalc_size()
return
if is_interesting_key:
self._hide_cut_copy_paste(win)
self._hide_handles(win)
if key == 27: # escape
self.focus = False
return True
elif key == 9: # tab
self.insert_text(u'\t')
return True
k = self.interesting_keys.get(key)
if k:
key = (None, None, k, 1)
self._key_down(key)
def keyboard_on_key_up(self, window, keycode):
key, key_str = keycode
k = self.interesting_keys.get(key)
if k:
key = (None, None, k, 1)
self._key_up(key)
def keyboard_on_textinput(self, window, text):
if self._selection:
self.delete_selection()
self.insert_text(text, False)
def on_hint_text(self, instance, value):
self._refresh_hint_text()
def _refresh_hint_text(self):
_lines, self._hint_text_flags = self._split_smart(self.hint_text)
_hint_text_labels = []
_hint_text_rects = []
_create_label = self._create_line_label
for x in _lines:
lbl = _create_label(x, hint=True)
_hint_text_labels.append(lbl)
_hint_text_rects.append(Rectangle(size=lbl.size))
self._hint_text_lines = _lines
self._hint_text_labels = _hint_text_labels
self._hint_text_rects = _hint_text_rects
# Remember to update graphics
self._trigger_update_graphics()
#
# Properties
#
_lines = ListProperty([])
_hint_text_lines = ListProperty([])
_editable = BooleanProperty(True)
_insert_int_patu = re.compile(u'[^0-9]')
_insert_int_patb = re.compile(b'[^0-9]')
readonly = BooleanProperty(False)
'''If True, the user will not be able to change the content of a textinput.
.. versionadded:: 1.3.0
:attr:`readonly` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
multiline = BooleanProperty(True)
'''If True, the widget will be able to show multiple lines of text. If False,
the "enter" keypress will defocus the textinput instead of adding a new
line.
:attr:`multiline` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
password = BooleanProperty(False)
'''If True, the widget will display its characters as the character
set in :attr:`password_mask`.
.. versionadded:: 1.2.0
:attr:`password` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
password_mask = StringProperty('*')
'''Sets the character used to mask the text when :attr:`password` is True.
.. versionadded:: 1.9.2
:attr:`password_mask` is a :class:`~kivy.properties.StringProperty` and
defaults to `'*'`.
'''
keyboard_suggestions = BooleanProperty(True)
'''If True provides auto suggestions on top of keyboard.
This will only work if :attr:`input_type` is set to `text`.
.. versionadded:: 1.8.0
:attr:`keyboard_suggestions` is a
:class:`~kivy.properties.BooleanProperty` defaults to True.
'''
cursor_blink = BooleanProperty(False)
'''This property is used to blink the cursor graphic. The value of
:attr:`cursor_blink` is automatically computed. Setting a value on it will
have no impact.
:attr:`cursor_blink` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
def _get_cursor(self):
return self._cursor
def _set_cursor(self, pos):
if not self._lines:
self._trigger_refresh_text()
return
l = self._lines
cr = boundary(pos[1], 0, len(l) - 1)
cc = boundary(pos[0], 0, len(l[cr]))
cursor = cc, cr
if self._cursor == cursor:
return
self._cursor = cursor
# adjust scrollview to ensure that the cursor will be always inside our
# viewport.
padding_left = self.padding[0]
padding_right = self.padding[2]
viewport_width = self.width - padding_left - padding_right
sx = self.scroll_x
offset = self.cursor_offset()
# if offset is outside the current bounds, readjust
if offset > viewport_width + sx:
self.scroll_x = offset - viewport_width
if offset < sx:
self.scroll_x = offset
# do the same for Y
# this algorithm tries to center the cursor as much as possible
dy = self.line_height + self.line_spacing
offsety = cr * dy
sy = self.scroll_y
padding_top = self.padding[1]
padding_bottom = self.padding[3]
viewport_height = self.height - padding_top - padding_bottom - dy
if offsety > viewport_height + sy:
sy = offsety - viewport_height
if offsety < sy:
sy = offsety
self.scroll_y = sy
return True
cursor = AliasProperty(_get_cursor, _set_cursor)
'''Tuple of (col, row) values indicating the current cursor position.
You can set a new (col, row) if you want to move the cursor. The scrolling
area will be automatically updated to ensure that the cursor is
visible inside the viewport.
:attr:`cursor` is an :class:`~kivy.properties.AliasProperty`.
'''
def _get_cursor_col(self):
return self._cursor[0]
cursor_col = AliasProperty(_get_cursor_col, None, bind=('cursor', ))
'''Current column of the cursor.
:attr:`cursor_col` is an :class:`~kivy.properties.AliasProperty` to
cursor[0], read-only.
'''
def _get_cursor_row(self):
return self._cursor[1]
cursor_row = AliasProperty(_get_cursor_row, None, bind=('cursor', ))
'''Current row of the cursor.
:attr:`cursor_row` is an :class:`~kivy.properties.AliasProperty` to
cursor[1], read-only.
'''
cursor_pos = AliasProperty(_get_cursor_pos, None, bind=(
'cursor', 'padding', 'pos', 'size', 'focus',
'scroll_x', 'scroll_y'))
'''Current position of the cursor, in (x, y).
:attr:`cursor_pos` is an :class:`~kivy.properties.AliasProperty`,
read-only.
'''
cursor_color = ListProperty([1, 0, 0, 1])
'''Current color of the cursor, in (r, g, b, a) format.
.. versionadded:: 1.9.0
:attr:`cursor_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 0, 0, 1].
'''
line_height = NumericProperty(1)
'''Height of a line. This property is automatically computed from the
:attr:`font_name`, :attr:`font_size`. Changing the line_height will have
no impact.
.. note::
:attr:`line_height` is the height of a single line of text.
Use :attr:`minimum_height`, which also includes padding, to
get the height required to display the text properly.
:attr:`line_height` is a :class:`~kivy.properties.NumericProperty`,
read-only.
'''
tab_width = NumericProperty(4)
'''By default, each tab will be replaced by four spaces on the text
input widget. You can set a lower or higher value.
:attr:`tab_width` is a :class:`~kivy.properties.NumericProperty` and
defaults to 4.
'''
padding_x = VariableListProperty([0, 0], length=2)
'''Horizontal padding of the text: [padding_left, padding_right].
padding_x also accepts a one argument form [padding_horizontal].
:attr:`padding_x` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [0, 0]. This might be changed by the current theme.
.. deprecated:: 1.7.0
Use :attr:`padding` instead.
'''
def on_padding_x(self, instance, value):
self.padding[0] = value[0]
self.padding[2] = value[1]
padding_y = VariableListProperty([0, 0], length=2)
'''Vertical padding of the text: [padding_top, padding_bottom].
padding_y also accepts a one argument form [padding_vertical].
:attr:`padding_y` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [0, 0]. This might be changed by the current theme.
.. deprecated:: 1.7.0
Use :attr:`padding` instead.
'''
def on_padding_y(self, instance, value):
self.padding[1] = value[0]
self.padding[3] = value[1]
padding = VariableListProperty([6, 6, 6, 6])
'''Padding of the text: [padding_left, padding_top, padding_right,
padding_bottom].
padding also accepts a two argument form [padding_horizontal,
padding_vertical] and a one argument form [padding].
.. versionchanged:: 1.7.0
Replaced AliasProperty with VariableListProperty.
:attr:`padding` is a :class:`~kivy.properties.VariableListProperty` and
defaults to [6, 6, 6, 6].
'''
scroll_x = NumericProperty(0)
'''X scrolling value of the viewport. The scrolling is automatically
updated when the cursor is moved or text changed. If there is no
user input, the scroll_x and scroll_y properties may be changed.
:attr:`scroll_x` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
scroll_y = NumericProperty(0)
'''Y scrolling value of the viewport. See :attr:`scroll_x` for more
information.
:attr:`scroll_y` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
selection_color = ListProperty([0.1843, 0.6549, 0.8313, .5])
'''Current color of the selection, in (r, g, b, a) format.
.. warning::
The color should always have an "alpha" component less than 1
since the selection is drawn after the text.
:attr:`selection_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [0.1843, 0.6549, 0.8313, .5].
'''
border = ListProperty([4, 4, 4, 4])
'''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction. Used with :attr:`background_normal` and
:attr:`background_active`. Can be used for a custom background.
.. versionadded:: 1.4.1
It must be a list of four values: (top, right, bottom, left). Read the
BorderImage instruction for more information about how to use it.
:attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults
to (4, 4, 4, 4).
'''
background_normal = StringProperty(
'atlas://data/images/defaulttheme/textinput')
'''Background image of the TextInput when it's not in focus.
.. versionadded:: 1.4.1
:attr:`background_normal` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/textinput'.
'''
background_disabled_normal = StringProperty(
'atlas://data/images/defaulttheme/textinput_disabled')
'''Background image of the TextInput when disabled.
.. versionadded:: 1.8.0
:attr:`background_disabled_normal` is a
:class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/textinput_disabled'.
'''
background_active = StringProperty(
'atlas://data/images/defaulttheme/textinput_active')
'''Background image of the TextInput when it's in focus.
.. versionadded:: 1.4.1
:attr:`background_active` is a
:class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/textinput_active'.
'''
background_disabled_active = StringProperty(
'atlas://data/images/defaulttheme/textinput_disabled_active')
'''Background image of the TextInput when it's in focus and disabled.
.. versionadded:: 1.8.0
:attr:`background_disabled_active` is a
:class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/textinput_disabled_active'.
'''
background_color = ListProperty([1, 1, 1, 1])
'''Current color of the background, in (r, g, b, a) format.
.. versionadded:: 1.2.0
:attr:`background_color` is a :class:`~kivy.properties.ListProperty`
and defaults to [1, 1, 1, 1] (white).
'''
foreground_color = ListProperty([0, 0, 0, 1])
'''Current color of the foreground, in (r, g, b, a) format.
.. versionadded:: 1.2.0
:attr:`foreground_color` is a :class:`~kivy.properties.ListProperty`
and defaults to [0, 0, 0, 1] (black).
'''
disabled_foreground_color = ListProperty([0, 0, 0, .5])
'''Current color of the foreground when disabled, in (r, g, b, a) format.
.. versionadded:: 1.8.0
:attr:`disabled_foreground_color` is a
:class:`~kivy.properties.ListProperty` and
defaults to [0, 0, 0, .5] (50% transparent black).
'''
use_bubble = BooleanProperty(not _is_desktop)
'''Indicates whether the cut/copy/paste bubble is used.
.. versionadded:: 1.7.0
:attr:`use_bubble` is a :class:`~kivy.properties.BooleanProperty`
and defaults to True on mobile OS's, False on desktop OS's.
'''
use_handles = BooleanProperty(not _is_desktop)
'''Indicates whether the selection handles are displayed.
.. versionadded:: 1.8.0
:attr:`use_handles` is a :class:`~kivy.properties.BooleanProperty`
and defaults to True on mobile OS's, False on desktop OS's.
'''
suggestion_text = StringProperty('')
'''Shows a suggestion text/word from the current cursor position onwards,
that can be used as a possible completion. Useful for suggesting completion
text. This can also be used by the IME to set up the current word being
edited.
.. versionadded:: 1.9.0
:attr:`suggestion_text` is a :class:`~kivy.properties.StringProperty`
defaults to `''`
'''
def on_suggestion_text(self, instance, value):
global MarkupLabel
if not MarkupLabel:
from kivy.core.text.markup import MarkupLabel
cursor_pos = self.cursor_pos
txt = self._lines[self.cursor_row]
cr = self.cursor_row
kw = self._get_line_options()
rct = self._lines_rects[cr]
lbl = text = None
if value:
lbl = MarkupLabel(
text=txt + "[b]{}[/b]".format(value), **kw)
else:
lbl = Label(**kw)
text = txt
lbl.refresh()
self._lines_labels[cr] = lbl.texture
rct.size = lbl.size
self._update_graphics()
def get_sel_from(self):
return self._selection_from
selection_from = AliasProperty(get_sel_from, None)
'''If a selection is in progress or complete, this property will represent
the cursor index where the selection started.
.. versionchanged:: 1.4.0
:attr:`selection_from` is an :class:`~kivy.properties.AliasProperty`
and defaults to None, readonly.
'''
def get_sel_to(self):
return self._selection_to
selection_to = AliasProperty(get_sel_to, None)
'''If a selection is in progress or complete, this property will represent
the cursor index where the selection ends.
.. versionchanged:: 1.4.0
:attr:`selection_to` is an :class:`~kivy.properties.AliasProperty` and
defaults to None, readonly.
'''
selection_text = StringProperty(u'')
'''Current content selection.
:attr:`selection_text` is a :class:`~kivy.properties.StringProperty`
and defaults to '', readonly.
'''
def on_selection_text(self, instance, value):
if value:
if self.use_handles:
self._trigger_show_handles()
if CutBuffer and not self.password:
self._trigger_update_cutbuffer()
def _get_text(self, encode=False):
lf = self._lines_flags
l = self._lines
len_l = len(l)
if len(lf) < len_l:
lf.append(1)
text = u''.join([(u'\n' if (lf[i] & FL_IS_NEWLINE) else u'') + l[i]
for i in range(len_l)])
if encode and not isinstance(text, bytes):
text = text.encode('utf8')
return text
def _set_text(self, text):
if isinstance(text, bytes):
text = text.decode('utf8')
if self.replace_crlf:
text = text.replace(u'\r\n', u'\n')
if self._get_text(encode=False) == text:
return
self._refresh_text(text)
self.cursor = self.get_cursor_from_index(len(text))
text = AliasProperty(_get_text, _set_text, bind=('_lines', ))
'''Text of the widget.
Creation of a simple hello world::
widget = TextInput(text='Hello world')
If you want to create the widget with an unicode string, use::
widget = TextInput(text=u'My unicode string')
:attr:`text` is an :class:`~kivy.properties.AliasProperty`.
'''
font_name = StringProperty('Roboto')
'''Filename of the font to use. The path can be absolute or relative.
Relative paths are resolved by the :func:`~kivy.resources.resource_find`
function.
.. warning::
Depending on your text provider, the font file may be ignored. However,
you can mostly use this without problems.
If the font used lacks the glyphs for the particular language/symbols
you are using, you will see '[]' blank box characters instead of the
actual glyphs. The solution is to use a font that has the glyphs you
need to display. For example, to display |unicodechar|, use a font like
freesans.ttf that has the glyph.
.. |unicodechar| image:: images/unicode-char.png
:attr:`font_name` is a :class:`~kivy.properties.StringProperty` and
defaults to 'Roboto'.
'''
font_size = NumericProperty('15sp')
'''Font size of the text in pixels.
:attr:`font_size` is a :class:`~kivy.properties.NumericProperty` and
defaults to '15sp'.
'''
hint_text = StringProperty('')
'''Hint text of the widget.
Shown if text is '' and focus is False.
.. versionadded:: 1.6.0
:attr:`hint_text` a :class:`~kivy.properties.StringProperty` and defaults
to ''.
'''
hint_text_color = ListProperty([0.5, 0.5, 0.5, 1.0])
'''Current color of the hint_text text, in (r, g, b, a) format.
.. versionadded:: 1.6.0
:attr:`hint_text_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [0.5, 0.5, 0.5, 1.0] (grey).
'''
auto_indent = BooleanProperty(False)
'''Automatically indent multiline text.
.. versionadded:: 1.7.0
:attr:`auto_indent` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
replace_crlf = BooleanProperty(True)
'''Automatically replace CRLF with LF.
.. versionadded:: 1.9.1
:attr:`replace_crlf` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
allow_copy = BooleanProperty(True)
'''Decides whether to allow copying the text.
.. versionadded:: 1.8.0
:attr:`allow_copy` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
def _get_min_height(self):
return (len(self._lines) * (self.line_height + self.line_spacing)
+ self.padding[1] + self.padding[3])
minimum_height = AliasProperty(_get_min_height, None,
bind=('_lines', 'line_spacing', 'padding',
'font_size', 'font_name', 'password',
'hint_text', 'line_height'))
'''Minimum height of the content inside the TextInput.
.. versionadded:: 1.8.0
:attr:`minimum_height` is a readonly
:class:`~kivy.properties.AliasProperty`.
.. warning::
:attr:`minimum_height` is calculated based on :attr:`width` therefore
code like this will lead to an infinite loop::
<FancyTextInput>:
height: self.minimum_height
width: self.height
'''
line_spacing = NumericProperty(0)
'''Space taken up between the lines.
.. versionadded:: 1.8.0
:attr:`line_spacing` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
input_filter = ObjectProperty(None, allownone=True)
''' Filters the input according to the specified mode, if not None. If
None, no filtering is applied.
.. versionadded:: 1.9.0
:attr:`input_filter` is an :class:`~kivy.properties.ObjectProperty` and
defaults to `None`. Can be one of `None`, `'int'` (string), or `'float'`
(string), or a callable. If it is `'int'`, it will only accept numbers.
If it is `'float'` it will also accept a single period. Finally, if it is
a callable it will be called with two parameters: the string to be added
and a bool indicating whether the string is a result of undo (True). The
callable should return a new substring that will be used instead.
'''
handle_image_middle = StringProperty(
'atlas://data/images/defaulttheme/selector_middle')
'''Image used to display the middle handle on the TextInput for cursor
positioning.
.. versionadded:: 1.8.0
:attr:`handle_image_middle` is a :class:`~kivy.properties.StringProperty`
and defaults to 'atlas://data/images/defaulttheme/selector_middle'.
'''
def on_handle_image_middle(self, instance, value):
if self._handle_middle:
self._handle_middle.source = value
handle_image_left = StringProperty(
'atlas://data/images/defaulttheme/selector_left')
'''Image used to display the Left handle on the TextInput for selection.
.. versionadded:: 1.8.0
:attr:`handle_image_left` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/selector_left'.
'''
def on_handle_image_left(self, instance, value):
if self._handle_left:
self._handle_left.source = value
handle_image_right = StringProperty(
'atlas://data/images/defaulttheme/selector_right')
'''Image used to display the Right handle on the TextInput for selection.
.. versionadded:: 1.8.0
:attr:`handle_image_right` is a
:class:`~kivy.properties.StringProperty` and defaults to
'atlas://data/images/defaulttheme/selector_right'.
'''
def on_handle_image_right(self, instance, value):
if self._handle_right:
self._handle_right.source = value
write_tab = BooleanProperty(True)
'''Whether the tab key should move focus to the next widget or if it should
enter a tab in the :class:`TextInput`. If `True` a tab will be written,
otherwise, focus will move to the next widget.
.. versionadded:: 1.9.0
:attr:`write_tab` is a :class:`~kivy.properties.BooleanProperty` and
defaults to `True`.
'''
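# A minimal sketch of an ``input_filter`` callable as documented above: it
# receives the substring about to be inserted and a bool flag telling whether
# the insert comes from an undo, and returns the substring that should
# actually be added. The name and the hex-only rule are purely illustrative;
# it would be hooked up with e.g. TextInput(input_filter=_hex_only_filter).
def _hex_only_filter(substring, from_undo):
    # keep hexadecimal characters only; anything else is silently dropped
    allowed = '0123456789abcdefABCDEF'
    return u''.join([c for c in substring if c in allowed])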
if __name__ == '__main__':
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
class TextInputApp(App):
def build(self):
Builder.load_string('''
<TextInput>
on_text:
self.suggestion_text = ''
self.suggestion_text = 'ion_text'
''')
root = BoxLayout(orientation='vertical')
textinput = TextInput(multiline=True, use_bubble=True,
use_handles=True)
#textinput.text = __doc__
root.add_widget(textinput)
textinput2 = TextInput(multiline=False, text='monoline textinput',
size_hint=(1, None), height=30)
root.add_widget(textinput2)
return root
TextInputApp().run()
|
mit
| 6,712,873,367,837,512,000
| 34.263362
| 80
| 0.549193
| false
| 3.997723
| false
| false
| false
|
robcarver17/systematictradingexamples
|
plots_for_perhaps/compareoptmethods.py
|
1
|
22426
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, show, xticks, xlabel, ylabel, legend, yscale, title, savefig, rcParams, figure, hist, text, bar, subplots
import Image
def file_process(filename):
fig = plt.gcf()
fig.set_size_inches(18.5,10.5)
fig.savefig("/home/rob/%s.png" % filename,dpi=300)
fig.savefig("/home/rob/%sLOWRES.png" % filename,dpi=50)
Image.open("/home/rob/%s.png" % filename).convert('L').save("/home/rob/%s.jpg" % filename)
Image.open("/home/rob/%sLOWRES.png" % filename).convert('L').save("/home/rob/%sLOWRES.jpg" % filename)
"""
compare:
handcrafting
bootstrapped
one shot
equal weights
market cap weights
"""
import pandas as pd
from datetime import datetime as dt
def read_ts_csv(fname, dindex="Date"):
data=pd.read_csv(fname)
dateindex=[dt.strptime(dx, "%d/%m/%y") for dx in list(data[dindex])]
data.index=dateindex
del(data[dindex])
return data
def calc_asset_returns(rawdata, tickers):
asset_returns=pd.concat([get_monthly_tr(tickname, rawdata) for tickname in tickers], axis=1)
asset_returns.columns=tickers
return asset_returns
def get_monthly_tr(tickname, rawdata):
total_returns=rawdata[tickname+"_TR"]
return (total_returns / total_returns.shift(1)) - 1.0
def portfolio_return(asset_returns, cash_weights):
index_returns=asset_returns.cumsum().ffill().diff()
cash_align = cash_weights.reindex(asset_returns.index, method="ffill")
cash_align[np.isnan(index_returns)]=0.0
cash_align[np.isnan(cash_align)]=0.0
vols=pd.ewmstd(asset_returns, span=100, min_periods=1)
riskweights=pd.DataFrame(cash_align.values / vols.values, index=vols.index)
riskweights.columns=asset_returns.columns
riskweights[np.isnan(riskweights)]=0.0
def _rowfix(x):
if all([y==0.0 for y in x]):
return x
sumx=sum(x)
return [y/sumx for y in x]
riskweights = riskweights.apply(_rowfix, axis=1)
portfolio_returns=asset_returns*riskweights
portfolio_returns[np.isnan(portfolio_returns)]=0.0
portfolio_returns=portfolio_returns.sum(axis=1)
return portfolio_returns
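## Illustrative sketch of how portfolio_return() is meant to be called: two
## synthetic daily return series plus a single row of fixed cash weights,
## which the function turns into inverse-vol risk weights. The asset names
## and the 60:40 split are made up, and this assumes the same older pandas
## API (pd.ewmstd) already used by the function above.
def _demo_portfolio_return():
    idx = pd.date_range("2010-01-01", periods=500, freq="B")
    rets = pd.DataFrame(np.random.randn(500, 2) * 0.01,
                        index=idx, columns=["EQUITY", "BOND"])
    cash_weights = pd.DataFrame([[0.6, 0.4]], index=[idx[0]],
                                columns=["EQUITY", "BOND"])
    return portfolio_return(rets, cash_weights)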
import matplotlib.pyplot as plt
from scipy import stats
import pandas as pd
import numpy as np
from datetime import datetime as dt
import datetime
from scipy.optimize import minimize
from copy import copy
import random
def correlation_matrix(returns):
"""
Calcs a correlation matrix using weekly returns from a pandas time series
We use weekly returns because otherwise end of day effects, especially over time zones, give
unrealistically low correlations
"""
asset_index=returns.cumsum().ffill()
asset_index=asset_index.resample('1W') ## Only want index, fill method is irrelevant
asset_index = asset_index - asset_index.shift(1)
return asset_index.corr().values
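## Quick synthetic illustration of the point made in the docstring: if one
## market's close lags another by a day (a time-zone effect), the daily
## correlation looks much weaker than the weekly one. The series and noise
## level are invented purely for this sketch, which also relies on the older
## pandas resample API used by correlation_matrix().
def _demo_weekly_vs_daily_corr():
    idx = pd.date_range("2014-01-01", periods=520, freq="B")
    base = pd.Series(np.random.randn(520), idx)
    lagged = base.shift(1) + 0.1 * np.random.randn(520)
    rets = pd.concat([base, lagged], axis=1)
    daily_corr = rets.corr().values[0][1]
    weekly_corr = correlation_matrix(rets)[0][1]
    return daily_corr, weekly_corr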
def create_dull_pd_matrix(dullvalue=0.0, dullname="A", startdate=pd.datetime(1970,1,1).date(), enddate=datetime.datetime.now().date(), index=None):
"""
create a single valued pd matrix
"""
if index is None:
index=pd.date_range(startdate, enddate)
dullvalue=np.array([dullvalue]*len(index))
ans=pd.DataFrame(dullvalue, index, columns=[dullname])
return ans
def addem(weights):
## Used for constraints
return 1.0 - sum(weights)
def variance(weights, sigma):
## returns the variance (NOT standard deviation) given weights and sigma
return (np.matrix(weights)*sigma*np.matrix(weights).transpose())[0,0]
def neg_SR(weights, sigma, mus):
## Returns minus the Sharpe Ratio (as we're minimising)
"""
estreturn=250.0*((np.matrix(x)*mus)[0,0])
variance=(variance(x,sigma)**.5)*16.0
"""
estreturn=(np.matrix(weights)*mus)[0,0]
std_dev=(variance(weights,sigma)**.5)
return -estreturn/std_dev
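## Worked toy example of the objective above: two assets with 10% vol, 0.5
## correlation and 5% expected return each, held 50:50. The figures are
## invented; minimising neg_SR over the weights is what the optimisers below
## do with real data.
def _demo_neg_sr():
    sigma = np.array([[0.01, 0.005],
                      [0.005, 0.01]])
    mus = np.array([[0.05], [0.05]])
    weights = [0.5, 0.5]
    return -neg_SR(weights, sigma, mus)   # Sharpe ratio, roughly 0.58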
def sigma_from_corr(std, corr):
sigma=std*corr*std
return sigma
def basic_opt(std,corr,mus):
number_assets=mus.shape[0]
sigma=sigma_from_corr(std, corr)
start_weights=[1.0/number_assets]*number_assets
## Constraints - positive weights, adding to 1.0
bounds=[(0.0,1.0)]*number_assets
cdict=[{'type':'eq', 'fun':addem}]
return minimize(neg_SR_riskfree, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001)
def neg_SR_riskfree(weights, sigma, mus, riskfree=0.005):
## Returns minus the Sharpe Ratio (as we're minimising)
"""
estreturn=250.0*((np.matrix(x)*mus)[0,0])
variance=(variance(x,sigma)**.5)*16.0
"""
estreturn=(np.matrix(weights)*mus)[0,0] - riskfree
std_dev=(variance(weights,sigma)**.5)
return -estreturn/std_dev
def equalise_vols(returns, default_vol):
"""
Normalises returns so they have the in-sample vol of default_vol (annualised)
Assumes daily returns
"""
factors=(default_vol/16.0)/returns.std(axis=0)
facmat=create_dull_pd_matrix(dullvalue=factors, dullname=returns.columns, index=returns.index)
norm_returns=returns*facmat
norm_returns.columns=returns.columns
return norm_returns
def offdiag_matrix(offvalue, nlength):
identity=np.diag([1.0]*nlength)
for x in range(nlength):
for y in range(nlength):
if x!=y:
identity[x][y]=offvalue
return identity
def get_avg_corr(sigma):
new_sigma=copy(sigma)
np.fill_diagonal(new_sigma,np.nan)
return np.nanmean(new_sigma)
def nearest_to_listvals(x, lvalues=[0.0, 0.25, 0.5, 0.75, 0.9]):
## return x rounded to nearest of lvalues
if len(lvalues)==1:
return lvalues[0]
d1=abs(x - lvalues[0])
d2=abs(x - lvalues[1])
if d1<d2:
return lvalues[0]
newlvalues=lvalues[1:]
return nearest_to_listvals(x, newlvalues)
def handcrafted(returns, equalisevols=True, default_vol=0.2):
"""
Handcrafted optimiser
"""
count_assets=len(returns.columns)
try:
assert equalisevols is True
assert count_assets<=3
except:
raise Exception("Handcrafting only works with equalised vols and 3 or fewer assets")
if count_assets<3:
## Equal weights
return [1.0/count_assets]*count_assets
est_corr=returns.corr().values
c1=nearest_to_listvals(est_corr[0][1])
c2=nearest_to_listvals(est_corr[0][2])
c3=nearest_to_listvals(est_corr[1][2])
wts_to_use=HANDCRAFTED_WTS[(HANDCRAFTED_WTS.c1==c1) & (HANDCRAFTED_WTS.c2==c2) & (HANDCRAFTED_WTS.c3==c3)].irow(0)
return [wts_to_use.w1, wts_to_use.w2, wts_to_use.w3]
def opt_shrinkage(returns, shrinkage_factors, equalisevols=True, default_vol=0.2):
"""
Returns the optimal portfolio for the dataframe returns using shrinkage
shrinkage_factors is a tuple, shrinkage of mean and correlation
If equalisevols=True then normalises returns to have same standard deviation; the weights returned
will be 'risk weightings'
"""
if equalisevols:
use_returns=equalise_vols(returns, default_vol)
else:
use_returns=returns
(shrinkage_mean, shrinkage_corr)=shrinkage_factors
## Sigma matrix
## Use correlation and then convert back to variance
est_corr=use_returns.corr().values
avg_corr=get_avg_corr(est_corr)
prior_corr=offdiag_matrix(avg_corr, est_corr.shape[0])
sigma_corr=shrinkage_corr*prior_corr+(1-shrinkage_corr)*est_corr
cov_vector=use_returns.std().values
sigma=cov_vector*sigma_corr*cov_vector
## mus vector
avg_return=np.mean(use_returns.mean())
est_mus=np.array([use_returns[asset_name].mean() for asset_name in use_returns.columns], ndmin=2).transpose()
prior_mus=np.array([avg_return for asset_name in use_returns.columns], ndmin=2).transpose()
mus=shrinkage_mean*prior_mus+(1-shrinkage_mean)*est_mus
## Starting weights
number_assets=use_returns.shape[1]
start_weights=[1.0/number_assets]*number_assets
## Constraints - positive weights, adding to 1.0
bounds=[(0.0,1.0)]*number_assets
cdict=[{'type':'eq', 'fun':addem}]
ans=minimize(neg_SR, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001)
return ans['x']
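## Small numeric sketch of the correlation-shrinkage step used above: the
## estimated correlation matrix is pulled part-way towards a prior in which
## every off-diagonal entry equals the average correlation. The matrix values
## are made up for illustration.
def _demo_corr_shrinkage(shrinkage_corr=0.5):
    est_corr = np.array([[1.0, 0.9, 0.0],
                         [0.9, 1.0, 0.0],
                         [0.0, 0.0, 1.0]])
    avg_corr = get_avg_corr(est_corr)                        # 0.3
    prior_corr = offdiag_matrix(avg_corr, est_corr.shape[0])
    return shrinkage_corr * prior_corr + (1 - shrinkage_corr) * est_corr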
def handcraft_equal(returns):
"""
dynamic handcrafting, equal weights only
"""
## RETURNS Correlation matrix
use_returns=equalise_vols(returns, default_vol=16.0)
## Sigma matrix = correlations
sigma=use_returns.cov()
sigma[sigma<0.0]=0.0
ungroupedreturns=dict([(x,returns[x]) for x in returns.columns])
tree_data=hc_sigma(sigma, ungroupedreturns)
tree_data=grouping_tree(tree_data)
weights=tree_to_weights(tree_data)
return weights
def hc_sigma(ungrouped_sigma, ungroupedreturns, groupdata=None):
"""
handcraft weights from sigma matrix
Algo:
- Find pair of assets with highest correlation
- Form them into a new group with equal weights
- The group becomes like a new asset
- Once we only have two assets left, stop.
Need to
"""
if len(ungroupedreturns)==1:
return groupdata[1]
if groupdata is None:
## first run
## groupdata stores grouping information
## To begin with each group just consists of one asset
groupdata=[[],list(ungrouped_sigma.columns)]
groupedreturns=dict()
## iteration
while len(ungroupedreturns)>0:
## current_sigma consists of the correlation of things we currently have
if len(ungroupedreturns)==1:
idx_list=[0]
else:
idx_list=find_highest_corr(ungrouped_sigma)
name_list=tuple([ungrouped_sigma.columns[idx] for idx in idx_list])
## pair those things up
(ungrouped_sigma, ungroupedreturns, groupedreturns,
groupdata)=group_assets(ungrouped_sigma, ungroupedreturns, groupedreturns, groupdata, idx_list, name_list)
new_returns=pd.concat(groupedreturns, axis=1)
new_sigma=new_returns.corr()
## recursive
return hc_sigma(new_sigma, groupedreturns, groupdata=[[],groupdata[0]])
def find_highest_corr(sigmat):
new_sigmat=copy(sigmat.values)
np.fill_diagonal(new_sigmat, -100.0)
(i,j)=np.unravel_index(new_sigmat.argmax(), new_sigmat.shape)
return (i,j)
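## Tiny example of the pairing step inside hc_sigma(): given a correlation
## matrix for three made-up assets, find_highest_corr() returns the row and
## column indices of the most correlated pair (here the two equity series).
def _demo_find_highest_corr():
    corr = pd.DataFrame([[1.0, 0.8, 0.2],
                         [0.8, 1.0, 0.4],
                         [0.2, 0.4, 1.0]],
                        index=["SP500", "NASDAQ", "US10YR"],
                        columns=["SP500", "NASDAQ", "US10YR"])
    return find_highest_corr(corr)   # (0, 1)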
def group_assets(ungrouped_sigma, ungroupedreturns, groupedreturns, groupdata, idx_list, name_list):
"""
Group assets
"""
todelete=[]
names=[]
grouping=[]
group_returns=[]
weights=[1.0/len(idx_list)]*len(idx_list) ## could use a more complex weighting scheme here...
for (itemweight,idx, iname) in zip(weights,idx_list, name_list):
gi=groupdata[1][idx]
grouping.append(gi)
gri=ungroupedreturns.pop(iname)
group_returns.append(gri*itemweight)
names.append(gri.name)
ungrouped_sigma=ungrouped_sigma.drop(iname, axis=0)
ungrouped_sigma=ungrouped_sigma.drop(iname, axis=1)
todelete.append(idx)
groupdata[0].append(grouping)
gr_returns=pd.concat(group_returns, axis=1)
gr_returns=gr_returns.sum(axis=1)
gr_returns.name="[%s]" % "+".join(names)
print "Pairing %s" % ", ".join(names)
groupedreturns[gr_returns.name]=gr_returns
groupdata[1]=[element for eindex, element in enumerate(groupdata[1]) if eindex not in todelete]
return (ungrouped_sigma, ungroupedreturns, groupedreturns,
groupdata)
def grouping_tree(tree_data, sigma):
"""
Group branches of 2 into larger if possible
"""
pass
def corrs_in_group(group, sigma):
asset_list=sum(group, [])
littlesigma=sigma.loc[asset_list, asset_list]
def corr_from_leaf(leaf, sigma):
return sigma[leaf[0]][leaf[1]]
def tree_to_weights(tree_data):
"""
convert a tree into weights
"""
pass
def markosolver(returns, equalisemeans=False, equalisevols=True, default_vol=0.2, default_SR=1.0):
"""
Returns the optimal portfolio for the dataframe returns
If equalisemeans=True then assumes all assets have the same return; if False, uses the asset means
If equalisevols=True then normalises returns to have the same standard deviation; the weights returned
will be 'risk weightings'
Note: if equalisemeans=True and equalisevols=True this effectively assumes all assets have the same Sharpe ratio
"""
if equalisevols:
use_returns=equalise_vols(returns, default_vol)
else:
use_returns=returns
## Sigma matrix
sigma=use_returns.cov().values
## Expected mean returns
est_mus=[use_returns[asset_name].mean() for asset_name in use_returns.columns]
missingvals=[np.isnan(x) for x in est_mus]
if equalisemeans:
## Don't use the data - Set to the average Sharpe Ratio
mus=[default_vol*default_SR]*returns.shape[1]
else:
mus=est_mus
mus=np.array(mus, ndmin=2).transpose()
## Starting weights
number_assets=use_returns.shape[1]
start_weights=[1.0/number_assets]*number_assets
## Constraints - positive weights, adding to 1.0
bounds=[(0.0,1.0)]*number_assets
cdict=[{'type':'eq', 'fun':addem}]
ans=minimize(neg_SR, start_weights, (sigma, mus), method='SLSQP', bounds=bounds, constraints=cdict, tol=0.00001)
wts=ans['x']
return wts
def bootstrap_portfolio(returns_to_bs, monte_carlo=200, monte_length=250, equalisemeans=False, equalisevols=True, default_vol=0.2, default_SR=1.0):
"""
Given a dataframe of returns, returns_to_bs, performs a bootstrap optimisation
We run monte_carlo numbers of bootstraps
Each one contains monte_length days drawn randomly, with replacement
(so *not* block bootstrapping)
The other arguments are passed to the optimisation function markosolver
Note - doesn't deal gracefully with missing data. Will end up downweighting assets depending on how
much data is missing in each bootstrap. You'll need to think about how to solve this problem.
"""
weightlist=[]
for unused_index in range(monte_carlo):
bs_idx=[int(random.uniform(0,1)*len(returns_to_bs)) for i in range(monte_length)]
returns=returns_to_bs.iloc[bs_idx,:]
weight=markosolver(returns, equalisemeans=equalisemeans, equalisevols=equalisevols, default_vol=default_vol, default_SR=default_SR)
weightlist.append(weight)
### We can take an average here; only because our weights always add up to 1. If that isn't true
### then you will need to do some kind of renormalisation
theweights_mean=list(np.mean(weightlist, axis=0))
return theweights_mean
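## Sketch of a single bootstrap draw as performed inside the loop above: row
## indices are sampled uniformly with replacement, so a draw usually contains
## duplicates and misses some rows entirely (no block structure). The sizes
## are arbitrary and only for illustration.
def _demo_bootstrap_draw(n_rows=10, monte_length=10):
    bs_idx = [int(random.uniform(0, 1) * n_rows) for unused in range(monte_length)]
    return bs_idx, len(set(bs_idx))   # second value is typically < n_rows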
def optimise_over_periods(data, date_method, fit_method, rollyears=20, equalisemeans=False, equalisevols=True,
monte_carlo=100, monte_length=None, shrinkage_factors=(0.5, 0.5),
weightdf=None):
"""
Do an optimisation
Returns data frame of weights
Note: if fitting in sample, the weights will be somewhat boring
Doesn't deal with e.g. missing data in certain subperiods
"""
if monte_length is None:
monte_length=int(len(data.index)*.1)
## Get the periods
fit_periods=generate_fitting_dates(data, date_method, rollyears=rollyears)
## Do the fitting
## Build up a list of weights, which we'll concat
weight_list=[]
for fit_tuple in fit_periods:
## Fit on the slice defined by first two parts of the tuple
period_subset_data=data[fit_tuple[0]:fit_tuple[1]]
## Can be slow, if bootstrapping, so indicate where we are
print "Fitting data for %s to %s" % (str(fit_tuple[2]), str(fit_tuple[3]))
if fit_method=="one_period":
weights=markosolver(period_subset_data, equalisemeans=equalisemeans, equalisevols=equalisevols)
elif fit_method=="bootstrap":
weights=bootstrap_portfolio(period_subset_data, equalisemeans=equalisemeans,
equalisevols=equalisevols, monte_carlo=monte_carlo,
monte_length=monte_length)
elif fit_method=="shrinkage":
weights=opt_shrinkage(period_subset_data, shrinkage_factors=shrinkage_factors, equalisevols=equalisevols)
elif fit_method=="fixed":
weights=[float(weightdf[weightdf.Country==ticker].Weight.values) for ticker in list(period_subset_data.columns)]
else:
raise Exception("Fitting method %s unknown" % fit_method)
## We adjust dates slightly to ensure no overlaps
dindex=[fit_tuple[2]+datetime.timedelta(seconds=1), fit_tuple[3]-datetime.timedelta(seconds=1)]
## create a double row to delineate start and end of test period
weight_row=pd.DataFrame([weights]*2, index=dindex, columns=data.columns)
weight_list.append(weight_row)
weight_df=pd.concat(weight_list, axis=0)
return weight_df
"""
Now we need to do this with expanding or rolling window
"""
"""
Generate the date tuples
"""
def generate_fitting_dates(data, date_method, rollyears=20):
"""
generate a list of 4-tuples, one element for each year in the data
each tuple contains [fit_start, fit_end, period_start, period_end] datetime objects
the last period will be a 'stub' if we haven't got an exact number of years
date_method can be one of 'in_sample', 'expanding', 'rolling'
if 'rolling' then use rollyears variable
"""
start_date=data.index[0]
end_date=data.index[-1]
## generate list of dates, one year apart, including the final date
yearstarts=list(pd.date_range(start_date, end_date, freq="12M"))+[end_date]
## loop through each period
periods=[]
for tidx in range(len(yearstarts))[1:-1]:
## these are the dates we test in
period_start=yearstarts[tidx]
period_end=yearstarts[tidx+1]
## now generate the dates we use to fit
if date_method=="in_sample":
fit_start=start_date
elif date_method=="expanding":
fit_start=start_date
elif date_method=="rolling":
yearidx_to_use=max(0, tidx-rollyears)
fit_start=yearstarts[yearidx_to_use]
else:
raise Exception("don't recognise date_method %s" % date_method)
if date_method=="in_sample":
fit_end=end_date
elif date_method in ['rolling', 'expanding']:
fit_end=period_start
else:
raise Exception("don't recognise date_method %s " % date_method)
periods.append([fit_start, fit_end, period_start, period_end])
## give the user back the list of periods
return periods
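## Example of the period tuples produced above, using a dummy ten-year daily
## series (contents irrelevant, only the index matters). With the "rolling"
## method and rollyears=3, each fit window covers at most the three years
## preceding the out-of-sample year. The dates and column name are invented.
def _demo_fitting_dates():
    idx = pd.date_range("2000-01-01", "2010-12-31", freq="B")
    dummy = pd.DataFrame(dict(A=[0.0] * len(idx)), index=idx)
    return generate_fitting_dates(dummy, "rolling", rollyears=3)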
rawdata=read_ts_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/MSCI_data.csv")
refdata=pd.read_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/MSCI_ref.csv")
tickers=list(refdata[(refdata.EmorDEV=="DEV") & (refdata.Type=="Country")].Country.values) #mom 12bp
#tickers=list(refdata[refdata.Type=="Country"].Country.values) #mom 12bp
fix_hcweights=pd.read_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/devhcweights.csv")
fix_capweights=pd.read_csv("/home/rob/workspace/systematictradingexamples/plots_for_perhaps/devcapweights.csv")
fix_eqweights=pd.DataFrame(dict(Country=tickers, Weight=[1.0/len(tickers)]*len(tickers)))
data=calc_asset_returns(rawdata, tickers)
### IDEA: to bootstrap the results
### Repeatedly draw from 'data' to make new pseudo series
oneperiodweights=optimise_over_periods(data, "expanding", "one_period", equalisemeans=False, equalisevols=True)
#bootstrapweights=optimise_over_periods(data, "expanding", "bootstrap", equalisemeans=True, equalisevols=True)
exposthcweights=optimise_over_periods(data, "expanding", "fixed", weightdf=fix_hcweights, equalisemeans=True, equalisevols=True)
equalweights=optimise_over_periods(data, "expanding", "fixed", weightdf=fix_eqweights, equalisemeans=True, equalisevols=True)
marketcapweights=optimise_over_periods(data, "expanding", "fixed", weightdf=fix_capweights, equalisemeans=True, equalisevols=True)
index_returns=(1.0+data).cumprod().ffill()
last_return=index_returns.irow(-1).values
last_return=pd.DataFrame(np.array([last_return]*len(data)), data.index)
last_return.columns=data.columns
index_returns = index_returns / last_return
marketcapweights = marketcapweights.reindex(index_returns.index, method="ffill")
marketcapweights=marketcapweights*index_returns
marketcapweights=marketcapweights.ffill()
## portfolio, take out missing weights
p1=portfolio_return(data, oneperiodweights)[pd.datetime(1994,1,1):]
#p2=portfolio_return(data, bootstrapweights)
p3=portfolio_return(data, exposthcweights)[pd.datetime(1994,1,1):]
p4=portfolio_return(data, equalweights)[pd.datetime(1994,1,1):]
p5=portfolio_return(data, marketcapweights)[pd.datetime(1994,1,1):]
drag1=p3 - p1
drag2=p4 - p5
def stats(x):
ann_mean=x.mean()*12
ann_std = x.std()*(12**.5)
geo_mean = ann_mean - (ann_std**2)/2.0
sharpe = geo_mean / ann_std
return (ann_mean, ann_std, geo_mean, sharpe)
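## Quick sanity check of the annualisation above with round, made-up numbers:
## an 8% annual arithmetic mean and 15% annual standard deviation give a
## geometric mean of 0.08 - 0.15**2 / 2 = 0.06875 and a Sharpe ratio of
## roughly 0.46.
def _demo_stats_arithmetic():
    ann_mean, ann_std = 0.08, 0.15
    geo_mean = ann_mean - (ann_std ** 2) / 2.0
    return geo_mean, geo_mean / ann_std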
print stats(p1)
print stats(p3)
print stats(p4)
print stats(p5)
toplot=pd.concat([p1, p3, p4, p5], axis=1)
toplot.columns=["Optimised", "Handcraft", "Equal", "Market Cap"]
toplot.cumsum().plot()
show()
p1.cumsum().plot(color="black", ls="solid")
p3.cumsum().plot(color="gray", ls="solid")
p4.cumsum().plot(color="black", ls="dashed")
p5.cumsum().plot(color="gray", ls="dashed")
legend( ["Optimised", "Handcraft", "Equal", "Market Cap"], loc="upper left")
frame=plt.gca()
#frame.get_yaxis().set_visible(False)
rcParams.update({'font.size': 18})
file_process("compareoptmethods")
show()
drag1.cumsum().plot(color="gray", ls="solid")
legend( [ "Handcraft vs MktCap"], loc="upper left")
frame=plt.gca()
#frame.get_yaxis().set_visible(False)
rcParams.update({'font.size': 18})
file_process("compareoptmethodstracking")
show()
|
gpl-2.0
| 4,701,163,412,939,915,000
| 30.408964
| 147
| 0.66463
| false
| 3.352669
| false
| false
| false
|
bqbn/addons-server
|
src/olympia/users/tests/test_user_utils.py
|
1
|
1451
|
# -*- coding: utf-8 -*-
import pytest
from olympia.amo.tests import user_factory
from olympia.users.utils import (
UnsubscribeCode, system_addon_submission_allowed)
def test_email_unsubscribe_code_parse():
email = u'nobody@mozîlla.org'
token, hash_ = UnsubscribeCode.create(email)
r_email = UnsubscribeCode.parse(token, hash_)
assert email == r_email
# A bad token or hash raises ValueError
with pytest.raises(ValueError):
UnsubscribeCode.parse(token, hash_[:-5])
with pytest.raises(ValueError):
UnsubscribeCode.parse(token[5:], hash_)
system_guids = pytest.mark.parametrize('guid', [
'foø@mozilla.org', 'baa@shield.mozilla.org', 'moo@pioneer.mozilla.org',
'blâh@mozilla.com', 'foø@Mozilla.Org', 'addon@shield.moZilla.com',
'baa@ShielD.MozillA.OrG', 'moo@PIONEER.mozilla.org', 'blâh@MOZILLA.COM',
'flop@search.mozilla.org', 'user@mozillaonline.com',
'tester@MoZiLlAoNlInE.CoM'
])
@system_guids
@pytest.mark.django_db
def test_system_addon_submission_allowed_mozilla_allowed(guid):
user = user_factory(email='firefox@mozilla.com')
data = {'guid': guid}
assert system_addon_submission_allowed(user, data)
@system_guids
@pytest.mark.django_db
def test_system_addon_submission_allowed_not_mozilla_not_allowed(guid):
user = user_factory(email='waterbadger@notzilla.org')
data = {'guid': guid}
assert not system_addon_submission_allowed(user, data)
|
bsd-3-clause
| 2,683,786,618,169,446,000
| 31.133333
| 76
| 0.707469
| false
| 3.031447
| true
| false
| false
|
vst/normalazy
|
normalazy.py
|
1
|
27657
|
import copy
import datetime
from collections import OrderedDict
from decimal import Decimal
from functools import wraps
from six import add_metaclass, string_types
#: Defines the version of the `normalazy` library.
__version__ = "0.0.3"
def iffnotnull(func):
"""
    Wraps a function, returns None if the first argument is None, invokes the function otherwise.
:param func: The function to be wrapped.
:return: None or the result of the function.
>>> test1 = iffnotnull(lambda x: x)
>>> test1(None)
>>> test1(1)
1
"""
@wraps(func)
def wrapper(value, *args, **kwargs):
return None if value is None else func(value, *args, **kwargs)
return wrapper
def iffnotblank(func):
"""
    Wraps a function, returns the value untouched if the first argument is an empty string, invokes the function otherwise.
:param func: The function to be wrapped.
:return: Empty string or the result of the function.
>>> test1 = iffnotblank(lambda x: x)
>>> test1("")
''
>>> test1(1)
1
"""
@wraps(func)
def wrapper(value, *args, **kwargs):
return value if value == "" else func(value, *args, **kwargs)
return wrapper
def identity(x):
"""
Defines an identity function.
:param x: value
:return: value
>>> identity(None)
>>> identity(1)
1
"""
return x
@iffnotnull
def as_string(x):
"""
Converts the value to a trimmed string.
:param x: Value.
:return: Trimmed string value.
>>> as_string(None)
>>> as_string("")
''
>>> as_string("a")
'a'
>>> as_string(" a ")
'a'
"""
return str(x).strip()
@iffnotnull
def as_factor(x):
"""
Converts the value to a factor string.
:param x: Value.
:return: Trimmed, up-cased string value.
>>> as_factor(None)
>>> as_factor("")
''
>>> as_factor("a")
'A'
>>> as_factor(" a ")
'A'
"""
return as_string(x).upper()
@iffnotnull
@iffnotblank
def as_number(x):
"""
Converts the value to a decimal value.
:param x: The value to be converted to a decimal value.
:return: A Decimal instance.
>>> as_number(None)
>>> as_number(1)
Decimal('1')
>>> as_number("1")
Decimal('1')
>>> as_number(" 1 ")
Decimal('1')
"""
return Decimal(as_string(x))
def as_boolean(x, predicate=None):
"""
Converts the value to a boolean value.
:param x: The value to be converted to a boolean value.
:param predicate: The predicate function if required.
:return: Boolean
>>> as_boolean(None)
False
>>> as_boolean("")
False
>>> as_boolean(" ")
True
>>> as_boolean(1)
True
>>> as_boolean(0)
False
>>> as_boolean("1")
True
>>> as_boolean("0")
True
>>> as_boolean("1", predicate=lambda x: int(x) != 0)
True
>>> as_boolean("0", predicate=lambda x: int(x) != 0)
False
>>> as_boolean("1", predicate=int)
True
>>> as_boolean("0", predicate=int)
False
>>> as_boolean("1", int)
True
>>> as_boolean("0", int)
False
"""
return bool(x if predicate is None else predicate(x))
@iffnotnull
@iffnotblank
def as_datetime(x, fmt=None):
"""
Converts the value to a datetime value.
:param x: The value to be converted to a datetime value.
:param fmt: The format of the date/time string.
:return: A datetime.date instance.
>>> as_datetime(None)
>>> as_datetime("")
''
>>> as_datetime("2015-01-01 00:00:00")
datetime.datetime(2015, 1, 1, 0, 0)
>>> as_datetime("2015-01-01T00:00:00", "%Y-%m-%dT%H:%M:%S")
datetime.datetime(2015, 1, 1, 0, 0)
>>> as_datetime("2015-01-01T00:00:00", fmt="%Y-%m-%dT%H:%M:%S")
datetime.datetime(2015, 1, 1, 0, 0)
"""
return datetime.datetime.strptime(x, fmt or "%Y-%m-%d %H:%M:%S")
@iffnotnull
@iffnotblank
def as_date(x, fmt=None):
"""
Converts the value to a date value.
:param x: The value to be converted to a date value.
:param fmt: The format of the date string.
:return: A datetime.date instance.
>>> as_date(None)
>>> as_date('')
''
>>> as_date("2015-01-01")
datetime.date(2015, 1, 1)
>>> as_date("Date: 2015-01-01", "Date: %Y-%m-%d")
datetime.date(2015, 1, 1)
>>> as_date("Date: 2015-01-01", fmt="Date: %Y-%m-%d")
datetime.date(2015, 1, 1)
"""
return datetime.datetime.strptime(x, fmt or "%Y-%m-%d").date()
class Value:
"""
    Defines an immutable (by convention; Python does not enforce it) boxed value with message, status and extra data as payload if required.
>>> value = Value(value=42, message=None, status=Value.Status.Success, extras="41 + 1")
>>> value.value
42
>>> value.message
>>> value.status == Value.Status.Success
True
>>> value.extras
'41 + 1'
>>> value = Value.success(42, date="2015-01-01")
>>> value.value
42
>>> value.status == Value.Status.Success
True
>>> value.date
'2015-01-01'
>>> value = Value.warning(value="fortytwo", message="Failed to convert to integer.", date="2015-01-01")
>>> value.value
'fortytwo'
>>> value.status == Value.Status.Warning
True
>>> value.date
'2015-01-01'
>>> value.message
'Failed to convert to integer.'
>>> value = Value.error(message="Failed to compute the value.", date="2015-01-01")
>>> value.value
>>> value.status == Value.Status.Error
True
>>> value.date
'2015-01-01'
>>> value.message
'Failed to compute the value.'
"""
class Status:
"""
Defines an enumeration for value status.
"""
#: Indicates that value is mapped successfully.
Success = 1
#: Indicates that value is mapped successfully with warnings.
Warning = 2
#: Indicates that value could not be mapped successfully.
Error = 3
def __init__(self, value=None, message=None, status=None, **kwargs):
"""
Constructs an immutable Value class instance.
Note that the classmethods `success`, `warning` and `error` should be preferred over this
constructor.
:param value: The atomic value.
:param message: Any messages if required.
:param status: The value status.
:param kwargs: Extra payload for the value.
"""
self.__value = value
self.__status = status or self.Status.Success
self.__message = message
self.__payload = kwargs
@property
def value(self):
return self.__value
@property
def status(self):
return self.__status
@property
def message(self):
return self.__message
@property
def payload(self):
return self.__payload
def __getattr__(self, item):
"""
Provides access to payload through attributes.
:param item: The name of the attribute.
:return: The value for the attribute if the attribute name is in payload.
"""
## Check if the item is in the payload:
if item in self.payload:
## Yes, return it.
return self.payload.get(item)
## Nope, escalate:
return super(Value, self).__getattr__(item)
@classmethod
def success(cls, value=None, message=None, **kwargs):
"""
Provides a convenience constructor for successful Value instances.
:param value: The value of the Value instance to be constructed.
:param message: The message, if any.
:param kwargs: Extra payload for the value.
:return: A successful Value instance.
"""
return cls(value=value, message=message, status=cls.Status.Success, **kwargs)
@classmethod
def warning(cls, value=None, message=None, **kwargs):
"""
Provides a convenience constructor for Values instances with warnings.
:param value: The value of the Value instance to be constructed.
:param message: The message, if any.
:param kwargs: Extra payload for the value.
:return: A Value instance with warnings.
"""
return cls(value=value, message=message, status=cls.Status.Warning, **kwargs)
@classmethod
def error(cls, value=None, message=None, **kwargs):
"""
Provides a convenience constructor for Values instances with errors.
:param value: The value of the Value instance to be constructed.
:param message: The message, if any.
:param kwargs: Extra payload for the value.
:return: A Value instance with errors.
"""
return cls(value=value, message=message, status=cls.Status.Error, **kwargs)
class Field(object):
"""
Provides a concrete mapper field.
>>> field = Field()
>>> field.map(None, dict()).value
>>> field.map(None, dict()).status == Value.Status.Success
True
>>> field = Field(null=False)
>>> field.map(None, dict()).value
>>> field.map(None, dict()).status == Value.Status.Error
True
>>> field = Field(func=lambda i, r: r.get("a", None))
>>> field.map(None, dict(a="")).value
''
>>> field.map(None, dict(a="")).status == Value.Status.Success
True
>>> field = Field(func=lambda i, r: r.get("a", None), blank=False)
>>> field.map(None, dict(a="")).value
''
>>> field.map(None, dict(a="")).status == Value.Status.Error
True
>>> field = Field(func=lambda i, r: r.get("a", None))
>>> field.map(None, dict()).value
>>> field.map(None, dict(a=1)).value
1
>>> field.map(None, dict(a=1)).status == Value.Status.Success
True
"""
def __init__(self, name=None, func=None, blank=True, null=True):
"""
Constructs a mapper field with the given argument.
:param name: The name of the field.
:param func: The function which is to be used to map the value.
:param blank: Boolean indicating if blank values are allowed.
:param null: Boolean indicating if null values are allowed.
"""
self.__name = name
self.__func = func
self.__blank = blank
self.__null = null
@property
def name(self):
"""
Returns the name of the field.
:return: The name of the field.
"""
return self.__name
@property
def func(self):
"""
Returns the mapping function of the field.
:return: The mapping function of the field.
"""
return self.__func
@property
def blank(self):
"""
Indicates if the value is allowed to be blank.
:return: Boolean indicating if the value is allowed to be blank.
"""
return self.__blank
@property
def null(self):
"""
Indicates if the value is allowed to be null.
:return: Boolean indicating if the value is allowed to be null.
"""
return self.__null
def rename(self, name):
"""
Renames the field.
:param name: The new name of the field.
"""
self.__name = name
def treat_value(self, value):
"""
Treats the value and return.
:param value: The value to be treated.
:return: A Value instance.
"""
## By now we have a value. If it is an instance of Value
## class, return it as is:
if isinstance(value, Value):
return value
        ## If the value is a string (including unicode on Python 2, via six.string_types) and empty,
        ## but is not allowed to be so, return with error:
        if not self.blank and isinstance(value, string_types) and value == "":
return Value.error(value="", message="Value is not allowed to be blank.")
## If the value is None but is not allowed to be so, return
## with error:
if not self.null and value is None:
return Value.error(message="Value is not allowed to be None.")
## OK, we have a value to be boxed and returned successfully:
return Value.success(value=value)
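    ## Doctest-style illustration of the treatment rules above, kept as comments so the module's
    ## doctests are left untouched:
    ##     Field(blank=False).treat_value("").status == Value.Status.Error       # blank rejected
    ##     Field(null=False).treat_value(None).status == Value.Status.Error      # None rejected
    ##     Field().treat_value(Value.warning(1)).status == Value.Status.Warning  # Value passed through as is
    ##     Field().treat_value(42).value                                         # -> 42, boxed as success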
def map(self, instance, record):
"""
        Returns the value for the field as a Value instance.
:param instance: The instance for which the value will be retrieved.
:param record: The raw record.
:return: A Value instance.
"""
## Check if we have a function:
if self.func is None:
## OK, value shall be None:
value = None
## Check if the function is a callable or the name of an attribute of the instance:
elif hasattr(self.func, "__call__"):
## The function is a callable. Call it directly on the
## instance and the record and get the raw value:
value = self.func(instance, record)
else:
## The function is not a callable. We assume that it is
## the name of a method of the instance. Apply the
## instance method on the record and get the raw value:
value = getattr(instance, self.func)(record)
## Treat the value and return:
return self.treat_value(value)
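    ## A minimal sketch of the string-valued ``func`` branch above, i.e. naming an instance method
    ## instead of passing a callable; ``PersonRecord``/``pick_name`` are hypothetical names used
    ## purely for illustration (kept as comments, since ``Record`` is only defined further below):
    ##     class PersonRecord(Record):
    ##         name = Field(func="pick_name")
    ##         def pick_name(self, record):
    ##             return record.get("first", "") + " " + record.get("last", "")
    ##     PersonRecord({"first": "Ada", "last": "Lovelace"}).name   # -> 'Ada Lovelace'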
class KeyField(Field):
"""
Provides a mapper field for a given key which belongs to the
record. The record can be an object which has `__getitem__` method
or a simple object just with attribute access.
The method starts reading the source value using the key provided
checking `__getitem__` method (for iterables such as `dict` or
`list`), then checks the attribute for simple object attribute
access.
>>> field = KeyField(key="a")
>>> field.map(None, dict(a="")).value
''
>>> field.map(None, dict(a="")).status == Value.Status.Success
True
>>> field = KeyField(key="a", blank=False)
>>> field.map(None, dict(a="")).value
''
>>> field.map(None, dict(a="")).status == Value.Status.Error
True
>>> field = KeyField(key="a", func=lambda i, r, v: as_number(v))
>>> field.map(None, dict(a="12")).value
Decimal('12')
>>> field.map(None, dict(a="12")).status == Value.Status.Success
True
>>> field = KeyField(key="a", cast=as_number)
>>> field.map(None, dict(a="12")).value
Decimal('12')
>>> field.map(None, dict(a="12")).status == Value.Status.Success
True
>>> class Student:
... def __init__(self, name):
... self.name = name
>>> field = KeyField(key="name")
>>> field.map(None, Student("Sinan")).value
'Sinan'
"""
def __init__(self, key=None, cast=None, **kwargs):
"""
Constructs a mapper field with the given argument.
:param key: The key of the property of the record to be mapped.
:param cast: The function to be applied to the value.
:param **kwargs: Keyword arguments to `Field`.
"""
super(KeyField, self).__init__(**kwargs)
self.__key = key
self.__cast = cast
@property
def key(self):
"""
        Returns the key used for the field mapping.
"""
return self.__key
def rename(self, name):
"""
Renames the field.
:param name: The new name of the field.
"""
## Call the super:
super(KeyField, self).rename(name)
## If the key is None, set it with joy:
if self.__key is None:
self.__key = name
def map(self, instance, record):
"""
        Returns the value for the field as a Value instance.
:param instance: The instance for which the value will be retrieved.
:param record: The raw record.
:return: A Value instance.
"""
## Does the record have __getitem__ method (Indexable) and key exist?
if hasattr(record, "__getitem__") and self.key in record:
## Yes, get the value:
value = record.get(self.key)
## Nope, let's check if the record has such an attribute:
elif hasattr(record, self.key):
## Yes, get the value using attribute access:
value = getattr(record, self.key)
## We can't access such a value in the record.
else:
## OK, Value shall be None:
value = None
## Do we have a function:
if self.func is None:
## Nope, skip:
pass
## Check if the function is a callable or the name of an attribute of the instance:
elif hasattr(self.func, "__call__"):
## The function is a callable. Call it directly on the
## instance, the record and the raw value:
value = self.func(instance, record, value)
else:
## The function is not a callable. We assume that it is
## the name of a method on the instance. Apply the
## instance method on the record and the raw value:
value = getattr(instance, self.func)(record, value)
## OK, now we will cast if required:
if self.__cast is not None:
## Is it a Value instance?
if isinstance(value, Value):
value = Value(value=self.__cast(value.value), status=value.status, message=value.message)
else:
value = self.__cast(value)
## Done, treat the value and return:
return self.treat_value(value)
class ChoiceKeyField(KeyField):
"""
Defines a choice mapper for the index of the record provided.
>>> field = ChoiceKeyField(key="a", choices=dict(a=1, b=2))
>>> field.map(None, dict(a="a")).value
1
>>> field = ChoiceKeyField(key="a", choices=dict(a=1, b=2), func=lambda i, r, v: Decimal(str(v)))
>>> field.map(None, dict(a="a")).value
Decimal('1')
"""
def __init__(self, *args, **kwargs):
## Choices?
choices = kwargs.pop("choices", {})
## Get the function:
functmp = kwargs.pop("func", None)
## Compute the func
if functmp is not None:
func = lambda i, r, v: functmp(i, r, choices.get(v, None))
else:
func = lambda i, r, v: choices.get(v, None)
## Add the func back:
kwargs["func"] = func
## OK, proceed as usual:
super(ChoiceKeyField, self).__init__(*args, **kwargs)
class RecordMetaclass(type):
"""
Provides a record metaclass.
"""
def __new__(mcs, name, bases, attrs, **kwargs):
## Pop all fields:
fields = dict([(key, attrs.pop(key)) for key in list(attrs.keys()) if isinstance(attrs.get(key), Field)])
        ## Check fields and make sure that names are added (note: the loop variable must not
        ## shadow the `name` argument, which is still needed for creating the class below):
        for field_name, field in fields.items():
            if field.name is None:
                field.rename(field_name)
## Get the record class as usual:
record_cls = super(RecordMetaclass, mcs).__new__(mcs, name, bases, attrs, **kwargs)
## Attach fields to the class:
record_cls._fields = {}
## Now, process the fields:
record_cls._fields.update(fields)
## Done, return the record class:
return record_cls
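## A brief illustration (comments only; the class name is hypothetical) of what the metaclass does:
## any class attribute that is a Field instance is popped off the class body and collected into
## ``_fields``, keyed and named after the attribute:
##     class ExampleRecord(Record):
##         code = KeyField()
##     "code" in ExampleRecord._fields        # -> True
##     ExampleRecord._fields["code"].name     # -> 'code'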
@add_metaclass(RecordMetaclass)
class Record(object):
"""
Provides a record normalizer base class.
>>> class Test1Record(Record):
... a = KeyField()
>>> record1 = Test1Record(dict(a=1))
>>> record1.a
1
>>> class Test2Record(Record):
... a = KeyField()
... b = ChoiceKeyField(choices={1: "Bir", 2: "Iki"})
>>> record2 = Test2Record(dict(a=1, b=2))
>>> record2.a
1
>>> record2.b
'Iki'
We can get the dictionary representation of records:
>>> record1.as_dict()
OrderedDict([('a', 1)])
>>> record2.as_dict()
OrderedDict([('a', 1), ('b', 'Iki')])
Or detailed:
>>> record1.as_dict(detailed=True)
OrderedDict([('a', OrderedDict([('value', '1'), ('status', 1), ('message', None)]))])
>>> record2.as_dict(detailed=True)
OrderedDict([('a', OrderedDict([('value', '1'), ('status', 1), ('message', None)])), \
('b', OrderedDict([('value', 'Iki'), ('status', 1), ('message', None)]))])
We can also create a new record from an existing record or dictionary:
>>> class Test3Record(Record):
... a = KeyField()
... b = KeyField()
>>> record3 = Test3Record.new(record2)
>>> record3.a
1
>>> record3.b
'Iki'
>>> record3.a == record2.a
True
>>> record3.b == record2.b
True
With dictionary:
>>> record4 = Test3Record.new({"a": 1, "b": "Iki"})
>>> record4.a
1
>>> record4.b
'Iki'
>>> record4.a == record2.a
True
>>> record4.b == record2.b
True
Or even override some fields:
>>> record5 = Test3Record.new(record3, b="Bir")
>>> record5.a
1
>>> record5.b
'Bir'
"""
    ## TODO: [Improvement] Rename _fields -> __fields, _values -> __values
def __init__(self, record):
## Save the record slot:
self.__record = record
## Declare the values map:
self._values = {}
def __getattr__(self, item):
"""
Returns the value of the attribute named `item`, particularly from within the fields set or pre-calculated
field values set.
:param item: The name of the attribute, in particular the field name.
:return: The value (value attribute of the Value).
"""
return self.getval(item).value
def hasval(self, name):
"""
Indicates if we have a value slot called ``name``.
:param name: The name of the value slot.
:return: ``True`` if we have a value slot called ``name``, ``False`` otherwise.
"""
return name in self._fields
def getval(self, name):
"""
Returns the value slot identified by the ``name``.
:param name: The name of the value slot.
:return: The value slot, ie. the boxed value instance of class :class:`Value`.
"""
## Did we compute this before?
if name in self._values:
## Yes, return the value slot:
return self._values.get(name)
## Do we have such a value slot?
if not self.hasval(name):
raise AttributeError("Record does not have value slot named '{}'".format(name))
## Apparently, we have never computed the value. Let's compute the value slot and return:
return self.setval(name, self._fields.get(name).map(self, self.__record))
def setval(self, name, value, status=None, message=None, **kwargs):
"""
Sets a value to the value slot.
:param name: The name of the value slot.
:param value: The value to be set (Either a Python value or a :class:`Value` instance.)
:param status: The status of the value slot if any.
:param message: The message of the value slot if any.
:param kwargs: Additional named values as payload to value.
:return: The :class:`Value` instance set.
"""
## Do we have such a value slot?
if not self.hasval(name):
raise AttributeError("Record does not have value slot named '{}'".format(name))
## Create a value instance:
if isinstance(value, Value):
## Get a copy of payload if any:
payload = copy.deepcopy(value.payload)
## Update the payload with kwargs:
payload.update(kwargs.copy())
## Create the new value:
value = Value(value=value.value, status=status or value.status, message=message or value.message, **payload)
else:
value = Value(value=value, status=status or Value.Status.Success, message=message, **kwargs)
## Save the slot:
self._values[name] = value
## Done, return the value set:
return value
def delval(self, name):
"""
Deletes a stored value.
:param name: The name of the value.
"""
if name in self._values:
del self._values[name]
def allvals(self):
"""
Returns all the value slots.
:return: A dictionary of all computed value slots.
"""
return {field: self.getval(field) for field in self._fields}
def val_none(self, name):
"""
Indicates if the value is None.
:param name: The name of the value slot.
:return: Boolean indicating if the value is None.
"""
return self.getval(name).value is None
def val_blank(self, name):
"""
Indicates if the value is blank.
:param name: The name of the value slot.
:return: Boolean indicating if the value is blank.
"""
return self.getval(name).value == ""
def val_some(self, name):
"""
Indicates if the value is something other than None or blank.
:param name: The name of the value slot.
:return: Boolean indicating if the value is something other than None or blank.
"""
return not self.val_none(name) and not self.val_blank(name)
def val_success(self, name):
"""
Indicates if the value is success.
:param name: The name of the value slot.
:return: Boolean indicating if the value is success.
"""
return self.getval(name).status == Value.Status.Success
def val_warning(self, name):
"""
Indicates if the value is warning.
:param name: The name of the value slot.
:return: Boolean indicating if the value is warning.
"""
return self.getval(name).status == Value.Status.Warning
def val_error(self, name):
"""
Indicates if the value is error.
:param name: The name of the value slot.
:return: Boolean indicating if the value is error.
"""
return self.getval(name).status == Value.Status.Error
def as_dict(self, detailed=False):
"""
        Provides a dictionary representation of the record instance.
        :param detailed: Indicates if we need a detailed result, ie. with status and message for each field.
        :return: An ordered dictionary representation of the record instance.
"""
## We have the fields and values saved in the `_fields` and `_values` attributes respectively. We will
## simply iterate over these fields and their respective values.
##
## Let's start with defining the data dictionary:
retval = OrderedDict([])
## Iterate over fields and get their values:
for key in sorted(self._fields):
## Add the field to return value:
retval[key] = getattr(self, key, None)
## If detailed, override with real Value instance:
if detailed:
## Get the value:
value = self._values.get(key, None)
## Add the value:
retval[key] = OrderedDict([("value", str(value.value)),
("status", value.status),
("message", value.message)])
## Done, return the value:
return retval
@classmethod
def new(cls, record, **kwargs):
"""
        Creates a new record from the provided record or dictionary, overriding values with any
        additional named arguments.
:param record: The record or dictionary to be copied from.
:param kwargs: Named arguments to override.
:return: New record.
"""
## First of all, get the record as value dictionary:
base = copy.deepcopy(record.as_dict() if isinstance(record, Record) else record)
## Update the dictionary:
base.update(kwargs)
## Done, create the new record and return:
return cls(base)
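## A minimal end-to-end sketch (illustrative addition; the record class and field names below are
## hypothetical) wiring the helpers above together; it only runs when the module is executed directly:
if __name__ == "__main__":
    class ContactRecord(Record):
        ## Hypothetical fields, purely for illustration:
        name = KeyField(cast=as_string, blank=False)
        born = KeyField(cast=as_date)
        vip = KeyField(cast=as_boolean)
    contact = ContactRecord({"name": " Jane Doe ", "born": "1990-05-01", "vip": 1})
    print(contact.name)                                             # -> 'Jane Doe'
    print(contact.born)                                             # -> datetime.date(1990, 5, 1)
    print(contact.vip)                                              # -> True
    print(contact.getval("name").status == Value.Status.Success)    # -> True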
|
bsd-2-clause
| 2,155,762,320,283,130,600
| 28.674893
| 120
| 0.576599
| false
| 4.025764
| false
| false
| false
|
CIECODE-Madrid/tipi-engine
|
stats/process_stats.py
|
1
|
6001
|
from tipi_data.models.stats import Stats
from tipi_data.models.topic import Topic
from tipi_data.models.initiative import Initiative
class GenerateStats(object):
    """
    Rebuilds the precomputed stats document: overall initiative counts plus per-topic and
    per-subtopic rankings of deputies, parliamentary groups and places.
    """
    def __init__(self):
        self.topics = Topic.objects()
        self.subtopics = self.topics.distinct('tags.subtopic')
        self.stats = Stats()
def generate(self):
Stats.objects().delete()
self.overall()
self.deputies_by_topics()
self.deputies_by_subtopics()
self.parliamentarygroups_by_topics()
self.parliamentarygroups_by_subtopics()
self.places_by_topics()
self.places_by_subtopics()
self.stats.save()
def overall(self):
self.stats['overall'] = {
'initiatives': Initiative.objects.count(),
'allinitiatives': Initiative.all.count(),
'topics': list(),
'subtopics': list()
}
pipeline = [
{'$match': {'topics': {'$exists': True, '$not': {'$size': 0}}}},
{'$unwind': '$topics'},
{'$group': {'_id': '$topics', 'initiatives': {'$sum': 1}}},
{'$sort': {'initiatives': -1}}
]
result = Initiative.objects().aggregate(*pipeline)
for item in result:
self.stats['overall']['topics'].append(item)
for subtopic in self.subtopics:
pipeline = [
{'$match': {'tags.subtopic': subtopic}},
{'$group': {'_id': subtopic, 'initiatives': {'$sum': 1}}}
]
result = Initiative.objects().aggregate(*pipeline)
            ## Avoid pymongo's private CommandCursor API (_has_next); just try to pull one document:
            try:
                self.stats['overall']['subtopics'].append(next(result))
            except StopIteration:
                pass
self.stats['overall']['subtopics'].sort(key=lambda x: x['initiatives'], reverse=True)
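        ## Rough shape of the document built above (values are made up; real numbers depend on the data):
        ##   stats['overall'] = {
        ##       'initiatives': 1234, 'allinitiatives': 2345,
        ##       'topics':    [{'_id': '<topic name>', 'initiatives': 321}, ...],   # sorted by the pipeline
        ##       'subtopics': [{'_id': '<subtopic>', 'initiatives': 45}, ...],      # sorted in Python above
        ##   }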
def deputies_by_topics(self):
self.stats['deputiesByTopics'] = list()
for topic in self.topics:
pipeline = [
{'$match': {'topics': topic['name']}}, {'$unwind': '$author_deputies'},
{'$group': {'_id': '$author_deputies', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}},
{'$limit': 10}
]
result = list(Initiative.objects().aggregate(*pipeline))
if len(result) > 0:
self.stats['deputiesByTopics'].append({
'_id': topic['name'],
'deputies': result
})
def parliamentarygroups_by_topics(self):
self.stats['parliamentarygroupsByTopics'] = list()
for topic in self.topics:
pipeline = [
{'$match': {'topics': topic['name']}}, {'$unwind': '$author_parliamentarygroups'},
{'$group': {'_id': '$author_parliamentarygroups', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}}
]
result = list(Initiative.objects().aggregate(*pipeline))
if len(result) > 0:
self.stats['parliamentarygroupsByTopics'].append({
'_id': topic['name'],
'parliamentarygroups': result
})
def places_by_topics(self):
self.stats['placesByTopics'] = list()
for topic in self.topics:
pipeline = [
{'$match': {'topics': topic['name'], 'place': {'$not': {'$eq': ""}, '$exists': True}}},
{'$group': {'_id': '$place', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}},
{'$limit': 5}
]
result = list(Initiative.objects().aggregate(*pipeline))
if len(result) > 0:
self.stats['placesByTopics'].append({
'_id': topic['name'],
'places': result
})
def deputies_by_subtopics(self):
self.stats['deputiesBySubtopics'] = list()
for subtopic in self.subtopics:
pipeline = [
{'$match': { 'tags.subtopic': subtopic } }, {'$unwind': '$author_deputies'},
{'$group': {'_id': '$author_deputies', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}},
{'$limit': 10}
]
result = list(Initiative.objects().aggregate(*pipeline))
if len(result) > 0:
self.stats['deputiesBySubtopics'].append({
'_id': subtopic,
'deputies': result
})
def parliamentarygroups_by_subtopics(self):
self.stats['parliamentarygroupsBySubtopics'] = list()
for subtopic in self.subtopics:
pipeline = [
{'$match': { 'tags.subtopic': subtopic } }, {'$unwind': '$author_parliamentarygroups'},
{'$group': {'_id': '$author_parliamentarygroups', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}}
]
result = list(Initiative.objects().aggregate(*pipeline))
if len(result) > 0:
self.stats['parliamentarygroupsBySubtopics'].append({
'_id': subtopic,
'parliamentarygroups': result
})
def places_by_subtopics(self):
self.stats['placesBySubtopics'] = list()
for subtopic in self.subtopics:
pipeline = [
{'$match': { 'tags.subtopic': subtopic, 'place': {'$not': {'$eq': ""}, '$exists': True}}},
{'$group': {'_id': '$place', 'initiatives': {'$sum': 1}}}, {'$sort': {'initiatives': -1}},
{'$limit': 5}
]
result = list(Initiative.objects().aggregate(*pipeline))
if len(result) > 0:
self.stats['placesBySubtopics'].append({
'_id': subtopic,
'places': result
})
if __name__ == "__main__":
GenerateStats().generate()
|
gpl-3.0
| 3,043,947,720,192,462,300
| 41.864286
| 130
| 0.481753
| false
| 4.193571
| false
| false
| false
|