| code (string, lengths 2–1.05M) | repo_name (string, lengths 5–104) | path (string, lengths 4–251) | language (1 class) | license (15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

# pylint: disable=g-bad-import-order
from isl import augment
from isl import test_util
from isl import util

flags = tf.flags
test = tf.test
lt = tf.contrib.labeled_tensor

FLAGS = flags.FLAGS


class CorruptTest(test_util.Base):

  def setUp(self):
    super(CorruptTest, self).setUp()

    self.signal_lt = lt.select(self.input_lt, {'mask': util.slice_1(False)})

    rc = lt.ReshapeCoder(['z', 'channel', 'mask'], ['channel'])
    self.corrupt_coded_lt = augment.corrupt(0.1, 0.05, 0.1,
                                            rc.encode(self.signal_lt))
    self.corrupt_lt = rc.decode(self.corrupt_coded_lt)

  def test_name(self):
    self.assertIn('corrupt', self.corrupt_coded_lt.name)

  def test(self):
    self.assertEqual(self.corrupt_lt.axes, self.signal_lt.axes)

    self.save_images('corrupt', [self.get_images('', self.corrupt_lt)])
    self.assert_images_near('corrupt', True)


class AugmentTest(test_util.Base):

  def setUp(self):
    super(AugmentTest, self).setUp()

    ap = augment.AugmentParameters(0.1, 0.05, 0.1)
    self.input_augment_lt, self.target_augment_lt = augment.augment(
        ap, self.input_lt, self.target_lt)

  def test_name(self):
    self.assertIn('augment/input', self.input_augment_lt.name)
    self.assertIn('augment/target', self.target_augment_lt.name)

  def test(self):
    self.assertEqual(self.input_augment_lt.axes, self.input_lt.axes)
    self.assertEqual(self.target_augment_lt.axes, self.target_lt.axes)

    self.save_images('augment', [
        self.get_images('input_', self.input_augment_lt),
        self.get_images('target_', self.target_augment_lt)
    ])
    self.assert_images_near('augment', True)


if __name__ == '__main__':
  test.main()
| google/in-silico-labeling | isl/augment_test.py | Python | apache-2.0 | 2,507 |
# coding: utf8
from functools import wraps
from logging import getLogger

logger = getLogger(__name__)

__author__ = 'marcos.costa'


class request_logger(object):

    def __init__(self, method=None):
        self.method = method

    def __call__(self, func):
        method = self.method
        if method is None:
            # Python 2 attribute; under Python 3 this would be func.__name__
            method = func.func_name

        @wraps(func)
        def wrapper(instance, request, *args, **kwargs):
            response = func(instance, request, *args, **kwargs)
            msg = ("\nCalled method: {method}\nrequest: {request}"
                   "\nresponse: {response}").format(method=method,
                                                    request=request,
                                                    response=response)
            logger.info(msg)
            return response
        return wrapper
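
# The demo below is a minimal, hypothetical usage sketch (not part of the
# original module): `FakeService` and the dict passed as `request` are
# invented for illustration; the decorator only assumes the wrapped method
# takes (self, request, ...) and that a logging handler is configured.
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.INFO)

    class FakeService(object):
        @request_logger(method='authorize')
        def authorize(self, request):
            return {'status': 'authorized'}

    # Logs the method name, the request dict, and the returned response.
    FakeService().authorize({'amount': '10.00'})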
| GTACSolutions/python-braspag | python_braspag/decorators.py | Python | apache-2.0 | 849 |
import npyscreen
import os
import re
import sys
import time
from docker.errors import DockerException
from npyscreen import notify_confirm
from threading import Thread
from vent.api.actions import Action
from vent.api.menu_helpers import MenuHelper
from vent.helpers.meta import Containers
from vent.helpers.meta import Cpu
from vent.helpers.meta import DropLocation
from vent.helpers.meta import Gpu
from vent.helpers.meta import Images
from vent.helpers.meta import Jobs
from vent.helpers.meta import Timestamp
from vent.helpers.meta import Uptime
from vent.helpers.logs import Logger
from vent.helpers.paths import PathDirs
from vent.menus.add import AddForm
from vent.menus.ntap import CreateNTap
from vent.menus.ntap import DeleteNTap
from vent.menus.ntap import ListNTap
from vent.menus.ntap import NICsNTap
from vent.menus.ntap import StartNTap
from vent.menus.ntap import StopNTap
from vent.menus.backup import BackupForm
from vent.menus.editor import EditorForm
from vent.menus.inventory_forms import InventoryCoreToolsForm
from vent.menus.inventory_forms import InventoryToolsForm
from vent.menus.logs import LogsForm
from vent.menus.services import ServicesForm
from vent.menus.tools import ToolForm
class MainForm(npyscreen.FormBaseNewWithMenus):
""" Main information landing form for the Vent CLI """
@staticmethod
def exit(*args, **kwargs):
os.system('reset')
os.system('stty sane')
try:
sys.exit(0)
except SystemExit: # pragma: no cover
os._exit(0)
@staticmethod
def t_status(core):
""" Get status of tools for either plugins or core """
m_helper = MenuHelper()
repos, tools = m_helper.tools_status(core)
installed = 0
custom_installed = 0
built = 0
custom_built = 0
running = 0
custom_running = 0
normal = str(len(tools['normal']))
# determine how many extra instances should be shown for running
norm = set(tools['normal'])
inst = set(tools['installed'])
run_str = str(len(tools['normal']) + len(inst - norm))
for tool in tools['running']:
# check for multi instances too for running
if tool in tools['normal']:
running += 1
elif re.sub(r'\d+$', '', tool) in tools['normal']:
running += 1
else:
custom_running += 1
for tool in tools['built']:
if tool in tools['normal']:
built += 1
else:
custom_built += 1
for tool in tools['installed']:
if tool in tools['normal']:
installed += 1
elif re.sub(r'\d+$', '', tool) not in tools['normal']:
custom_installed += 1
tools_str = str(running + custom_running) + "/" + run_str + " running"
if custom_running > 0:
tools_str += " (" + str(custom_running) + " custom)"
tools_str += ", " + str(built + custom_built) + "/" + normal + " built"
if custom_built > 0:
tools_str += " (" + str(custom_built) + " custom)"
tools_str += ", " + str(installed + custom_installed) + "/" + normal
tools_str += " installed"
if custom_installed > 0:
tools_str += " (" + str(custom_installed) + " custom)"
return tools_str, (running, custom_running, normal, repos)
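# Illustrative result (hypothetical numbers): with three normal core tools,
# all installed, built, and running, t_status(True) returns a string roughly
# like "3/3 running, 3/3 built, 3/3 installed", with "(N custom)" appended to
# a segment whenever non-default containers or images are present.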
def while_waiting(self):
""" Update fields periodically if nothing is happening """
# give a little extra time for file descriptors to close
time.sleep(0.1)
self.addfield.value = Timestamp()
self.addfield.display()
self.addfield2.value = Uptime()
self.addfield2.display()
self.addfield3.value = str(len(Containers()))+" running"
if len(Containers()) > 0:
self.addfield3.labelColor = "GOOD"
else:
self.addfield3.labelColor = "DEFAULT"
self.addfield3.display()
# update core tool status
self.addfield5.value, values = MainForm.t_status(True)
if values[0] + values[1] == 0:
color = "DANGER"
self.addfield4.labelColor = "CAUTION"
self.addfield4.value = "Idle"
elif values[0] >= int(values[2]):
color = "GOOD"
self.addfield4.labelColor = color
self.addfield4.value = "Ready to start jobs"
else:
color = "CAUTION"
self.addfield4.labelColor = color
self.addfield4.value = "Ready to start jobs"
self.addfield5.labelColor = color
# update plugin tool status
plugin_str, values = MainForm.t_status(False)
plugin_str += ", " + str(values[3]) + " plugin(s) installed"
self.addfield6.value = plugin_str
# get jobs
jobs = Jobs()
# number of jobs, number of tool containers
self.addfield7.value = str(jobs[0]) + " jobs running (" + str(jobs[1])
self.addfield7.value += " tool containers), " + str(jobs[2])
self.addfield7.value += " completed jobs"
if jobs[0] > 0:
self.addfield4.labelColor = "GOOD"
self.addfield4.value = "Processing jobs"
self.addfield7.labelColor = "GOOD"
else:
self.addfield7.labelColor = "DEFAULT"
self.addfield4.display()
self.addfield5.display()
self.addfield6.display()
self.addfield7.display()
# if file drop location changes deal with it
logger = Logger(__name__)
status = (False, None)
if self.file_drop.value != DropLocation()[1]:
logger.info("Starting: file drop restart")
try:
self.file_drop.value = DropLocation()[1]
logger.info("Path given: " + str(self.file_drop.value))
# restart if the path is valid
if DropLocation()[0]:
status = self.api_action.clean(name='file_drop')
status = self.api_action.prep_start(name='file_drop')
else:
logger.error("file drop path name invalid" +
DropLocation()[1])
if status[0]:
tool_d = status[1]
status = self.api_action.start(tool_d)
logger.info("Status of file drop restart: " +
str(status[0]))
except Exception as e: # pragma: no cover
logger.error("file drop restart failed with error: " + str(e))
logger.info("Finished: file drop restart")
self.file_drop.display()
return
@staticmethod
def core_tools(action):
""" Perform actions for core tools """
def diff(first, second):
"""
Get the elements that exist in the first list and not in the second
"""
second = set(second)
return [item for item in first if item not in second]
def popup(original, orig_type, thr, title):
"""
Start the thread and display a popup of info
until the thread is finished
"""
thr.start()
info_str = ""
while thr.is_alive():
if orig_type == 'containers':
info = diff(Containers(), original)
elif orig_type == 'images':
info = diff(Images(), original)
if info:
info_str = ""
for entry in info:
# TODO limit length of info_str to fit box
info_str += entry[0]+": "+entry[1]+"\n"
npyscreen.notify_wait(info_str, title=title)
time.sleep(1)
return
if action == 'install':
original_images = Images()
m_helper = MenuHelper()
thr = Thread(target=m_helper.cores, args=(),
kwargs={"action": "install"})
popup(original_images, "images", thr,
'Please wait, installing core containers...')
notify_confirm("Done installing core containers (any"
" already installed tools untouched).",
title='Installed core containers')
return
def add_form(self, form, form_name, form_args):
""" Add new form and switch to it """
self.parentApp.addForm(form_name, form, **form_args)
self.parentApp.change_form(form_name)
return
def remove_forms(self, form_names):
""" Remove all forms supplied """
for form in form_names:
try:
self.parentApp.removeForm(form)
except Exception as e: # pragma: no cover
pass
return
def perform_action(self, action):
""" Perform actions in the api from the CLI """
form = ToolForm
s_action = action.split("_")[0]
if 'core' in action:
form_action = s_action + ' (only core tools are shown)'
form_name = s_action.title() + " core tools"
cores = True
else:
form_action = s_action + ' (only plugin tools are shown)'
form_name = s_action.title() + " tools"
cores = False
a_type = 'containers'
if s_action in ['build']:
a_type = 'images'
forms = [action.upper() + 'TOOLS']
form_args = {'color': 'CONTROL',
'names': [s_action],
'name': form_name,
'action_dict': {'action_name': s_action,
'present_t': s_action + 'ing ' + a_type,
'past_t': s_action.title() + ' ' + a_type,
'action': form_action,
'type': a_type,
'cores': cores}}
# grammar rules
vowels = ['a', 'e', 'i', 'o', 'u']
# consonant-vowel-consonant ending
# Eg: stop -> stopping
if s_action[-1] not in vowels and \
s_action[-2] in vowels and \
s_action[-3] not in vowels:
form_args['action_dict']['present_t'] = s_action + \
s_action[-1] + 'ing ' + a_type
# word ends with a 'e'
# eg: remove -> removing
if s_action[-1] == 'e':
form_args['action_dict']['present_t'] = s_action[:-1] \
+ 'ing ' + a_type
if s_action == 'start':
form_args['names'].append('prep_start')
elif s_action == 'configure':
form_args['names'].pop()
form_args['names'].append('get_configure')
form_args['names'].append('save_configure')
form_args['names'].append('restart_tools')
if action == 'add':
form = AddForm
forms = ['ADD', 'ADDOPTIONS', 'CHOOSETOOLS']
form_args['name'] = "Add plugins"
form_args['name'] += "\t"*6 + "^Q to quit"
elif action == "inventory":
form = InventoryToolsForm
forms = ['INVENTORY']
form_args = {'color': "STANDOUT", 'name': "Inventory of tools"}
elif action == 'logs':
form = LogsForm
forms = ['LOGS']
form_args = {'color': "STANDOUT", 'name': "Logs"}
elif action == 'services_core':
form = ServicesForm
forms = ['SERVICES']
form_args = {'color': "STANDOUT",
'name': "Core Services",
'core': True}
elif action == 'services':
form = ServicesForm
forms = ['SERVICES']
form_args = {'color': "STANDOUT",
'name': "Plugin Services",
'core': False}
elif action == 'services_external':
form = ServicesForm
forms = ['SERVICES']
form_args = {'color': "STANDOUT",
'name': "External Services",
'core': False,
'external': True}
elif action == "inventory_core":
form = InventoryCoreToolsForm
forms = ['COREINVENTORY']
form_args = {'color': "STANDOUT",
'name': "Inventory of core tools"}
form_args['name'] += "\t"*8 + "^T to toggle main"
if s_action in self.view_togglable:
form_args['name'] += "\t"*8 + "^V to toggle group view"
try:
self.remove_forms(forms)
thr = Thread(target=self.add_form, args=(),
kwargs={'form': form,
'form_name': forms[0],
'form_args': form_args})
thr.start()
while thr.is_alive():
npyscreen.notify('Please wait, loading form...',
title='Loading')
time.sleep(1)
except Exception as e: # pragma: no cover
pass
return
def switch_tutorial(self, action):
""" Tutorial forms """
if action == "background":
self.parentApp.change_form('TUTORIALBACKGROUND')
elif action == "terminology":
self.parentApp.change_form('TUTORIALTERMINOLOGY')
elif action == "setup":
self.parentApp.change_form('TUTORIALGETTINGSETUP')
elif action == "building_cores":
self.parentApp.change_form('TUTORIALBUILDINGCORES')
elif action == "starting_cores":
self.parentApp.change_form('TUTORIALSTARTINGCORES')
elif action == "adding_plugins":
self.parentApp.change_form('TUTORIALADDINGPLUGINS')
elif action == "adding_files":
self.parentApp.change_form('TUTORIALADDINGFILES')
elif action == "basic_troubleshooting":
self.parentApp.change_form('TUTORIALTROUBLESHOOTING')
return
def system_commands(self, action):
""" Perform system commands """
if action == 'backup':
status = self.api_action.backup()
if status[0]:
notify_confirm("Vent backup successful")
else:
notify_confirm("Vent backup could not be completed")
elif action == 'configure':
form_args = {'name': 'Change vent configuration',
'get_configure': self.api_action.get_configure,
'save_configure': self.api_action.save_configure,
'restart_tools': self.api_action.restart_tools,
'vent_cfg': True}
add_kargs = {'form': EditorForm,
'form_name': 'CONFIGUREVENT',
'form_args': form_args}
self.add_form(**add_kargs)
elif action == "reset":
okay = npyscreen.notify_ok_cancel(
"This factory reset will remove ALL of Vent's user data, "
"containers, and images. Are you sure?",
title="Confirm system command")
if okay:
status = self.api_action.reset()
if status[0]:
notify_confirm("Vent reset complete. "
"Press OK to exit Vent Manager console.")
else:
notify_confirm(status[1])
MainForm.exit()
elif action == "gpu":
gpu = Gpu(pull=True)
if gpu[0]:
notify_confirm("GPU detection successful. "
"Found: " + gpu[1])
else:
if gpu[1] == "Unknown":
notify_confirm("Unable to detect GPUs, try `make gpu` "
"from the vent repository directory. "
"Error: " + str(gpu[2]))
else:
notify_confirm("No GPUs detected.")
elif action == 'restore':
backup_dir_home = os.path.expanduser('~')
backup_dirs = [f for f in os.listdir(backup_dir_home) if
f.startswith('.vent-backup')]
form_args = {'restore': self.api_action.restore,
'dirs': backup_dirs,
'name': "Pick a version to restore from" + "\t"*8 +
"^T to toggle main",
'color': 'CONTROL'}
add_kargs = {'form': BackupForm,
'form_name': 'CHOOSEBACKUP',
'form_args': form_args}
self.add_form(**add_kargs)
elif action == "swarm":
# !! TODO
# add notify_cancel_ok popup once implemented
pass
elif action == "upgrade":
# !! TODO
# add notify_cancel_ok popup once implemented
pass
# deal with all network tap actions
elif 'ntap' in action:
# check if the tool is installed, built, and running
output = self.api_action.tool_status_output('network_tap')
# create a dict with substring as keys and forms as values
ntap_form = {'create': CreateNTap,
'delete': DeleteNTap,
'list': ListNTap,
'nics': NICsNTap,
'start': StartNTap,
'stop': StopNTap}
if output[0]:
if output[1]:
notify_confirm(output[1])
else:
# actions regarding ntap come in the form of 'ntapcreate',
# 'ntapdelete', etc.
tap_action = action.split('ntap')[1]
form_args = {'color': 'CONTROL',
'name': 'Network Tap Interface ' +
tap_action + "\t"*6 +
'^T to toggle main'}
self.add_form(ntap_form[tap_action], "Network Tap " +
tap_action.title(), form_args)
return
def create(self):
""" Override method for creating FormBaseNewWithMenu form """
try:
self.api_action = Action()
except DockerException as de: # pragma: no cover
notify_confirm(str(de),
title="Docker Error",
form_color='DANGER',
wrap=True)
MainForm.exit()
self.add_handlers({"^T": self.help_form, "^Q": MainForm.exit})
# all forms that can toggle view by group
self.view_togglable = ['inventory', 'remove', 'update', 'enable',
'disable', 'build']
#######################
# MAIN SCREEN WIDGETS #
#######################
self.addfield = self.add(npyscreen.TitleFixedText, name='Date:',
labelColor='DEFAULT', value=Timestamp())
self.addfield2 = self.add(npyscreen.TitleFixedText, name='Uptime:',
labelColor='DEFAULT', value=Uptime())
self.cpufield = self.add(npyscreen.TitleFixedText,
name='Logical CPUs:',
labelColor='DEFAULT', value=Cpu())
self.gpufield = self.add(npyscreen.TitleFixedText, name='GPUs:',
labelColor='DEFAULT', value=Gpu()[1])
self.location = self.add(npyscreen.TitleFixedText,
name='User Data:',
value=PathDirs().meta_dir,
labelColor='DEFAULT')
self.file_drop = self.add(npyscreen.TitleFixedText,
name='File Drop:',
value=DropLocation()[1],
labelColor='DEFAULT')
self.addfield3 = self.add(npyscreen.TitleFixedText, name='Containers:',
labelColor='DEFAULT',
value="0 "+" running")
self.addfield4 = self.add(npyscreen.TitleFixedText, name='Status:',
labelColor='CAUTION',
value="Idle")
self.addfield5 = self.add(npyscreen.TitleFixedText,
name='Core Tools:', labelColor='DANGER',
value="Not built")
self.addfield6 = self.add(npyscreen.TitleFixedText,
name='Plugin Tools:', labelColor='DEFAULT',
value="Not built")
self.addfield7 = self.add(npyscreen.TitleFixedText, name='Jobs:',
value="0 jobs running (0 tool containers),"
" 0 completed jobs", labelColor='DEFAULT')
self.multifield1 = self.add(npyscreen.MultiLineEdit, max_height=22,
editable=False, value="""
'.,
'b *
'$ #.
$: #:
*# @):
:@,@): ,.**:'
, :@@*: ..**'
'#o. .:(@'.@*"'
'bq,..:,@@*' ,*
,p$q8,:@)' .p*'
' '@@Pp@@*'
Y7'.'
:@):.
.:@:'.
.::(@:.
_
__ _____ _ __ | |_
\ \ / / _ \ '_ \| __|
\ V / __/ | | | |_
\_/ \___|_| |_|\__|
""")
################
# MENU OPTIONS #
################
# Core Tools Menu Items
self.m2 = self.add_menu(name="Core Tools", shortcut="c")
self.m2.addItem(text='Add all latest core tools',
onSelect=MainForm.core_tools,
arguments=['install'], shortcut='i')
self.m2.addItem(text='Build core tools',
onSelect=self.perform_action,
arguments=['build_core'], shortcut='b')
self.m2.addItem(text='Clean core tools',
onSelect=self.perform_action,
arguments=['clean_core'], shortcut='c')
self.m2.addItem(text='Configure core tools',
onSelect=self.perform_action,
arguments=['configure_core'], shortcut='t')
self.m2.addItem(text='Disable core tools',
onSelect=self.perform_action,
arguments=['disable_core'], shortcut='d')
self.m2.addItem(text='Enable core tools',
onSelect=self.perform_action,
arguments=['enable_core'], shortcut='e')
self.m2.addItem(text='Inventory of core tools',
onSelect=self.perform_action,
arguments=['inventory_core'], shortcut='v')
self.m2.addItem(text='Remove core tools',
onSelect=self.perform_action,
arguments=['remove_core'], shortcut='r')
self.m2.addItem(text='Start core tools',
onSelect=self.perform_action,
arguments=['start_core'], shortcut='s')
self.m2.addItem(text='Stop core tools',
onSelect=self.perform_action,
arguments=['stop_core'], shortcut='p')
self.m2.addItem(text='Update core tools',
onSelect=self.perform_action,
arguments=['update_core'], shortcut='u')
# Plugin Menu Items
self.m3 = self.add_menu(name="Plugins", shortcut="p")
self.m3.addItem(text='Add new plugin',
onSelect=self.perform_action,
arguments=['add'], shortcut='a')
self.m3.addItem(text='Build plugin tools',
onSelect=self.perform_action,
arguments=['build'], shortcut='b')
self.m3.addItem(text='Clean plugin tools',
onSelect=self.perform_action,
arguments=['clean'], shortcut='c')
self.m3.addItem(text='Configure plugin tools',
onSelect=self.perform_action,
arguments=['configure'], shortcut='t')
self.m3.addItem(text='Disable plugin tools',
onSelect=self.perform_action,
arguments=['disable'], shortcut='d')
self.m3.addItem(text='Enable plugin tools',
onSelect=self.perform_action,
arguments=['enable'], shortcut='e')
self.m3.addItem(text='Inventory of installed plugins',
onSelect=self.perform_action,
arguments=['inventory'], shortcut='i')
self.m3.addItem(text='Remove plugins',
onSelect=self.perform_action,
arguments=['remove'], shortcut='r')
self.m3.addItem(text='Start plugin tools',
onSelect=self.perform_action,
arguments=['start'], shortcut='s')
self.m3.addItem(text='Stop plugin tools',
onSelect=self.perform_action,
arguments=['stop'], shortcut='p')
self.m3.addItem(text='Update plugins',
onSelect=self.perform_action,
arguments=['update'], shortcut='u')
# Log Menu Items
self.m4 = self.add_menu(name="Logs", shortcut="l")
self.m4.addItem(text='Get container logs', arguments=['logs'],
onSelect=self.perform_action)
# Services Menu Items
self.m5 = self.add_menu(name="Services Running", shortcut='s')
self.m5.addItem(text='Core Services', onSelect=self.perform_action,
arguments=['services_core'], shortcut='c')
self.m5.addItem(text='External Services', onSelect=self.perform_action,
arguments=['services_external'], shortcut='e')
self.m5.addItem(text='Plugin Services',
onSelect=self.perform_action,
arguments=['services'], shortcut='p')
# System Commands Menu Items
self.m6 = self.add_menu(name="System Commands", shortcut='y')
self.m6.addItem(text='Backup', onSelect=self.system_commands,
arguments=['backup'], shortcut='b')
self.m6.addItem(text='Change vent configuration',
onSelect=self.system_commands, arguments=['configure'],
shortcut='c')
self.m6.addItem(text='Detect GPUs', onSelect=self.system_commands,
arguments=['gpu'], shortcut='g')
self.m6.addItem(text='Enable Swarm Mode (To Be Implemented...)',
onSelect=self.system_commands,
arguments=['swarm'], shortcut='s')
self.m6.addItem(text='Factory reset', onSelect=self.system_commands,
arguments=['reset'], shortcut='r')
self.s6 = self.m6.addNewSubmenu(name='Network Tap Interface',
shortcut='n')
self.m6.addItem(text='Restore', onSelect=self.system_commands,
arguments=['restore'], shortcut='t')
self.m6.addItem(text='Upgrade (To Be Implemented...)',
onSelect=self.system_commands,
arguments=['upgrade'], shortcut='u')
self.s6.addItem(text='Create', onSelect=self.system_commands,
shortcut='c', arguments=['ntapcreate'])
self.s6.addItem(text='Delete', onSelect=self.system_commands,
shortcut='d', arguments=['ntapdelete'])
self.s6.addItem(text='List', onSelect=self.system_commands,
shortcut='l', arguments=['ntaplist'])
self.s6.addItem(text='NICs', onSelect=self.system_commands,
shortcut='n', arguments=['ntapnics'])
self.s6.addItem(text='Start', onSelect=self.system_commands,
shortcut='s', arguments=['ntapstart'])
self.s6.addItem(text='Stop', onSelect=self.system_commands,
shortcut='t', arguments=['ntapstop'])
# Tutorial Menu Items
self.m7 = self.add_menu(name="Tutorials", shortcut="t")
self.s1 = self.m7.addNewSubmenu(name="About Vent", shortcut='v')
self.s1.addItem(text="Background", onSelect=self.switch_tutorial,
arguments=['background'], shortcut='b')
self.s1.addItem(text="Terminology", onSelect=self.switch_tutorial,
arguments=['terminology'], shortcut='t')
self.s1.addItem(text="Getting Setup", onSelect=self.switch_tutorial,
arguments=['setup'], shortcut='s')
self.s2 = self.m7.addNewSubmenu(name="Working with Cores",
shortcut='c')
self.s2.addItem(text="Building Cores", onSelect=self.switch_tutorial,
arguments=['building_cores'], shortcut='b')
self.s2.addItem(text="Starting Cores", onSelect=self.switch_tutorial,
arguments=['starting_cores'], shortcut='c')
self.s3 = self.m7.addNewSubmenu(name="Working with Plugins",
shortcut='p')
self.s3.addItem(text="Adding Plugins", onSelect=self.switch_tutorial,
arguments=['adding_plugins'], shortcut='a')
self.s4 = self.m7.addNewSubmenu(name="Files", shortcut='f')
self.s4.addItem(text="Adding Files", onSelect=self.switch_tutorial,
arguments=['adding_files'], shortcut='a')
self.s5 = self.m7.addNewSubmenu(name="Help", shortcut='s')
self.s5.addItem(text="Basic Troubleshooting",
onSelect=self.switch_tutorial,
arguments=['basic_troubleshooting'], shortcut='t')
def help_form(self, *args, **keywords):
""" Toggles to help """
self.parentApp.change_form("HELP")
| lilchurro/vent | vent/menus/main.py | Python | apache-2.0 | 30,458 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This is CoGroupByKey load test with Synthetic Source. Besides of the standard
input options there are additional options:
* project (optional) - the gcp project in case of saving
metrics in Big Query (in case of Dataflow Runner
it is required to specify project of runner),
* metrics_namespace (optional) - name of BigQuery table where metrics
will be stored,
in case of lack of any of both options metrics won't be saved
* input_options - options for Synthetic Sources
* co_input_options - options for Synthetic Sources.
Example test run on DirectRunner:
python setup.py nosetests \
--test-pipeline-options="
--project=big-query-project
--metrics_dataset=python_load_tests
--metrics_table=co_gbk
--input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\":0}'
--co_input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\":0}'" \
--tests apache_beam.testing.load_tests.co_group_by_key_test
To run test on other runner (ex. Dataflow):
python setup.py nosetests \
--test-pipeline-options="
--runner=TestDataflowRunner
--project=...
--staging_location=gs://...
--temp_location=gs://...
--sdk_location=./dist/apache-beam-x.x.x.dev0.tar.gz
--metrics_dataset=python_load_tests
--metrics_table=co_gbk
--input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\":0
}'
--co_input_options='{
\"num_records\": 1000,
\"key_size\": 5,
\"value_size\":15,
\"bundle_size_distribution_type\": \"const\",
\"bundle_size_distribution_param\": 1,
\"force_initial_num_bundles\":0
}'" \
--tests apache_beam.testing.load_tests.co_group_by_key_test
"""
from __future__ import absolute_import
import json
import logging
import unittest
import apache_beam as beam
from apache_beam.testing import synthetic_pipeline
from apache_beam.testing.test_pipeline import TestPipeline
try:
from apache_beam.testing.load_tests.load_test_metrics_utils import MeasureTime
from apache_beam.testing.load_tests.load_test_metrics_utils import MetricsMonitor
from google.cloud import bigquery as bq
except ImportError:
bq = None
INPUT_TAG = 'pc1'
CO_INPUT_TAG = 'pc2'
RUNTIME_LABEL = 'runtime'
@unittest.skipIf(bq is None, 'BigQuery for storing metrics not installed')
class CoGroupByKeyTest(unittest.TestCase):
def parseTestPipelineOptions(self, options):
return {
'numRecords': options.get('num_records'),
'keySizeBytes': options.get('key_size'),
'valueSizeBytes': options.get('value_size'),
'bundleSizeDistribution': {
'type': options.get(
'bundle_size_distribution_type', 'const'
),
'param': options.get('bundle_size_distribution_param', 0)
},
'forceNumInitialBundles': options.get(
'force_initial_num_bundles', 0
)
}
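# For example, the --input_options JSON shown in the module docstring
# (num_records=1000, key_size=5, value_size=15, a 'const' bundle size
# distribution with param 1, force_initial_num_bundles=0) is translated by
# parseTestPipelineOptions into:
# {'numRecords': 1000, 'keySizeBytes': 5, 'valueSizeBytes': 15,
# 'bundleSizeDistribution': {'type': 'const', 'param': 1},
# 'forceNumInitialBundles': 0}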
def setUp(self):
self.pipeline = TestPipeline(is_integration_test=True)
self.input_options = json.loads(self.pipeline.get_option('input_options'))
self.co_input_options = json.loads(
self.pipeline.get_option('co_input_options'))
metrics_project_id = self.pipeline.get_option('project')
self.metrics_namespace = self.pipeline.get_option('metrics_table')
metrics_dataset = self.pipeline.get_option('metrics_dataset')
self.metrics_monitor = None
check = metrics_project_id and self.metrics_namespace and metrics_dataset\
is not None
if check:
measured_values = [{'name': RUNTIME_LABEL,
'type': 'FLOAT',
'mode': 'REQUIRED'}]
self.metrics_monitor = MetricsMonitor(
project_name=metrics_project_id,
table=self.metrics_namespace,
dataset=metrics_dataset,
schema_map=measured_values
)
else:
logging.error('One or more of the parameters required for collecting '
'metrics are empty. Metrics will not be collected.')
class _Ungroup(beam.DoFn):
def process(self, element):
values = element[1]
inputs = values.get(INPUT_TAG)
co_inputs = values.get(CO_INPUT_TAG)
for i in inputs:
yield i
for i in co_inputs:
yield i
def testCoGroupByKey(self):
with self.pipeline as p:
pc1 = (p
| 'Read ' + INPUT_TAG >> beam.io.Read(
synthetic_pipeline.SyntheticSource(
self.parseTestPipelineOptions(self.input_options)))
| 'Make ' + INPUT_TAG + ' iterable' >> beam.Map(lambda x: (x, x))
| 'Measure time: Start pc1' >> beam.ParDo(
MeasureTime(self.metrics_namespace))
)
pc2 = (p
| 'Read ' + CO_INPUT_TAG >> beam.io.Read(
synthetic_pipeline.SyntheticSource(
self.parseTestPipelineOptions(self.co_input_options)))
| 'Make ' + CO_INPUT_TAG + ' iterable' >> beam.Map(
lambda x: (x, x))
| 'Measure time: Start pc2' >> beam.ParDo(
MeasureTime(self.metrics_namespace))
)
# pylint: disable=expression-not-assigned
({INPUT_TAG: pc1, CO_INPUT_TAG: pc2}
| 'CoGroupByKey: ' >> beam.CoGroupByKey()
| 'Consume Joined Collections' >> beam.ParDo(self._Ungroup())
| 'Measure time: End' >> beam.ParDo(MeasureTime(self.metrics_namespace))
)
result = p.run()
result.wait_until_finish()
if self.metrics_monitor is not None:
self.metrics_monitor.send_metrics(result)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| mxm/incubator-beam | sdks/python/apache_beam/testing/load_tests/co_group_by_key_test.py | Python | apache-2.0 | 7,004 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import itertools

from . import exceptions


async def get_files_for_tasks(task_list, file_list, max_workers):
    no_files_found = True

    async def process(task_fname):
        task, fname = task_fname
        try:
            fobj = await task.file(fname)
        except exceptions.SlaveDoesNotExist:
            if task is None:
                print(f"(Unknown Task):{fname} (Slave no longer exists)")
            else:
                print(f"{task['id']}:{fname} (Slave no longer exists)")
            raise exceptions.SkipResult
        if await fobj.exists():
            return fobj

    elements = itertools.chain(
        *[[(task, fname) for fname in file_list] for task in task_list]
    )
    futures = [asyncio.ensure_future(process(element)) for element in elements]
    if futures:
        for result in asyncio.as_completed(futures):
            try:
                result = await result
                if result:
                    no_files_found = False
                    yield result
            except exceptions.SkipResult:
                pass
    if no_files_found:
        raise exceptions.FileNotFoundForTaskException(
            "None of the tasks in {} contain the files in list {}".format(
                ",".join([task["id"] for task in task_list]), ",".join(file_list)
            )
        )
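
# Illustrative use (hypothetical): `mesos_tasks` stands in for the task
# objects obtained elsewhere in paasta_tools, each exposing `task.file(name)`
# and `task["id"]`. Only files that exist on their slave are yielded.
#
#     async for fobj in get_files_for_tasks(mesos_tasks, ["stdout", "stderr"], 10):
#         print(fobj)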
| Yelp/paasta | paasta_tools/mesos/cluster.py | Python | apache-2.0 | 2,143 |
# Copyright 2016 Sungard Availability Services
# Copyright 2016 Red Hat
# Copyright 2012 eNovance <licensing@enovance.com>
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils

from ceilometer.agent import plugin_base
from ceilometer.i18n import _LW
from ceilometer import neutron_client
from ceilometer import sample

LOG = log.getLogger(__name__)

cfg.CONF.import_group('service_types', 'ceilometer.neutron_client')


class FloatingIPPollster(plugin_base.PollsterBase):

    STATUS = {
        'inactive': 0,
        'active': 1,
        'pending_create': 2,
    }

    def __init__(self):
        self.neutron_cli = neutron_client.Client()

    @property
    def default_discovery(self):
        return 'endpoint:%s' % cfg.CONF.service_types.neutron

    @staticmethod
    def _form_metadata_for_fip(fip):
        """Return a metadata dictionary for the fip usage data."""
        metadata = {
            'router_id': fip.get("router_id"),
            'status': fip.get("status"),
            'floating_network_id': fip.get("floating_network_id"),
            'fixed_ip_address': fip.get("fixed_ip_address"),
            'port_id': fip.get("port_id"),
            'floating_ip_address': fip.get("floating_ip_address")
        }
        return metadata

    def get_samples(self, manager, cache, resources):
        for fip in self.neutron_cli.fip_get_all():
            status = self.STATUS.get(fip['status'].lower())
            if status is None:
                LOG.warning(_LW("Invalid status, skipping IP address %s") %
                            fip['floating_ip_address'])
                continue
            res_metadata = self._form_metadata_for_fip(fip)
            yield sample.Sample(
                name='ip.floating',
                type=sample.TYPE_GAUGE,
                unit='ip',
                volume=status,
                user_id=fip.get('user_id'),
                project_id=fip['tenant_id'],
                resource_id=fip['id'],
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata=res_metadata
            )
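
# Illustrative behaviour of get_samples() above: a floating IP that Neutron
# reports with status "ACTIVE" is emitted as a gauge sample named
# "ip.floating" with volume 1, "INACTIVE" maps to 0, "PENDING_CREATE" to 2,
# and any status not present in STATUS is skipped with a warning.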
| idegtiarov/ceilometer | ceilometer/network/floatingip.py | Python | apache-2.0 | 2,715 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bleeding-edge version of Unicode Character Database.
Provides an interface similar to Python's own unicodedata package, but with
the bleeding-edge data. The implementation is not efficient at all, it's
just done this way for the ease of use. The data is coming from bleeding
edge version of the Unicode Standard not yet published, so it is expected to
be unstable and sometimes inconsistent.
"""
__author__ = (
"roozbeh@google.com (Roozbeh Pournader) and " "cibu@google.com (Cibu Johny)"
)
import codecs
import collections
import os
from os import path
import re
from nototools.py23 import unichr, unicode, basestring
try:
import unicodedata2 as unicodedata # Unicode 8 compliant native lib
except ImportError:
import unicodedata # Python's internal library
from nototools import tool_utils # parse_int_ranges
# Update this when we update the base version data we use
UNICODE_VERSION = 14.0
_data_is_loaded = False
_property_value_aliases_data = {}
_character_names_data = {}
_general_category_data = {}
_combining_class_data = {}
_decomposition_data = {}
_bidi_mirroring_characters = set()
_script_data = {}
_script_extensions_data = {}
_block_data = {}
_block_range = {}
_block_names = []
_age_data = {}
_bidi_mirroring_glyph_data = {}
_core_properties_data = {}
_indic_positional_data = {}
_indic_syllabic_data = {}
_defined_characters = set()
_script_code_to_long_name = {}
_folded_script_name_to_code = {}
_lower_to_upper_case = {}
# emoji data
_presentation_default_emoji = None
_presentation_default_text = None
_emoji_modifier_base = None
_emoji = None
_emoji_variants = None
_emoji_variants_proposed = None
# non-emoji variant data
_variant_data = None
_variant_data_cps = None
# proposed emoji
_proposed_emoji_data = None
_proposed_emoji_data_cps = None
# emoji sequences
_emoji_sequence_data = None
_emoji_non_vs_to_canonical = None
_emoji_group_data = None
# nameslist/namealiases
_nameslist_see_also = None
_namealiases_alt_names = None
def load_data():
"""Loads the data files needed for the module.
Could be used by processes that care about controlling when the data is
loaded. Otherwise, data will be loaded the first time it's needed.
"""
global _data_is_loaded
if not _data_is_loaded:
_load_property_value_aliases_txt()
_load_unicode_data_txt()
_load_scripts_txt()
_load_script_extensions_txt()
_load_blocks_txt()
_load_derived_age_txt()
_load_derived_core_properties_txt()
_load_bidi_mirroring_txt()
_load_indic_data()
_load_emoji_data()
_load_emoji_sequence_data()
_load_unicode_emoji_variants()
_load_variant_data()
_load_proposed_emoji_data()
_load_nameslist_data()
_load_namealiases_data()
_data_is_loaded = True
def name(char, *args):
"""Returns the name of a character.
Raises a ValueError exception if the character is undefined, unless an
extra argument is given, in which case it will return that argument.
"""
if isinstance(char, int):
char = unichr(char)
# First try and get the name from unidata, which is faster and supports
# CJK and Hangul automatic names
try:
return unicodedata.name(char)
except ValueError as val_error:
cp = ord(char)
load_data()
if cp in _character_names_data:
return _character_names_data[cp]
elif (cp,) in _emoji_sequence_data:
return _emoji_sequence_data[(cp,)][0]
elif args:
return args[0]
else:
raise Exception('no name for "%0x"' % ord(char))
def _char_to_int(char):
"""Converts a potential character to its scalar value."""
if type(char) in [str, type(u"")]:
return ord(char)
else:
return char
def derived_props():
load_data()
return frozenset(_core_properties_data.keys())
def chars_with_property(propname):
load_data()
return frozenset(_core_properties_data[propname])
def category(char):
"""Returns the general category of a character."""
load_data()
char = _char_to_int(char)
try:
return _general_category_data[char]
except KeyError:
return "Cn" # Unassigned
def combining(char):
"""Returns the canonical combining class of a character."""
load_data()
char = _char_to_int(char)
try:
return _combining_class_data[char]
except KeyError:
return 0
def to_upper(char):
"""Returns the upper case for a lower case character.
This is not full upper casing, but simply reflects the 1-1
mapping in UnicodeData.txt."""
load_data()
cp = _char_to_int(char)
try:
if _general_category_data[cp] == "Ll":
return unichr(_lower_to_upper_case[cp])
except KeyError:
pass
return char
def canonical_decomposition(char):
"""Returns the canonical decomposition of a character as a Unicode string."""
load_data()
char = _char_to_int(char)
try:
return _decomposition_data[char]
except KeyError:
return u""
def script(char):
"""Returns the script property of a character as a four-letter code."""
load_data()
char = _char_to_int(char)
try:
return _script_data[char]
except KeyError:
return "Zzzz" # Unknown
def script_extensions(char):
"""Returns the script extensions property of a character.
The return value is a frozenset of four-letter script codes.
"""
load_data()
char = _char_to_int(char)
try:
return _script_extensions_data[char]
except KeyError:
return frozenset([script(char)])
def block(char):
"""Returns the block property of a character."""
load_data()
char = _char_to_int(char)
try:
return _block_data[char]
except KeyError:
return "No_Block"
def block_range(block):
"""Returns a range (first, last) of the named block."""
load_data()
return _block_range[block]
def block_chars(block):
"""Returns a frozenset of the cps in the named block."""
load_data()
first, last = _block_range[block]
return frozenset(range(first, last + 1))
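# For instance (per the bundled Blocks.txt), block_range("Basic Latin") is
# (0x0000, 0x007F) and block_chars("Basic Latin") is the frozenset of those
# 128 code points.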
def block_names():
"""Returns the names of the blocks in block order."""
load_data()
return _block_names[:]
def age(char):
"""Returns the age property of a character as a string.
Returns None if the character is unassigned."""
load_data()
char = _char_to_int(char)
try:
return _age_data[char]
except KeyError:
return None
# Uniscribe treats these ignorables (Hangul fillers) as spacing.
UNISCRIBE_USED_IGNORABLES = frozenset([0x115F, 0x1160, 0x3164, 0xFFA0])
def is_default_ignorable(char):
"""Returns true if the character has the Default_Ignorable property."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
return char in _core_properties_data["Default_Ignorable_Code_Point"]
def default_ignorables():
load_data()
return frozenset(_core_properties_data["Default_Ignorable_Code_Point"])
def is_defined(char):
"""Returns true if the character is defined in the Unicode Standard."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
return char in _defined_characters
def is_private_use(char):
"""Returns true if the characters is a private use character."""
return category(char) == "Co"
def mirrored(char):
"""Returns 1 if the characters is bidi mirroring, 0 otherwise."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
return int(char in _bidi_mirroring_characters)
def bidi_mirroring_glyph(char):
"""Returns the bidi mirroring glyph property of a character."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
try:
return _bidi_mirroring_glyph_data[char]
except KeyError:
return None
def mirrored_chars():
return frozenset(_bidi_mirroring_glyph_data.keys())
def indic_positional_category(char):
"""Returns the Indic positional category of a character."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
try:
return _indic_positional_data[char]
except KeyError:
return "NA"
def indic_syllabic_category(char):
"""Returns the Indic syllabic category of a character."""
load_data()
if isinstance(char, (str, unicode)):
char = ord(char)
try:
return _indic_syllabic_data[char]
except KeyError:
return "Other"
def create_script_to_chars():
"""Returns a mapping from script to defined characters, based on script and
extensions, for all scripts."""
load_data()
result = collections.defaultdict(set)
for cp in _defined_characters:
if cp in _script_data:
result[_script_data[cp]].add(cp)
if cp in _script_extensions_data:
for script in _script_extensions_data[cp]:
result[script].add(cp)
return result
_DEFINED_CHARACTERS_CACHE = {}
def defined_characters(version=None, scr=None):
"""Returns the set of all defined characters in the Unicode Standard."""
load_data()
# handle the common error where version is passed as a string, in which
# case the age test below would always (incorrectly) pass
if version is not None:
version = float(version)
try:
return _DEFINED_CHARACTERS_CACHE[(version, scr)]
except KeyError:
pass
characters = _defined_characters
if version is not None:
characters = {
char
for char in characters
if age(char) is not None and float(age(char)) <= version
}
if scr is not None:
characters = {
char
for char in characters
if script(char) == scr or scr in script_extensions(char)
}
characters = frozenset(characters)
_DEFINED_CHARACTERS_CACHE[(version, scr)] = characters
return characters
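# Example: defined_characters(9.0, "Latn") is the frozenset of characters
# whose age is at most 9.0 and whose script, or one of whose script
# extensions, is Latin; results are memoized in _DEFINED_CHARACTERS_CACHE
# keyed by (version, scr).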
_strip_re = re.compile(r"[-'_ ]+")
def _folded_script_name(script_name):
"""Folds a script name to its bare bones for comparison."""
# string.translate is changed by codecs, the method no longer takes two
# parameters and so script_name.translate(None, "'-_ ") fails to compile
return _strip_re.sub("", script_name).lower()
def script_code(script_name):
"""Returns the four-letter ISO 15924 code of a script from its long name."""
load_data()
folded_script_name = _folded_script_name(script_name)
try:
return _HARD_CODED_FOLDED_SCRIPT_NAME_TO_CODE[folded_script_name]
except:
return _folded_script_name_to_code.get(folded_script_name, "Zzzz")
# We use some standard script codes that are not assigned to a codepoint
# by unicode, e.g. Zsym. The data based off Scripts.txt doesn't contain
# these so we add them here. There are also a few names with punctuation
# that we special-case
_HARD_CODED_HUMAN_READABLE_SCRIPT_NAMES = {
"Aran": "Nastaliq", # not assigned
"Nkoo": "N'Ko",
"Phag": "Phags-pa",
"Piqd": "Klingon", # not assigned
"Zmth": "Math", # not assigned
"Zsye": "Emoji", # not assigned
"Zsym": "Symbols", # not assigned
}
_HARD_CODED_FOLDED_SCRIPT_NAME_TO_CODE = {
_folded_script_name(name): code
for code, name in _HARD_CODED_HUMAN_READABLE_SCRIPT_NAMES.items()
}
def human_readable_script_name(code):
"""Returns a human-readable name for the script code."""
try:
return _HARD_CODED_HUMAN_READABLE_SCRIPT_NAMES[code]
except KeyError:
load_data()
return _script_code_to_long_name[code]
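# Illustrative results (assuming the bundled UCD data files):
#   script(0x0041)                      -> "Latn"
#   script_code("Latin")                -> "Latn"
#   script_code("N'Ko")                 -> "Nkoo"  (name folding strips "'-_ ")
#   human_readable_script_name("Zsye")  -> "Emoji" (hard-coded above)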
def all_scripts():
"""Return a frozenset of all four-letter script codes."""
load_data()
return frozenset(_script_code_to_long_name.keys())
_DATA_DIR_PATH = path.join(
path.abspath(path.dirname(__file__)), os.pardir, "third_party", "ucd"
)
def open_unicode_data_file(data_file_name):
"""Opens a Unicode data file.
Args:
data_file_name: A string containing the filename of the data file.
Returns:
A file handle to the data file.
"""
filename = path.join(_DATA_DIR_PATH, data_file_name)
return codecs.open(filename, "r", "utf-8")
def _parse_code_ranges(input_data):
"""Reads Unicode code ranges with properties from an input string.
Reads a Unicode data file already imported into a string. The format is
the typical Unicode data file format with either one character or a
range of characters separated by a semicolon with a property value (and
potentially comments after a number sign, that will be ignored).
Example source data file:
http://www.unicode.org/Public/UNIDATA/Scripts.txt
Example data:
0000..001F ; Common # Cc [32] <control-0000>..<control-001F>
0020 ; Common # Zs SPACE
Args:
input_data: An input string, containing the data.
Returns:
A list of tuples corresponding to the input data, with each tuple
containing the beginning of the range, the end of the range, and the
property value for the range. For example:
[(0, 31, 'Common'), (32, 32, 'Common')]
"""
ranges = []
line_regex = re.compile(
r"^"
r"([0-9A-F]{4,6})" # first character code
r"(?:\.\.([0-9A-F]{4,6}))?" # optional second character code
r"\s*;\s*"
r"([^#]+)"
) # the data, up until the potential comment
for line in input_data.split("\n"):
match = line_regex.match(line)
if not match:
continue
first, last, data = match.groups()
if last is None:
last = first
first = int(first, 16)
last = int(last, 16)
data = data.rstrip()
ranges.append((first, last, data))
return ranges
def _parse_semicolon_separated_data(input_data):
"""Reads semicolon-separated Unicode data from an input string.
Reads a Unicode data file already imported into a string. The format is
the Unicode data file format with a list of values separated by
semicolons. The number of values may differ from one line to another.
Example source data file:
http://www.unicode.org/Public/UNIDATA/PropertyValueAliases.txt
Example data:
sc; Cher ; Cherokee
sc; Copt ; Coptic ; Qaac
Args:
input_data: An input string, containing the data.
Returns:
A list of lists corresponding to the input data, with each individual
list containing the values as strings. For example:
[['sc', 'Cher', 'Cherokee'], ['sc', 'Copt', 'Coptic', 'Qaac']]
"""
all_data = []
for line in input_data.split("\n"):
line = line.split("#", 1)[0].strip() # remove the comment
if not line:
continue
fields = line.split(";")
fields = [field.strip() for field in fields]
all_data.append(fields)
return all_data
def _load_unicode_data_txt():
"""Load character data from UnicodeData.txt."""
global _defined_characters
global _bidi_mirroring_characters
if _defined_characters:
return
with open_unicode_data_file("UnicodeData.txt") as unicode_data_txt:
unicode_data = _parse_semicolon_separated_data(unicode_data_txt.read())
for line in unicode_data:
code = int(line[0], 16)
char_name = line[1]
general_category = line[2]
combining_class = int(line[3])
decomposition = line[5]
if decomposition.startswith("<"):
# We only care about canonical decompositions
decomposition = ""
decomposition = decomposition.split()
decomposition = [unichr(int(char, 16)) for char in decomposition]
decomposition = "".join(decomposition)
bidi_mirroring = line[9] == "Y"
if general_category == "Ll":
upcode = line[12]
if upcode:
upper_case = int(upcode, 16)
_lower_to_upper_case[code] = upper_case
if char_name.endswith("First>"):
last_range_opener = code
elif char_name.endswith("Last>"):
# Ignore surrogates
if "Surrogate" not in char_name:
for char in range(last_range_opener, code + 1):
_general_category_data[char] = general_category
_combining_class_data[char] = combining_class
if bidi_mirroring:
_bidi_mirroring_characters.add(char)
_defined_characters.add(char)
else:
_character_names_data[code] = char_name
_general_category_data[code] = general_category
_combining_class_data[code] = combining_class
if bidi_mirroring:
_bidi_mirroring_characters.add(code)
_decomposition_data[code] = decomposition
_defined_characters.add(code)
_defined_characters = frozenset(_defined_characters)
_bidi_mirroring_characters = frozenset(_bidi_mirroring_characters)
def _load_scripts_txt():
"""Load script property from Scripts.txt."""
with open_unicode_data_file("Scripts.txt") as scripts_txt:
script_ranges = _parse_code_ranges(scripts_txt.read())
for first, last, script_name in script_ranges:
folded_script_name = _folded_script_name(script_name)
script = _folded_script_name_to_code[folded_script_name]
for char_code in range(first, last + 1):
_script_data[char_code] = script
def _load_script_extensions_txt():
"""Load script property from ScriptExtensions.txt."""
with open_unicode_data_file("ScriptExtensions.txt") as se_txt:
script_extensions_ranges = _parse_code_ranges(se_txt.read())
for first, last, script_names in script_extensions_ranges:
script_set = frozenset(script_names.split(" "))
for character_code in range(first, last + 1):
_script_extensions_data[character_code] = script_set
def _load_blocks_txt():
"""Load block name from Blocks.txt."""
with open_unicode_data_file("Blocks.txt") as blocks_txt:
block_ranges = _parse_code_ranges(blocks_txt.read())
for first, last, block_name in block_ranges:
_block_names.append(block_name)
_block_range[block_name] = (first, last)
for character_code in range(first, last + 1):
_block_data[character_code] = block_name
def _load_derived_age_txt():
"""Load age property from DerivedAge.txt."""
with open_unicode_data_file("DerivedAge.txt") as derived_age_txt:
age_ranges = _parse_code_ranges(derived_age_txt.read())
for first, last, char_age in age_ranges:
for char_code in range(first, last + 1):
_age_data[char_code] = char_age
def _load_derived_core_properties_txt():
"""Load derived core properties from Blocks.txt."""
with open_unicode_data_file("DerivedCoreProperties.txt") as dcp_txt:
dcp_ranges = _parse_code_ranges(dcp_txt.read())
for first, last, property_name in dcp_ranges:
for character_code in range(first, last + 1):
try:
_core_properties_data[property_name].add(character_code)
except KeyError:
_core_properties_data[property_name] = {character_code}
def _load_property_value_aliases_txt():
"""Load property value aliases from PropertyValueAliases.txt."""
with open_unicode_data_file("PropertyValueAliases.txt") as pva_txt:
aliases = _parse_semicolon_separated_data(pva_txt.read())
for data_item in aliases:
if data_item[0] == "sc": # Script
code = data_item[1]
long_name = data_item[2]
_script_code_to_long_name[code] = long_name.replace("_", " ")
folded_name = _folded_script_name(long_name)
_folded_script_name_to_code[folded_name] = code
def _load_bidi_mirroring_txt():
"""Load bidi mirroring glyphs from BidiMirroring.txt."""
with open_unicode_data_file("BidiMirroring.txt") as bidi_mirroring_txt:
bmg_pairs = _parse_semicolon_separated_data(bidi_mirroring_txt.read())
for char, bmg in bmg_pairs:
char = int(char, 16)
bmg = int(bmg, 16)
_bidi_mirroring_glyph_data[char] = bmg
def _load_indic_data():
"""Load Indic properties from Indic(Positional|Syllabic)Category.txt."""
with open_unicode_data_file("IndicPositionalCategory.txt") as inpc_txt:
positional_ranges = _parse_code_ranges(inpc_txt.read())
for first, last, char_position in positional_ranges:
for char_code in range(first, last + 1):
_indic_positional_data[char_code] = char_position
with open_unicode_data_file("IndicSyllabicCategory.txt") as insc_txt:
syllabic_ranges = _parse_code_ranges(insc_txt.read())
for first, last, char_syllabic_category in syllabic_ranges:
for char_code in range(first, last + 1):
_indic_syllabic_data[char_code] = char_syllabic_category
def _load_emoji_data():
"""Parse the new draft format of emoji-data.txt"""
global _presentation_default_emoji, _presentation_default_text
global _emoji, _emoji_modifier_base
if _presentation_default_emoji:
return
emoji_sets = {
"Emoji": set(),
"Emoji_Presentation": set(),
"Emoji_Modifier": set(),
"Emoji_Modifier_Base": set(),
"Extended_Pictographic": set(),
"Emoji_Component": set(),
}
set_names = "|".join(sorted(emoji_sets.keys()))
line_re = re.compile(
r"([0-9A-F]{4,6})(?:\.\.([0-9A-F]{4,6}))?\s*;\s*" r"(%s)\s*#.*$" % set_names
)
with open_unicode_data_file("emoji-data.txt") as f:
for line in f:
line = line.strip()
if not line or line[0] == "#":
continue
m = line_re.match(line)
if not m:
raise ValueError('Did not match "%s"' % line)
start = int(m.group(1), 16)
end = start if not m.group(2) else int(m.group(2), 16)
emoji_set = emoji_sets.get(m.group(3))
emoji_set.update(range(start, end + 1))
# allow our legacy use of handshake and wrestlers with skin tone modifiers
emoji_sets["Emoji_Modifier_Base"] |= {0x1F91D, 0x1F93C}
_presentation_default_emoji = frozenset(emoji_sets["Emoji_Presentation"])
_presentation_default_text = frozenset(
emoji_sets["Emoji"] - emoji_sets["Emoji_Presentation"]
)
_emoji_modifier_base = frozenset(emoji_sets["Emoji_Modifier_Base"])
_emoji = frozenset(emoji_sets["Emoji"])
# we have no real use for the 'Emoji_Regional_Indicator' and
# 'Emoji_Component' sets, and they're not documented, so ignore them.
# The regional indicator set is just the 26 regional indicator
# symbols, and the component set is number sign, asterisk, ASCII digits,
# the regional indicators, and the skin tone modifiers.
PROPOSED_EMOJI_AGE = 1000.0
ZWJ = 0x200D
EMOJI_VS = 0xFE0F
EMOJI_SEQUENCE_TYPES = frozenset(
[
"Basic_Emoji",
"Emoji_Keycap_Sequence",
"Emoji_Combining_Sequence",
"Emoji_Flag_Sequence",
"RGI_Emoji_Flag_Sequence",
"RGI_Emoji_Tag_Sequence",
"Emoji_Modifier_Sequence",
"RGI_Emoji_Modifier_Sequence",
"RGI_Emoji_ZWJ_Sequence",
"Emoji_ZWJ_Sequence",
"Emoji_Single_Sequence",
]
)
# Unicode 12 decided to be 'helpful' and included single emoji in the sequence
# data, but unlike all the other data represents these in batches as XXXX..XXXX
# rather than one per line. We can't get name data for these so we can't
# use that data, but still have to parse the line.
def _read_emoji_data(lines):
"""Parse lines of emoji data and return a map from sequence to tuples of
name, age, type."""
line_re = re.compile(
r"(?:([0-9A-F ]+)|([0-9A-F]+\.\.[0-9A-F]+)\s*);\s*(%s)\s*;\s*([^#]*)\s*#\s*E?(\d+\.\d+).*"
% "|".join(EMOJI_SEQUENCE_TYPES)
)
result = {}
for line in lines:
line = line.strip()
if not line or line[0] == "#":
continue
m = line_re.match(line)
if not m:
raise ValueError('"%s" Did not match "%s"' % (line_re.pattern, line))
# group 1 is a sequence, group 2 is a range of single character sequences.
# we can't process the range because we don't have a name for each character
# in the range, so skip it and get these emoji and their names from
# UnicodeData
if m.group(2):
continue
seq_type = m.group(3).strip().encode("ascii")
seq = tuple(int(s, 16) for s in m.group(1).split())
name = m.group(4).strip()
age = float(m.group(5))
result[seq] = (name, age, seq_type)
return result
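# Illustrative result shape for _read_emoji_data (the sample line is taken from
# the legacy Android data further below; keys are shown in hex for readability):
#   _read_emoji_data(
#       ["1F93C 1F3FB ; Emoji_Modifier_Sequence ; people wrestling: light skin tone # 9.0"]
#   )
#   -> {(0x1F93C, 0x1F3FB): ("people wrestling: light skin tone", 9.0, "Emoji_Modifier_Sequence")}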
def _read_emoji_data_file(filename):
with open_unicode_data_file(filename) as f:
return _read_emoji_data(f.readlines())
_EMOJI_QUAL_TYPES = [
"component",
"fully-qualified",
"minimally-qualified",
"unqualified",
]
def _read_emoji_test_data(data_string):
"""Parse the emoji-test.txt data. This has names of proposed emoji that are
not yet in the full Unicode data file. Returns a list of tuples of
sequence, group, subgroup, name.
The data is a string."""
line_re = re.compile(
r"([0-9a-fA-F ]+)\s*;\s*(%s)\s*#\s*(?:[^\s]+)\s+(.*)\s*"
% "|".join(_EMOJI_QUAL_TYPES)
)
result = []
GROUP_PREFIX = "# group: "
SUBGROUP_PREFIX = "# subgroup: "
group = None
subgroup = None
for line in data_string.splitlines():
line = line.strip()
if not line:
continue
if line[0] == "#":
if line.startswith(GROUP_PREFIX):
group = line[len(GROUP_PREFIX) :].strip().encode("ascii")
subgroup = None
elif line.startswith(SUBGROUP_PREFIX):
subgroup = line[len(SUBGROUP_PREFIX) :].strip().encode("ascii")
continue
m = line_re.match(line)
if not m:
raise ValueError('Did not match "%s" in emoji-test.txt' % line)
if m.group(2) not in ["component", "fully-qualified"]:
# We only want component and fully-qualified sequences, as those are
            # 'canonical'. 'minimally-qualified' sequences apparently just leave
            # off the FE0F emoji presentation selector, which we already assume.
# Information for the unqualified sequences should be
# redundant. At the moment we don't verify this so if the file
# changes we won't catch that.
continue
seq = tuple(int(s, 16) for s in m.group(1).split())
name = m.group(3).strip()
if not (group and subgroup):
raise Exception(
"sequence %s missing group or subgroup" % seq_to_string(seq)
)
result.append((seq, group, subgroup, name))
return result
_SUPPLEMENTAL_EMOJI_GROUP_DATA = """
# group: Misc
# subgroup: used with keycaps
0023 fe0f ; fully-qualified # ? number sign
002a fe0f ; fully-qualified # ? asterisk
0030 fe0f ; fully-qualified # ? digit zero
0031 fe0f ; fully-qualified # ? digit one
0032 fe0f ; fully-qualified # ? digit two
0033 fe0f ; fully-qualified # ? digit three
0034 fe0f ; fully-qualified # ? digit four
0035 fe0f ; fully-qualified # ? digit five
0036 fe0f ; fully-qualified # ? digit six
0037 fe0f ; fully-qualified # ? digit seven
0038 fe0f ; fully-qualified # ? digit eight
0039 fe0f ; fully-qualified # ? digit nine
20e3 ; fully-qualified # ? combining enclosing keycap
# As of Unicode 11 these have group data defined.
# subgroup: skin-tone modifiers
#1f3fb ; fully-qualified # ? emoji modifier fitzpatrick type-1-2
#1f3fc ; fully-qualified # ? emoji modifier fitzpatrick type-3
#1f3fd ; fully-qualified # ? emoji modifier fitzpatrick type-4
#1f3fe ; fully-qualified # ? emoji modifier fitzpatrick type-5
#1f3ff ; fully-qualified # ? emoji modifier fitzpatrick type-6
# subgroup: regional indicator symbols
1f1e6 ; fully-qualified # ? regional indicator symbol letter A
1f1e7 ; fully-qualified # ? regional indicator symbol letter B
1f1e8 ; fully-qualified # ? regional indicator symbol letter C
1f1e9 ; fully-qualified # ? regional indicator symbol letter D
1f1ea ; fully-qualified # ? regional indicator symbol letter E
1f1eb ; fully-qualified # ? regional indicator symbol letter F
1f1ec ; fully-qualified # ? regional indicator symbol letter G
1f1ed ; fully-qualified # ? regional indicator symbol letter H
1f1ee ; fully-qualified # ? regional indicator symbol letter I
1f1ef ; fully-qualified # ? regional indicator symbol letter J
1f1f0 ; fully-qualified # ? regional indicator symbol letter K
1f1f1 ; fully-qualified # ? regional indicator symbol letter L
1f1f2 ; fully-qualified # ? regional indicator symbol letter M
1f1f3 ; fully-qualified # ? regional indicator symbol letter N
1f1f4 ; fully-qualified # ? regional indicator symbol letter O
1f1f5 ; fully-qualified # ? regional indicator symbol letter P
1f1f6 ; fully-qualified # ? regional indicator symbol letter Q
1f1f7 ; fully-qualified # ? regional indicator symbol letter R
1f1f8 ; fully-qualified # ? regional indicator symbol letter S
1f1f9 ; fully-qualified # ? regional indicator symbol letter T
1f1fa ; fully-qualified # ? regional indicator symbol letter U
1f1fb ; fully-qualified # ? regional indicator symbol letter V
1f1fc ; fully-qualified # ? regional indicator symbol letter W
1f1fd ; fully-qualified # ? regional indicator symbol letter X
1f1fe ; fully-qualified # ? regional indicator symbol letter Y
1f1ff ; fully-qualified # ? regional indicator symbol letter Z
#subgroup: unknown flag
fe82b ; fully-qualified # ? unknown flag PUA codepoint
"""
# These are skin tone sequences that Unicode decided not to define. Android
# shipped with them, so we're stuck with them forever regardless of what
# Unicode says.
#
# This data is in the format of emoji-sequences.txt and emoji-zwj-sequences.txt
_LEGACY_ANDROID_SEQUENCES = """
1F93C 1F3FB ; Emoji_Modifier_Sequence ; people wrestling: light skin tone # 9.0
1F93C 1F3FC ; Emoji_Modifier_Sequence ; people wrestling: medium-light skin tone # 9.0
1F93C 1F3FD ; Emoji_Modifier_Sequence ; people wrestling: medium skin tone # 9.0
1F93C 1F3FE ; Emoji_Modifier_Sequence ; people wrestling: medium-dark skin tone # 9.0
1F93C 1F3FF ; Emoji_Modifier_Sequence ; people wrestling: dark skin tone # 9.0
1F93C 1F3FB 200D 2642 FE0F ; Emoji_ZWJ_Sequence ; men wrestling: light skin tone # 9.0
1F93C 1F3FC 200D 2642 FE0F ; Emoji_ZWJ_Sequence ; men wrestling: medium-light skin tone # 9.0
1F93C 1F3FD 200D 2642 FE0F ; Emoji_ZWJ_Sequence ; men wrestling: medium skin tone # 9.0
1F93C 1F3FE 200D 2642 FE0F ; Emoji_ZWJ_Sequence ; men wrestling: medium-dark skin tone # 9.0
1F93C 1F3FF 200D 2642 FE0F ; Emoji_ZWJ_Sequence ; men wrestling: dark skin tone # 9.0
1F93C 1F3FB 200D 2640 FE0F ; Emoji_ZWJ_Sequence ; women wrestling: light skin tone # 9.0
1F93C 1F3FC 200D 2640 FE0F ; Emoji_ZWJ_Sequence ; women wrestling: medium-light skin tone # 9.0
1F93C 1F3FD 200D 2640 FE0F ; Emoji_ZWJ_Sequence ; women wrestling: medium skin tone # 9.0
1F93C 1F3FE 200D 2640 FE0F ; Emoji_ZWJ_Sequence ; women wrestling: medium-dark skin tone # 9.0
1F93C 1F3FF 200D 2640 FE0F ; Emoji_ZWJ_Sequence ; women wrestling: dark skin tone # 9.0
"""
# Defines how to insert the new sequences into the standard order data. Would
# have been nice to merge it into the above legacy data but that would have
# required a format change.
_LEGACY_ANDROID_ORDER = """
-1F93C # people wrestling
1F93C 1F3FB
1F93C 1F3FC
1F93C 1F3FD
1F93C 1F3FE
1F93C 1F3FF
-1F93C 200D 2642 FE0F # men wrestling
1F93C 1F3FB 200D 2642 FE0F
1F93C 1F3FC 200D 2642 FE0F
1F93C 1F3FD 200D 2642 FE0F
1F93C 1F3FE 200D 2642 FE0F
1F93C 1F3FF 200D 2642 FE0F
-1F93C 200D 2640 FE0F # women wrestling
1F93C 1F3FB 200D 2640 FE0F
1F93C 1F3FC 200D 2640 FE0F
1F93C 1F3FD 200D 2640 FE0F
1F93C 1F3FE 200D 2640 FE0F
1F93C 1F3FF 200D 2640 FE0F
"""
def _get_order_patch(order_text, seq_to_name):
"""Create a mapping from a key sequence to a list of sequence, name tuples.
This will be used to insert additional sequences after the key sequence
in the order data. seq_to_name is a mapping from new sequence to name,
so the names don't have to be duplicated in the order data."""
patch_map = {}
patch_key = None
patch_list = None
def get_sequence(seqtext):
return tuple([int(s, 16) for s in seqtext.split()])
for line in order_text.splitlines():
ix = line.find("#")
if ix >= 0:
line = line[:ix]
line = line.strip()
if not line:
continue
if line.startswith("-"):
if patch_list and patch_key:
patch_map[patch_key] = patch_list
patch_key = get_sequence(line[1:])
patch_list = []
else:
seq = get_sequence(line)
name = seq_to_name[seq] # exception if seq is not in sequence_text
patch_list.append((seq, name))
if patch_list and patch_key:
patch_map[patch_key] = patch_list
return patch_map
def _get_android_order_patch():
"""Get an order patch using the legacy android data."""
# maps from sequence to (name, age, type), we only need the name
seq_data = _read_emoji_data(_LEGACY_ANDROID_SEQUENCES.splitlines())
seq_to_name = {k: v[0] for k, v in seq_data.items()}
return _get_order_patch(_LEGACY_ANDROID_ORDER, seq_to_name)
def _apply_order_patch(patch, group_list):
"""patch is a map from a key sequence to list of sequence, name pairs, and
group_list is an ordered list of sequence, group, subgroup, name tuples.
Iterate through the group list appending each item to a new list, and
after appending an item matching a key sequence, also append all of its
associated sequences in order using the same group and subgroup.
Return the new list. If there are any unused patches, raise an exception."""
result = []
patched = set()
for t in group_list:
result.append(t)
if t[0] in patch:
patched.add(t[0])
_, group, subgroup, _ = t
for seq, name in patch[t[0]]:
result.append((seq, group, subgroup, name))
unused = set(patch.keys()) - patched
if unused:
raise Exception(
"%d unused patch%s\n %s: "
% (
len(unused),
"" if len(unused) == 1 else "es",
"\n ".join(seq_to_string(seq) for seq in sorted(unused)),
)
)
return result
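# Illustrative shapes for the order-patch helpers above (sequences shortened for
# readability; the name comes from the legacy Android data below):
#   patch = {(0x1F93C,): [((0x1F93C, 0x1F3FB), "people wrestling: light skin tone"), ...]}
#   group_list entries look like (seq, group, subgroup, name)
# _apply_order_patch appends each patched sequence immediately after its key
# sequence, reusing that entry's group and subgroup.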
def _load_emoji_group_data():
global _emoji_group_data
if _emoji_group_data:
return
_emoji_group_data = {}
with open_unicode_data_file("emoji-test.txt") as f:
text = f.read()
group_list = _read_emoji_test_data(text)
# patch with android items
patch = _get_android_order_patch()
group_list = _apply_order_patch(patch, group_list)
group_list.extend(_read_emoji_test_data(_SUPPLEMENTAL_EMOJI_GROUP_DATA))
for i, (seq, group, subgroup, name) in enumerate(group_list):
if seq in _emoji_group_data:
print(
"seq %s already in group data as %s"
% (seq_to_string(seq), _emoji_group_data[seq])
)
print(" new value would be %s" % str((i, group, subgroup, name)))
_emoji_group_data[seq] = (i, group, subgroup, name)
assert len(group_list) == len(_emoji_group_data)
def get_emoji_group_data(seq):
"""Return group data for the canonical sequence seq, or None.
Group data is a tuple of index, group, subgroup, and name. The
index is a unique global sort index for the sequence among all
sequences in the group data."""
_load_emoji_group_data()
return _emoji_group_data.get(seq, None)
def get_emoji_groups():
"""Return the main emoji groups, in order."""
_load_emoji_group_data()
groups = []
group = None
for _, g, _, _ in sorted(_emoji_group_data.values()):
if g != group:
group = g
groups.append(group)
return groups
def get_emoji_subgroups(group):
"""Return the subgroups of this group, in order, or None
if the group is not recognized."""
_load_emoji_group_data()
subgroups = []
subgroup = None
for _, g, sg, _ in sorted(_emoji_group_data.values()):
if g == group:
if sg != subgroup:
subgroup = sg
subgroups.append(subgroup)
return subgroups if subgroups else None
def get_emoji_in_group(group, subgroup=None):
"""Return the sorted list of the emoji sequences in the group (limiting to
subgroup if subgroup is not None). Returns None if group does not
exist, and an empty list if subgroup does not exist in group."""
_load_emoji_group_data()
result = None
for seq, (index, g, sg, _) in _emoji_group_data.items():
if g == group:
if result is None:
result = []
if subgroup and sg != subgroup:
continue
result.append(seq)
    if result is not None:
        result.sort(key=lambda s: _emoji_group_data[s][0])
return result
def get_sorted_emoji_sequences(seqs):
"""Seqs is a collection of canonical emoji sequences. Returns a list of
these sequences in the canonical emoji group order. Sequences that are not
canonical are placed at the end, in unicode code point order.
"""
_load_emoji_group_data()
    return sorted(
        seqs,
        key=lambda s: (
            _emoji_group_data[s][0] if s in _emoji_group_data else 100000,
            s,
        ),
    )
def _load_emoji_sequence_data():
"""Ensure the emoji sequence data is initialized."""
global _emoji_sequence_data, _emoji_non_vs_to_canonical
if _emoji_sequence_data is not None:
return
_emoji_sequence_data = {}
_emoji_non_vs_to_canonical = {}
def add_data(data):
for k, t in data.items():
if k in _emoji_sequence_data:
print("already have data for sequence:", seq_to_string(k), t)
_emoji_sequence_data[k] = t
if EMOJI_VS in k:
_emoji_non_vs_to_canonical[strip_emoji_vs(k)] = k
for datafile in ["emoji-zwj-sequences.txt", "emoji-sequences.txt"]:
add_data(_read_emoji_data_file(datafile))
add_data(_read_emoji_data(_LEGACY_ANDROID_SEQUENCES.splitlines()))
_load_unicode_data_txt() # ensure character_names_data is populated
_load_emoji_data() # ensure presentation_default_text is populated
_load_emoji_group_data() # ensure group data is populated
# Get names for single emoji from the test data. We will prefer these over
# those in UnicodeData (e.g. prefer "one o'clock" to "clock face one oclock"),
# and if they're not in UnicodeData these are proposed new emoji.
for seq, (_, _, _, emoji_name) in _emoji_group_data.items():
non_vs_seq = strip_emoji_vs(seq)
if len(non_vs_seq) > 1:
continue
cp = non_vs_seq[0]
# If it's not in character names data, it's a proposed emoji.
if cp not in _character_names_data:
# use 'ignore' to strip curly quotes etc if they exist, unicode
# character names are ASCII, and it's probably best to keep it that way.
cp_name = emoji_name.encode("ascii", "ignore").upper()
_character_names_data[cp] = cp_name
is_default_text_presentation = cp in _presentation_default_text
if is_default_text_presentation:
seq = (cp, EMOJI_VS)
        emoji_age = age(cp)
        if emoji_age is not None:
            emoji_age = float(emoji_age)
        else:
            emoji_age = PROPOSED_EMOJI_AGE
current_data = _emoji_sequence_data.get(seq) or (
emoji_name,
emoji_age,
"Emoji_Single_Sequence",
)
if is_default_text_presentation:
emoji_name = "(emoji) " + emoji_name
_emoji_sequence_data[seq] = (emoji_name, current_data[1], current_data[2])
# Fill in sequences of single emoji, handling non-canonical to canonical also.
for k in _emoji:
non_vs_seq = (k,)
is_default_text_presentation = k in _presentation_default_text
if is_default_text_presentation:
canonical_seq = (k, EMOJI_VS)
_emoji_non_vs_to_canonical[non_vs_seq] = canonical_seq
else:
canonical_seq = non_vs_seq
if canonical_seq in _emoji_sequence_data:
# Prefer names we have where they exist
emoji_name, emoji_age, seq_type = _emoji_sequence_data[canonical_seq]
else:
emoji_name = name(k, "unnamed").lower()
if name == "unnamed":
continue
emoji_age = age(k)
seq_type = "Emoji_Single_Sequence"
if is_default_text_presentation and not emoji_name.startswith("(emoji) "):
emoji_name = "(emoji) " + emoji_name
_emoji_sequence_data[canonical_seq] = (emoji_name, emoji_age, seq_type)
def get_emoji_sequences(age=None, types=None):
"""Return the set of canonical emoji sequences, filtering to those <= age
    if age is not None, and those with type in types (if types is not a string)
    or type == types (if types is a string) if types is not None. By default
all sequences are returned, including those for single emoji."""
_load_emoji_sequence_data()
result = _emoji_sequence_data.keys()
if types is not None:
        if isinstance(types, str):
types = frozenset([types])
        result = [k for k in result if _emoji_sequence_data[k][2] in types]
if age is not None:
age = float(age)
        result = [k for k in result if _emoji_sequence_data[k][1] <= age]
return result
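# Illustrative usage of get_emoji_sequences (results depend on the bundled
# Unicode data files, so exact contents are not shown here):
#   get_emoji_sequences(types="Emoji_ZWJ_Sequence")  # only ZWJ sequences
#   get_emoji_sequences(age=9.0)                     # sequences from Unicode 9.0 or earlier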
def get_emoji_sequence_data(seq):
"""Return a tuple of the name, age, and type for the (possibly non-canonical)
sequence, or None if not recognized as a sequence."""
_load_emoji_sequence_data()
seq = get_canonical_emoji_sequence(seq)
if not seq or seq not in _emoji_sequence_data:
return None
return _emoji_sequence_data[seq]
def get_emoji_sequence_name(seq):
"""Return the name of the (possibly non-canonical) sequence, or None if
not recognized as a sequence."""
data = get_emoji_sequence_data(seq)
return None if not data else data[0]
def get_emoji_sequence_age(seq):
"""Return the age of the (possibly non-canonical) sequence, or None if
not recognized as a sequence. Proposed sequences have PROPOSED_EMOJI_AGE
as the age."""
# floats are a pain since the actual values are decimal. maybe use
# strings to represent age.
data = get_emoji_sequence_data(seq)
return None if not data else data[1]
def get_emoji_sequence_type(seq):
"""Return the type of the (possibly non-canonical) sequence, or None if
not recognized as a sequence. Types are in EMOJI_SEQUENCE_TYPES."""
data = get_emoji_sequence_data(seq)
return None if not data else data[2]
def is_canonical_emoji_sequence(seq):
"""Return true if this is a canonical emoji sequence (has 'vs' where Unicode
says it should), and is known."""
_load_emoji_sequence_data()
return seq in _emoji_sequence_data
def get_canonical_emoji_sequence(seq):
"""Return the canonical version of this emoji sequence if the sequence is
known, or None."""
if is_canonical_emoji_sequence(seq):
return seq
seq = strip_emoji_vs(seq)
return _emoji_non_vs_to_canonical.get(seq, None)
def strip_emoji_vs(seq):
"""Return a version of this emoji sequence with emoji variation selectors
stripped. This is the 'non-canonical' version used by the color emoji font,
which doesn't care how the sequence is represented in text."""
if EMOJI_VS in seq:
return tuple([cp for cp in seq if cp != EMOJI_VS])
return seq
def seq_to_string(seq):
"""Return a string representation of the codepoint sequence."""
return "_".join("%04x" % cp for cp in seq)
def string_to_seq(seq_str):
"""Return a codepoint sequence (tuple) given its string representation."""
return tuple([int(s, 16) for s in seq_str.split("_")])
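# Examples (derivable from the two helpers above):
#   seq_to_string((0x1F1FA, 0x1F1F8))  -> "1f1fa_1f1f8"
#   string_to_seq("1f1fa_1f1f8")       -> (0x1F1FA, 0x1F1F8)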
def is_cp_seq(seq):
return all(0 <= n <= 0x10FFFF for n in seq)
_REGIONAL_INDICATOR_START = 0x1F1E6
_REGIONAL_INDICATOR_END = 0x1F1FF
def is_regional_indicator(cp):
return _REGIONAL_INDICATOR_START <= cp <= _REGIONAL_INDICATOR_END
def is_regional_indicator_seq(cps):
return len(cps) == 2 and all(is_regional_indicator(cp) for cp in cps)
def regional_indicator_to_ascii(cp):
assert is_regional_indicator(cp)
return chr(cp - _REGIONAL_INDICATOR_START + ord("A"))
def ascii_to_regional_indicator(ch):
assert "A" <= ch <= "Z"
return ord(ch) - ord("A") + _REGIONAL_INDICATOR_START
def string_to_regional_indicator_seq(s):
assert len(s) == 2
return ascii_to_regional_indicator(s[0]), ascii_to_regional_indicator(s[1])
def regional_indicator_seq_to_string(cps):
assert len(cps) == 2
return "".join(regional_indicator_to_ascii(cp) for cp in cps)
def is_tag(cp):
return 0xE0020 < cp < 0xE0080 or cp == 0xE0001
def tag_character_to_ascii(cp):
assert is_tag(cp)
if cp == 0xE0001:
return "[begin]"
if cp == 0xE007F:
return "[end]"
return chr(cp - 0xE0000)
def is_regional_tag_seq(seq):
return (
seq[0] == 0x1F3F4
and seq[-1] == 0xE007F
and all(0xE0020 < cp < 0xE007E for cp in seq[1:-1])
)
_FITZ_START = 0x1F3FB
_FITZ_END = 0x1F3FF
def is_skintone_modifier(cp):
return _FITZ_START <= cp <= _FITZ_END
def get_presentation_default_emoji():
_load_emoji_data()
return _presentation_default_emoji
def get_presentation_default_text():
_load_emoji_data()
return _presentation_default_text
def get_emoji():
_load_emoji_data()
return _emoji
def is_emoji(cp):
_load_emoji_data()
return cp in _emoji
def is_emoji_modifier_base(cp):
_load_emoji_data()
return cp in _emoji_modifier_base
def _load_unicode_emoji_variants():
"""Parse StandardizedVariants.txt and initialize a set of characters
that have a defined emoji variant presentation. All such characters
also have a text variant presentation so a single set works for both."""
global _emoji_variants, _emoji_variants_proposed
if _emoji_variants:
return
emoji_variants = set()
# prior to Unicode 11 emoji variants were part of the standard data.
# as of Unicode 11 however they're only in a separate emoji data file.
line_re = re.compile(r"([0-9A-F]{4,6})\s+FE0F\s*;\s*emoji style\s*;")
with open_unicode_data_file("emoji-variation-sequences.txt") as f:
for line in f:
m = line_re.match(line)
if m:
emoji_variants.add(int(m.group(1), 16))
_emoji_variants = frozenset(emoji_variants)
try:
read = 0
skipped = 0
with open_unicode_data_file("proposed-variants.txt") as f:
for line in f:
m = line_re.match(line)
if m:
read += 1
cp = int(m.group(1), 16)
if cp in emoji_variants:
skipped += 1
else:
emoji_variants.add(cp)
print(
"skipped %s %d proposed variants"
% ("all of" if skipped == read else skipped, read)
)
except IOError as e:
if e.errno != 2:
raise
_emoji_variants_proposed = frozenset(emoji_variants)
def get_unicode_emoji_variants(include_proposed="proposed"):
"""Returns the emoji characters that have both emoji and text presentations.
If include_proposed is 'proposed', include the ones proposed in 2016/08. If
include_proposed is 'proposed_extra', also include the emoji Noto proposes
for text presentation treatment to align related characters. Else
include_proposed should resolve to boolean False."""
_load_unicode_emoji_variants()
if not include_proposed:
return _emoji_variants
elif include_proposed == "proposed":
return _emoji_variants_proposed
elif include_proposed == "proposed_extra":
extra = tool_utils.parse_int_ranges("1f4b9 1f4c8-1f4ca 1f507 1f509-1f50a 1f44c")
return _emoji_variants_proposed | extra
else:
raise Exception(
"include_proposed is %s which is not in ['proposed', 'proposed_extra']"
% include_proposed
)
def _load_variant_data():
"""Parse StandardizedVariants.txt and initialize all non-emoji variant
data. The data is a mapping from codepoint to a list of tuples of:
- variant selector
- compatibility character (-1 if there is none)
- shaping context (bitmask, 1 2 4 8 for isolate initial medial final)
The compatibility character is for cjk mappings that map to 'the same'
glyph as another CJK character."""
global _variant_data, _variant_data_cps
if _variant_data:
return
compatibility_re = re.compile(r"\s*CJK COMPATIBILITY IDEOGRAPH-([0-9A-Fa-f]+)")
variants = collections.defaultdict(list)
with open_unicode_data_file("StandardizedVariants.txt") as f:
for line in f:
x = line.find("#")
if x >= 0:
line = line[:x]
line = line.strip()
if not line:
continue
tokens = line.split(";")
cp, var = tokens[0].split(" ")
cp = int(cp, 16)
varval = int(var, 16)
if varval in [0xFE0E, 0xFE0F]:
continue # ignore emoji variants
m = compatibility_re.match(tokens[1].strip())
compat = int(m.group(1), 16) if m else -1
context = 0
if tokens[2]:
ctx = tokens[2]
if ctx.find("isolate") != -1:
context += 1
if ctx.find("initial") != -1:
context += 2
if ctx.find("medial") != -1:
context += 4
if ctx.find("final") != -1:
context += 8
variants[cp].append((varval, compat, context))
_variant_data_cps = frozenset(variants.keys())
_variant_data = variants
def has_variant_data(cp):
_load_variant_data()
return cp in _variant_data
def get_variant_data(cp):
_load_variant_data()
return _variant_data[cp][:] if cp in _variant_data else None
def variant_data_cps():
_load_variant_data()
return _variant_data_cps
# proposed emoji
def _load_proposed_emoji_data():
"""Parse proposed-emoji.txt if it exists to get cps/names of proposed emoji
(but not approved) for this version of Unicode."""
global _proposed_emoji_data, _proposed_emoji_data_cps
if _proposed_emoji_data:
return
_proposed_emoji_data = {}
line_re = re.compile(r"^U\+([a-zA-z0-9]{4,5})\s.*\s\d{4}Q\d\s+(.*)$")
try:
with open_unicode_data_file("proposed-emoji.txt") as f:
for line in f:
line = line.strip()
if not line or line[0] == "#" or line.startswith(u"\u2022"):
continue
m = line_re.match(line)
if not m:
raise ValueError('did not match "%s"' % line)
cp = int(m.group(1), 16)
name = m.group(2)
if cp in _proposed_emoji_data:
raise ValueError(
"duplicate emoji %x, old name: %s, new name: %s"
% (cp, _proposed_emoji_data[cp], name)
)
_proposed_emoji_data[cp] = name
except IOError as e:
if e.errno != 2:
# not file not found, rethrow
raise
_proposed_emoji_data_cps = frozenset(_proposed_emoji_data.keys())
def proposed_emoji_name(cp):
_load_proposed_emoji_data()
return _proposed_emoji_data.get(cp, "")
def proposed_emoji_cps():
_load_proposed_emoji_data()
return _proposed_emoji_data_cps
def is_proposed_emoji(cp):
_load_proposed_emoji_data()
return cp in _proposed_emoji_data_cps
def read_codeset(text):
line_re = re.compile(r"^0x([0-9a-fA-F]{2,6})\s+0x([0-9a-fA-F]{4,6})\s+.*")
codeset = set()
for line in text.splitlines():
m = line_re.match(line)
if m:
cp = int(m.group(2), 16)
codeset.add(cp)
return codeset
def codeset(cpname):
"""Return a set of the unicode codepoints in the code page named cpname, or
None."""
filename = ("%s.txt" % cpname).upper()
filepath = path.join(
path.dirname(__file__), os.pardir, "third_party", "unicode", filename
)
if not path.isfile(filepath):
return None
with open(filepath, "r") as f:
return read_codeset(f.read())
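# Illustrative usage of codeset (assumes a mapping file such as CP1252.TXT is
# present under third_party/unicode; otherwise it returns None):
#   cps = codeset("cp1252")
#   if cps is not None:
#       print("%d codepoints mapped" % len(cps))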
def _dump_emoji_presentation():
"""Dump presentation info, for testing."""
text_p = 0
emoji_p = 0
for cp in sorted(get_emoji()):
cp_name = name(cp, "<error>")
if cp in get_presentation_default_emoji():
presentation = "emoji"
emoji_p += 1
elif cp in get_presentation_default_text():
presentation = "text"
text_p += 1
else:
presentation = "<error>"
print(
"%s%04x %5s %s" % (" " if cp < 0x10000 else "", cp, presentation, cp_name)
)
print(
"%d total emoji, %d text presentation, %d emoji presentation"
% (len(get_emoji()), text_p, emoji_p)
)
def _load_nameslist_data():
global _nameslist_see_also
if _nameslist_see_also is not None:
return
_nameslist_see_also = collections.defaultdict(set)
cp = None
line_re = re.compile(r"^(?:(?:([0-9A-F]{4,6})\t.*)|(?:^\s+([x=])\s+(.*)))$")
see_also_re = re.compile(r"\s*(?:\(.*\s-\s+([0-9A-F]{4,6})\))|([0-9A-F]{4,6})")
with open_unicode_data_file("NamesList.txt") as f:
for line in f:
m = line_re.match(line)
if not m:
continue
if m.group(1):
cp = int(m.group(1), 16)
else:
rel = m.group(2).strip()
val = m.group(3).strip()
if rel != "x":
continue
m = see_also_re.match(val)
if not m:
raise Exception(
'could not match see also val "%s" in line "%s"' % (val, line)
)
ref_cp = int(m.group(1) or m.group(2), 16)
_nameslist_see_also[cp].add(ref_cp)
def see_also(cp):
    """Return a frozenset of 'see also' codepoints for cp, or None."""
    _load_nameslist_data()
    refs = _nameslist_see_also.get(cp)
    return frozenset(refs) if refs else None
def _load_namealiases_data():
global _namealiases_alt_names
if _namealiases_alt_names is not None:
return
_namealiases_alt_names = collections.defaultdict(list)
line_re = re.compile(r"([0-9A-F]{4,6});([^;]+);(.*)$")
with open_unicode_data_file("NameAliases.txt") as f:
for line in f:
m = line_re.match(line)
if not m:
continue
cp = int(m.group(1), 16)
name = m.group(2).strip()
name_type = m.group(3).strip()
            if name_type not in [
"correction",
"control",
"alternate",
"figment",
"abbreviation",
]:
raise Exception('unknown name type in "%s"' % line)
if name_type == "figment":
continue
_namealiases_alt_names[cp].append((name, name_type))
def alt_names(cp):
    """Return a tuple of (name, nametype) pairs for cp, or None."""
    _load_namealiases_data()
    names = _namealiases_alt_names.get(cp)
    return tuple(names) if names else None
if __name__ == "__main__":
all_sequences = sorted(get_emoji_sequences())
for k in all_sequences:
if not get_emoji_group_data(k):
print("no data:", seq_to_string(k))
for group in get_emoji_groups():
print("group:", group)
for subgroup in get_emoji_subgroups(group):
print(" subgroup:", subgroup)
print(" %d items" % len(get_emoji_in_group(group, subgroup)))
# dump some information for annotations
for k in get_sorted_emoji_sequences(all_sequences):
age = get_emoji_sequence_age(k)
if age == 12:
print(seq_to_string(k).replace("_", " "), "#", get_emoji_sequence_name(k))
|
googlefonts/nototools
|
nototools/unicode_data.py
|
Python
|
apache-2.0
| 57,418
|
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# This file can be used to pass maven project properties to python
# via string substitutions using the maven-resources-plugin
__project_group_id__ = "org.apache.systemml"
__project_artifact_id__ = "systemml"
__project_version__ = "0.13.0-incubating-SNAPSHOT"
|
asurve/arvind-sysml
|
src/main/python/systemml/project_info.py
|
Python
|
apache-2.0
| 1,177
|
# -*- coding: utf-8 -*-
'''
author: Jimmy
contact: 234390130@qq.com
file: storage.py
time: 2017/9/4 3:18 PM
description:
'''
__author__ = 'Jimmy'
import pymongo
from ctp.ctp_struct import *
from bson import json_util as jsonb
from utils.tools import *
def _getDataBase():
client = pymongo.MongoClient(host='127.0.0.1', port=27017)
return client.trade
# Order status notification OnRtnOrder
def insertRtnOrder(event):
db = _getDataBase()
dict = getStrategyInfo(event.dict)
db.order.insert(dict)
# Order action (error return) OnErrRtnOrderAction
def insertErrRtnOrderAction(event):
db = _getDataBase()
dict = getStrategyInfo(event.dict)
db.order_action.insert(dict)
# Input order action OnRspOrderAction
def insertRspOrderAction(event):
db = _getDataBase()
dict = getStrategyInfo(event.dict)
db.input_order_action.insert(dict)
# Order insertion OnRspOrderInsert OnErrRtnOrderInsert
def insertRspOrderInsert(event):
db = _getDataBase()
dict = getStrategyInfo(event.dict)
db.input_order.insert(dict)
# Trade notification OnRtnTrade
def insertRtnTrade(event):
db = _getDataBase()
dict = getStrategyInfo(event.dict)
db.trade.insert(dict)
# Request error
def insertRspError(event):
db = _getDataBase()
dict = getStrategyInfo(event.dict)
db.error_info.insert(dict)
# db.CThostFtdcRspInfoField.insert(event.dict)
# Save order placement arguments
def insertSendOrderArgs(event):
db = _getDataBase()
    # Enum values must be converted to strings before saving
direction = str(event.dict['direction']).split('.')[-1]
event.dict['direction'] = direction
price_type = str(event.dict['price_type']).split('.')[-1]
event.dict['price_type'] = price_type
stop_price = str(event.dict['stop_price']).split('.')[-1]
event.dict['stop_price'] = stop_price
contingent_condition = str(event.dict['contingent_condition']).split('.')[-1]
event.dict['contingent_condition'] = contingent_condition
event.dict = _insertTime(event.dict)
db.send_order.insert(event.dict)
# Save order cancellation arguments
def insertCancelOrderArgs(event):
db = _getDataBase()
event.dict = _insertTime(event.dict)
db.cancel_order.insert(event.dict)
# Update positions
def insertPosition(event):
db = _getDataBase()
dict = _insertTime(event.dict)
db.position.insert(dict)
# Update account
def updateAccount(event):
db = _getDataBase()
dict = _insertTime(event.dict)
if db.account.find().count() > 0:
db.account.update({'AccountID': dict['AccountID']},{"$set": dict})
else:
db.account.insert(dict)
# Insert timestamp fields
def _insertTime(dict):
date = getTime()
dict['insert_date'] = date[0]
dict['insert_time'] = date[1]
dict['insert_msec'] = date[2]
return dict
def getStrategyInfo(dict):
db = _getDataBase()
dict = _insertTime(dict)
result = list(db.send_order.find({'order_ref':int(dict['OrderRef'])}))
if len(result) > 0:
result = result[0]
dict['strategy_name'] = result['strategy_name']
dict['strategy_id'] = result['strategy_id']
    else:
        # '未知' means 'unknown'; kept as the stored data value
        dict['strategy_name'] = '未知'
        dict['strategy_id'] = '未知'
return dict
# Get the maximum order reference number
def getMaxOrderRef():
db = _getDataBase()
result = list(db.send_order.find({}).sort([('order_ref', -1)]).limit(1))
if len(result) > 0:
result = result[0]
return int(result['order_ref'])
else:
return 0
def getMaxOrderActionRef():
db = _getDataBase()
result = list(db.cancel_order.find({}).sort([('order_action_ref', -1)]).limit(1))
if len(result) > 0:
result = result[0]
return int(result['order_action_ref'])
else:
return 0
if __name__ == '__main__':
def updateAccount(event):
db = _getDataBase()
if db.account.find().count() > 0:
db.account.update({'AccountID': event.dict['AccountID']},
{"$set": event.dict})
else:
db.account.insert(event.dict)
|
sjsj0101/backtestengine
|
utils/storage.py
|
Python
|
apache-2.0
| 4,040
|
"""
Test the "snabb lwaftr monitor" subcommand. Needs a NIC name and a TAP interface.
1. Execute "snabb lwaftr run" in on-a-stick mode and with the mirror option set.
2. Run "snabb lwaftr monitor" to set the counter and check its output.
"""
from random import randint
from subprocess import call, check_call
import unittest
from test_env import DATA_DIR, SNABB_CMD, BaseTestCase, nic_names
SNABB_PCI0 = nic_names()[0]
@unittest.skipUnless(SNABB_PCI0, 'NIC not configured')
class TestMonitor(BaseTestCase):
daemon_args = [
str(SNABB_CMD), 'lwaftr', 'run',
'--bench-file', '/dev/null',
'--conf', str(DATA_DIR / 'icmp_on_fail.conf'),
'--on-a-stick', SNABB_PCI0,
'--mirror', # TAP interface name added in setUpClass.
]
monitor_args = (str(SNABB_CMD), 'lwaftr', 'monitor', 'all')
# Use setUpClass to only setup the daemon once for all tests.
@classmethod
def setUpClass(cls):
# Create the TAP interface and append its name to daemon_args
# before calling the superclass' setUpClass, which needs both.
# 'tapXXXXXX' where X is a 0-9 digit.
cls.tap_name = 'tap%s' % randint(100000, 999999)
check_call(('ip', 'tuntap', 'add', cls.tap_name, 'mode', 'tap'))
cls.daemon_args.append(cls.tap_name)
try:
super(TestMonitor, cls).setUpClass()
except Exception:
# Clean up the TAP interface.
call(('ip', 'tuntap', 'delete', cls.tap_name, 'mode', 'tap'))
raise
def test_monitor(self):
monitor_args = list(self.monitor_args)
monitor_args.append(str(self.daemon.pid))
output = self.run_cmd(monitor_args)
self.assertIn(b'Mirror address set', output,
b'\n'.join((b'OUTPUT', output)))
self.assertIn(b'255.255.255.255', output,
b'\n'.join((b'OUTPUT', output)))
@classmethod
def tearDownClass(cls):
try:
super(TestMonitor, cls).tearDownClass()
finally:
# Clean up the TAP interface.
call(('ip', 'tuntap', 'delete', cls.tap_name, 'mode', 'tap'))
if __name__ == '__main__':
unittest.main()
|
heryii/snabb
|
src/program/lwaftr/tests/subcommands/monitor_test.py
|
Python
|
apache-2.0
| 2,186
|
import uuid
import pytest
from kazoo.testing import KazooTestCase
from kazoo.tests.util import CI_ZK_VERSION
class KazooQueueTests(KazooTestCase):
def _makeOne(self):
path = "/" + uuid.uuid4().hex
return self.client.Queue(path)
def test_queue_validation(self):
queue = self._makeOne()
with pytest.raises(TypeError):
queue.put({})
with pytest.raises(TypeError):
queue.put(b"one", b"100")
with pytest.raises(TypeError):
queue.put(b"one", 10.0)
with pytest.raises(ValueError):
queue.put(b"one", -100)
with pytest.raises(ValueError):
queue.put(b"one", 100000)
def test_empty_queue(self):
queue = self._makeOne()
assert len(queue) == 0
assert queue.get() is None
assert len(queue) == 0
def test_queue(self):
queue = self._makeOne()
queue.put(b"one")
queue.put(b"two")
queue.put(b"three")
assert len(queue) == 3
assert queue.get() == b"one"
assert queue.get() == b"two"
assert queue.get() == b"three"
assert len(queue) == 0
def test_priority(self):
queue = self._makeOne()
queue.put(b"four", priority=101)
queue.put(b"one", priority=0)
queue.put(b"two", priority=0)
queue.put(b"three", priority=10)
assert queue.get() == b"one"
assert queue.get() == b"two"
assert queue.get() == b"three"
assert queue.get() == b"four"
class KazooLockingQueueTests(KazooTestCase):
def setUp(self):
KazooTestCase.setUp(self)
skip = False
if CI_ZK_VERSION and CI_ZK_VERSION < (3, 4):
skip = True
elif CI_ZK_VERSION and CI_ZK_VERSION >= (3, 4):
skip = False
else:
ver = self.client.server_version()
if ver[1] < 4:
skip = True
if skip:
pytest.skip("Must use Zookeeper 3.4 or above")
def _makeOne(self):
path = "/" + uuid.uuid4().hex
return self.client.LockingQueue(path)
def test_queue_validation(self):
queue = self._makeOne()
with pytest.raises(TypeError):
queue.put({})
with pytest.raises(TypeError):
queue.put(b"one", b"100")
with pytest.raises(TypeError):
queue.put(b"one", 10.0)
with pytest.raises(ValueError):
queue.put(b"one", -100)
with pytest.raises(ValueError):
queue.put(b"one", 100000)
with pytest.raises(TypeError):
queue.put_all({})
with pytest.raises(TypeError):
queue.put_all([{}])
with pytest.raises(TypeError):
queue.put_all([b"one"], b"100")
with pytest.raises(TypeError):
queue.put_all([b"one"], 10.0)
with pytest.raises(ValueError):
queue.put_all([b"one"], -100)
with pytest.raises(ValueError):
queue.put_all([b"one"], 100000)
def test_empty_queue(self):
queue = self._makeOne()
assert len(queue) == 0
assert queue.get(0) is None
assert len(queue) == 0
def test_queue(self):
queue = self._makeOne()
queue.put(b"one")
queue.put_all([b"two", b"three"])
assert len(queue) == 3
assert not queue.consume()
assert not queue.holds_lock()
assert queue.get(1) == b"one"
assert queue.holds_lock()
# Without consuming, should return the same element
assert queue.get(1) == b"one"
assert queue.consume()
assert not queue.holds_lock()
assert queue.get(1) == b"two"
assert queue.holds_lock()
assert queue.consume()
assert not queue.holds_lock()
assert queue.get(1) == b"three"
assert queue.holds_lock()
assert queue.consume()
assert not queue.holds_lock()
assert not queue.consume()
assert len(queue) == 0
def test_consume(self):
queue = self._makeOne()
queue.put(b"one")
assert not queue.consume()
queue.get(0.1)
assert queue.consume()
assert not queue.consume()
def test_release(self):
queue = self._makeOne()
queue.put(b"one")
assert queue.get(1) == b"one"
assert queue.holds_lock()
assert queue.release()
assert not queue.holds_lock()
assert queue.get(1) == b"one"
assert queue.consume()
assert not queue.release()
assert len(queue) == 0
def test_holds_lock(self):
queue = self._makeOne()
assert not queue.holds_lock()
queue.put(b"one")
queue.get(0.1)
assert queue.holds_lock()
queue.consume()
assert not queue.holds_lock()
def test_priority(self):
queue = self._makeOne()
queue.put(b"four", priority=101)
queue.put(b"one", priority=0)
queue.put(b"two", priority=0)
queue.put(b"three", priority=10)
assert queue.get(1) == b"one"
assert queue.consume()
assert queue.get(1) == b"two"
assert queue.consume()
assert queue.get(1) == b"three"
assert queue.consume()
assert queue.get(1) == b"four"
assert queue.consume()
def test_concurrent_execution(self):
queue = self._makeOne()
value1 = []
value2 = []
value3 = []
event1 = self.client.handler.event_object()
event2 = self.client.handler.event_object()
event3 = self.client.handler.event_object()
def get_concurrently(value, event):
q = self.client.LockingQueue(queue.path)
value.append(q.get(0.1))
event.set()
self.client.handler.spawn(get_concurrently, value1, event1)
self.client.handler.spawn(get_concurrently, value2, event2)
self.client.handler.spawn(get_concurrently, value3, event3)
queue.put(b"one")
event1.wait(0.2)
event2.wait(0.2)
event3.wait(0.2)
result = value1 + value2 + value3
assert result.count(b"one") == 1
assert result.count(None) == 2
|
python-zk/kazoo
|
kazoo/tests/test_queue.py
|
Python
|
apache-2.0
| 6,221
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Exception for errors when there's an error in the Result
"""
from qiskit.exceptions import QiskitError
class ResultError(QiskitError):
"""Exceptions raised due to errors in result output.
It may be better for the Qiskit API to raise this exception.
Args:
error (dict): This is the error record as it comes back from
the API. The format is like::
error = {'status': 403,
'message': 'Your credits are not enough.',
'code': 'MAX_CREDITS_EXCEEDED'}
"""
def __init__(self, error):
super().__init__(error['message'])
self.status = error['status']
self.code = error['code']
def __str__(self):
return '{}: {}'.format(self.code, self.message)
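# Illustrative usage (the error dict mirrors the format documented in the class
# docstring above):
#   raise ResultError({'status': 403,
#                      'message': 'Your credits are not enough.',
#                      'code': 'MAX_CREDITS_EXCEEDED'})
#   # str() of the exception is roughly "MAX_CREDITS_EXCEEDED: Your credits are not enough."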
|
QISKit/qiskit-sdk-py
|
qiskit/result/exceptions.py
|
Python
|
apache-2.0
| 1,290
|
from setuptools import setup, find_packages
setup(
name="HtmlNode",
version="0.1.8",
packages=find_packages(),
description="A simple Python HTML generator",
author="Hing-Lung Lau",
author_email="lung220@gmail.com",
url="http://github.com/hllau/html_node",
license="Apache v2",
keywords="html builder factory template generator",
classifiers = [
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Code Generators',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML',
'License :: OSI Approved :: Apache Software License',
]
)
|
hllau/html_node
|
setup.py
|
Python
|
apache-2.0
| 750
|
import sys
sys.path.insert(1, "../../")
import h2o
def vec_show(ip,port):
# Connect to h2o
h2o.init(ip,port)
iris = h2o.import_frame(path=h2o.locate("smalldata/iris/iris_wheader.csv"))
print "iris:"
iris.show()
###################################################################
res = 2 - iris
res2 = res[0]
print "res2:"
res2.show()
res3 = res[1]
print "res3:"
res3.show()
iris[2].show()
if __name__ == "__main__":
h2o.run_test(sys.argv, vec_show)
|
ChristosChristofidis/h2o-3
|
h2o-py/tests/testdir_misc/pyunit_vec_show.py
|
Python
|
apache-2.0
| 516
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# We use the inorder traversal to find which elements lie to the left and right of the current element,
# and the postorder traversal (popping from the end) to pick each root and then construct the right and left subtrees.
class Solution:
def buildTree(self, inorder: List[int], postorder: List[int]) -> TreeNode:
def helper(inorderL, inorderR):
# base case
if inorderL >= inorderR:
return None
nonlocal postorder
curr = postorder.pop()
root = TreeNode(curr)
currPos = inorderMap[curr]
root.right = helper(currPos+1, inorderR)
root.left = helper(inorderL, currPos)
return root
inorderMap = {v:k for k, v in enumerate(inorder)}
return helper(0, len(inorder))
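# Illustrative walk-through (input values are an assumption, not from the problem statement):
#   inorder   = [9, 3, 15, 20, 7]
#   postorder = [9, 15, 7, 20, 3]
# postorder.pop() yields 3 first, so 3 becomes the root; inorderMap[3] == 1 splits inorder
# into left [9] and right [15, 20, 7]. Building the right subtree before the left (matching
# the pop order) reconstructs 3 -> (left 9, right 20 -> (left 15, right 7)).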
|
saisankargochhayat/algo_quest
|
leetcode/106. Construct Binary Tree from Inorder and Postorder Traversal/soln.py
|
Python
|
apache-2.0
| 978
|
from insights.parsers.hostname import Hostname
from insights.tests import context_wrap
HOSTNAME = "rhel7.example.com"
HOSTNAME_SHORT = "rhel7"
def test_hostname():
data = Hostname(context_wrap(HOSTNAME))
assert data.fqdn == "rhel7.example.com"
assert data.hostname == "rhel7"
assert data.domain == "example.com"
assert "{0}".format(data) == "<hostname: rhel7, domain: example.com>"
data = Hostname(context_wrap(HOSTNAME_SHORT))
assert data.fqdn == "rhel7"
assert data.hostname == "rhel7"
assert data.domain == ""
data = Hostname(context_wrap(""))
assert data.fqdn is None
assert data.hostname is None
assert data.domain is None
|
wcmitchell/insights-core
|
insights/parsers/tests/test_hostname.py
|
Python
|
apache-2.0
| 685
|
# coding: utf-8
"""
DLRN API
DLRN API client
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
class Params(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, max_age=None, success=None, job_id=None,
sequential_mode=None, previous_job_id=None, component=None):
"""Params - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'max_age': 'int',
'success': 'bool',
'job_id': 'str',
'sequential_mode': 'bool',
'previous_job_id': 'str',
'component': 'str'
}
self.attribute_map = {
'max_age': 'max_age',
'success': 'success',
'job_id': 'job_id',
'sequential_mode': 'sequential_mode',
'previous_job_id': 'previous_job_id',
'component': 'component'
}
self._max_age = max_age
self._success = success
self._job_id = job_id
self._sequential_mode = sequential_mode
self._previous_job_id = previous_job_id
self._component = component
@property
def max_age(self):
"""Gets the max_age of this Params.
Maximum age (in hours) for the repo to be considered.
Any repo tested or being tested after \"now - max_age\" will be taken
into account. If set to 0, all repos will be considered.
:return: The max_age of this Params.
:rtype: int
"""
return self._max_age
@max_age.setter
def max_age(self, max_age):
"""Sets the max_age of this Params.
Maximum age (in hours) for the repo to be considered.
Any repo tested or being tested after \"now - max_age\" will be taken
into account. If set to 0, all repos will be considered.
:param max_age: The max_age of this Params.
:type: int
"""
if max_age is None:
raise ValueError("Invalid value for `max_age`, must not be `None`")
if max_age is not None and max_age < 0:
raise ValueError("Invalid value for `max_age`, must be a value"
" greater than or equal to `0`")
self._max_age = max_age
@property
def success(self):
"""Gets the success of this Params.
If set to a value, find repos with a successful/unsuccessful vote
(as specified). If not set, any tested repo will be considered.
:return: The success of this Params.
:rtype: bool
"""
return self._success
@success.setter
def success(self, success):
"""Sets the success of this Params.
If set to a value, find repos with a successful/unsuccessful vote
(as specified). If not set, any tested repo will be considered.
:param success: The success of this Params.
:type: bool
"""
self._success = success
@property
def job_id(self):
"""Gets the job_id of this Params.
Name of the CI that sent the vote. If not set, no filter will be set
on CI.
:return: The job_id of this Params.
:rtype: str
"""
return self._job_id
@job_id.setter
def job_id(self, job_id):
"""Sets the job_id of this Params.
Name of the CI that sent the vote. If not set, no filter will be set
on CI.
:param job_id: The job_id of this Params.
:type: str
"""
self._job_id = job_id
@property
def sequential_mode(self):
"""Gets the sequential_mode of this Params.
Use the sequential mode algorithm. In this case, return the last tested
repo within that timeframe for the CI job described by previous_job_id.
Defaults to false.
:return: The sequential_mode of this Params.
:rtype: bool
"""
return self._sequential_mode
@sequential_mode.setter
def sequential_mode(self, sequential_mode):
"""Sets the sequential_mode of this Params.
Use the sequential mode algorithm. In this case, return the last tested
repo within that timeframe for the CI job described by previous_job_id.
Defaults to false.
:param sequential_mode: The sequential_mode of this Params.
:type: bool
"""
self._sequential_mode = sequential_mode
@property
def previous_job_id(self):
"""Gets the previous_job_id of this Params.
If sequential_mode is set to true, look for jobs tested by the CI
identified by previous_job_id.
:return: The previous_job_id of this Params.
:rtype: str
"""
return self._previous_job_id
@previous_job_id.setter
def previous_job_id(self, previous_job_id):
"""Sets the previous_job_id of this Params.
If sequential_mode is set to true, look for jobs tested by the CI
identified by previous_job_id.
:param previous_job_id: The previous_job_id of this Params.
:type: str
"""
self._previous_job_id = previous_job_id
@property
def component(self):
"""Gets the component of this Params.
        Component to filter on. If not set, no filter will be set on component.
:return: The component of this Params.
:rtype: str
"""
return self._component
@component.setter
def component(self, component):
"""Sets the component of this Params.
        Component to filter on. If not set, no filter will be set on component.
:param component: The component of this Params.
:type: str
"""
self._component = component
def to_dict(self):
"""Returns the model properties as a dict """
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model """
return pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint` """
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal """
if not isinstance(other, Params):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal """
return not self == other
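# Illustrative usage sketch (not part of the generated client; the field values
# below are made up). Shows the model round-tripping through to_dict().
if __name__ == "__main__":
    example = Params(max_age=24, success=True, job_id="my-ci-job")
    print(example.to_dict())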
|
javierpena/dlrnapi_client
|
dlrnapi_client/models/params.py
|
Python
|
apache-2.0
| 7,405
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Conclusion.title'
db.alter_column(u'itest_conclusion', 'title', self.gf('django.db.models.fields.CharField')(max_length=250, null=True))
# Changing field 'Answer.title'
db.alter_column(u'itest_answer', 'title', self.gf('django.db.models.fields.CharField')(max_length=250, null=True))
# Changing field 'Question.title'
db.alter_column(u'itest_question', 'title', self.gf('django.db.models.fields.CharField')(max_length=250, null=True))
# Changing field 'Test.title'
db.alter_column(u'itest_test', 'title', self.gf('django.db.models.fields.CharField')(max_length=250, null=True))
def backwards(self, orm):
# Changing field 'Conclusion.title'
db.alter_column(u'itest_conclusion', 'title', self.gf('django.db.models.fields.CharField')(default='', max_length=250))
# Changing field 'Answer.title'
db.alter_column(u'itest_answer', 'title', self.gf('django.db.models.fields.CharField')(default=1, max_length=250))
# Changing field 'Question.title'
db.alter_column(u'itest_question', 'title', self.gf('django.db.models.fields.CharField')(default=1, max_length=250))
# Changing field 'Test.title'
db.alter_column(u'itest_test', 'title', self.gf('django.db.models.fields.CharField')(default='a', max_length=250))
models = {
'itest.answer': {
'Meta': {'object_name': 'Answer'},
'conclusion': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['itest.Conclusion']"}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '5850', 'null': 'True', 'blank': 'True'}),
'create_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jump': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['itest.Question']"}),
'num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['itest.Question']"}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '450', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
'itest.conclusion': {
'Meta': {'ordering': "['num']", 'object_name': 'Conclusion'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '5850', 'null': 'True', 'blank': 'True'}),
'create_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '450', 'null': 'True', 'blank': 'True'}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'conclusions'", 'to': "orm['itest.Test']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
'itest.question': {
'Meta': {'ordering': "['num']", 'object_name': 'Question'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '5850', 'null': 'True', 'blank': 'True'}),
'create_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '450', 'null': 'True', 'blank': 'True'}),
'test': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['itest.Test']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
'itest.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'word': ('django.db.models.fields.CharField', [], {'max_length': '35'})
},
'itest.test': {
'Meta': {'ordering': "['num']", 'object_name': 'Test'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '5850', 'null': 'True', 'blank': 'True'}),
'create_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '450', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'tests'", 'symmetrical': 'False', 'to': "orm['itest.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['itest']
|
xuender/test
|
testAdmin/itest/migrations/0012_auto__chg_field_conclusion_title__chg_field_answer_title__chg_field_qu.py
|
Python
|
apache-2.0
| 5,840
|
# Copyright 2015 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5.bigip.tm.asm.policies.character_sets import Character_Sets
from f5.sdk_exception import UnsupportedOperation
import mock
import pytest
@pytest.fixture
def FakeChar():
fake_policy = mock.MagicMock()
fake_e = Character_Sets(fake_policy)
fake_e._meta_data['bigip'].tmos_version = '11.6.0'
return fake_e
class TestCharacterSets(object):
def test_create_raises(self, FakeChar):
with pytest.raises(UnsupportedOperation):
FakeChar.create()
def test_delete_raises(self, FakeChar):
with pytest.raises(UnsupportedOperation):
FakeChar.delete()
|
F5Networks/f5-common-python
|
f5/bigip/tm/asm/policies/test/unit/test_character_sets.py
|
Python
|
apache-2.0
| 1,194
|
from typing import Optional, List
def a(x):
# type: (List[int]) -> List[str]
return <warning descr="Expected type 'List[str]', got 'List[List[int]]' instead">[x]</warning>
def b(x):
# type: (int) -> List[str]
return <warning descr="Expected type 'List[str]', got 'List[int]' instead">[1,2]</warning>
def c():
# type: () -> int
return <warning descr="Expected type 'int', got 'str' instead">'abc'</warning>
def d(x):
# type: (x: int) -> List[str]
return [str(x)]
def e():
# type: () -> int
pass
def f():
# type: () -> Optional[str]
x = int(input())
if x > 0:
return <warning descr="Expected type 'Optional[str]', got 'int' instead">42</warning>
elif x == 0:
return 'abc'
else:
return
def g(x):
# type: (Any) -> int
if x:
return <warning descr="Expected type 'int', got 'str' instead">'abc'</warning>
else:
return <warning descr="Expected type 'int', got 'dict' instead">{}</warning>
|
hurricup/intellij-community
|
python/testData/inspections/PyTypeCheckerInspection/FunctionReturnType.py
|
Python
|
apache-2.0
| 1,000
|
from wx import wx
from Converter import Converter
class GameMenuViewModel(object):
def __init__(self, viewSetter):
self.viewSetter = viewSetter
def displayGameMenu(self):
wx.CallAfter(self.viewSetter.setView, "GameMenu")
def animateCurrentPrices(self, currentPricesJavaMap):
wx.CallAfter(
self.viewSetter.getView("GameMenu").animateCurrentPrices,
Converter(self.viewSetter.gateway).convertJavaMapToDict(currentPricesJavaMap)
)
class Java:
implements = ["py4jmediator.ViewModel$GameMenuViewModel"]
|
DarthThanatos/citySimNG
|
citySimNGView/viewmodel/GameMenuViewModel.py
|
Python
|
apache-2.0
| 583
|
class Backend(object):
'''
    Backend type with a plugin and zero or more parameters (parameter functionality is TBD).
    Links to the categories handled by this backend.
'''
def __init__(self, plugin, params):
self._plugin = plugin
self._params = params
self._categories = []
@property
def plugin(self):
return self._plugin
@property
def params(self):
return self._params
def add_category(self, category):
self._categories.append(category)
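# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Shows how a Backend is expected to be wired up; the plugin object and the
# category names below are placeholders, not identifiers from the project.
def _example_backend_usage():
    backend = Backend(plugin='dummy-plugin', params={'endpoint': 'http://localhost'})
    backend.add_category('compute')
    backend.add_category('storage')
    return backend.plugin, backend.params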
|
compatibleone/accords-platform
|
tools/codegen/OCCI/Backend.py
|
Python
|
apache-2.0
| 544
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for base class NrfMatterDevice."""
from unittest import mock
from absl.testing import parameterized
from gazoo_device import errors
from gazoo_device.base_classes import nrf_matter_device
from gazoo_device.capabilities import device_power_default
from gazoo_device.capabilities import pwrpc_common_default
from gazoo_device.tests.unit_tests.capability_tests.mixins import pwrpc_common_test
from gazoo_device.tests.unit_tests.utils import fake_device_test_case
import immutabledict
_FAKE_DEVICE_ID = "nrfmatterdevicestub-detect"
_FAKE_DEVICE_ADDRESS = "fake-device-address"
_FAKE_VENDOR_ID = "fake-vendor-id"
_FAKE_PRODUCT_ID = "fake-product-id"
_NRF_CONNECT_PERSISTENT_PROPERTIES = immutabledict.immutabledict({
"os": "Zephyr RTOS",
"platform": "nRF Connect",
"serial_number": "FT2BSR6O",
"name": "nrfmatterdevicestub_detect",
"device_type": "nrfmatterdevicestub",
"vendor_id": _FAKE_VENDOR_ID,
"product_id": _FAKE_PRODUCT_ID,
})
class NrfMatterDeviceStub(nrf_matter_device.NrfMatterDevice):
"""Dummy implementation for instantiation of NrfMatterDevice."""
DEVICE_TYPE = "nrfmatterdevicestub"
ENDPOINT_ID_TO_CLASS = {}
class NrfMatterDeviceTest(fake_device_test_case.FakeDeviceTestCase,
pwrpc_common_test.PigweedRpcCommonTestMixin):
"""Unit tests for base class NrfMatterDevice."""
def setUp(self):
super().setUp()
self.setup_fake_device_requirements(_FAKE_DEVICE_ID)
self.device_config["persistent"]["console_port_name"] = _FAKE_DEVICE_ADDRESS
jlink_patcher = mock.patch("pylink.JLink")
jlink_patcher.start()
self.addCleanup(jlink_patcher.stop)
self.uut = NrfMatterDeviceStub(self.mock_manager,
self.device_config,
log_directory=self.artifacts_directory)
@mock.patch.object(nrf_matter_device.os.path, "exists", return_value=True)
def test_is_connected_true(self, mock_exists):
"""Verifies is_connected returns true when console port exists."""
self.assertTrue(
nrf_matter_device.NrfMatterDevice.is_connected(self.device_config))
mock_exists.assert_called_once()
@mock.patch.object(
nrf_matter_device.NrfMatterDevice, "pw_rpc_common")
def test_get_detection_info_on_success(self, mock_rpc_common):
"""Verifies persistent properties are set correctly."""
mock_rpc_common.vendor_id = _FAKE_VENDOR_ID
mock_rpc_common.product_id = _FAKE_PRODUCT_ID
self._test_get_detection_info(
console_port_name=_FAKE_DEVICE_ADDRESS,
device_class=NrfMatterDeviceStub,
persistent_properties=_NRF_CONNECT_PERSISTENT_PROPERTIES)
def test_flash_build_capability(self):
"""Verifies the initialization of flash_build capability."""
self.assertTrue(self.uut.flash_build)
def test_matter_endpoints_capability(self):
"""Verifies the initialization of matter_endpoints capability."""
self.assertIsNotNone(self.uut.matter_endpoints)
def test_device_power_capability(self):
"""Verifies the initialization of device_power capability."""
self.assertIsNotNone(self.uut.device_power)
@mock.patch.object(
device_power_default.DevicePowerDefault, "cycle", autospec=True)
def test_device_reboot_hard(self, reboot_fn):
self.uut.reboot(method="hard")
reboot_fn.assert_called_once()
@parameterized.parameters(dict(method="soft"), dict(method="pw_rpc"))
@mock.patch.object(
pwrpc_common_default.PwRPCCommonDefault, "reboot", autospec=True)
def test_device_reboot(self, reboot_fn, method):
self.uut.reboot(method)
reboot_fn.assert_called_once()
def test_device_reboot_raise_error(self):
"""Test reboot method with invalid method."""
with self.assertRaisesRegex(
errors.DeviceError,
r"ValueError: Method invalid_reboot_method not recognized"):
self.uut.reboot(method="invalid_reboot_method")
if __name__ == "__main__":
fake_device_test_case.main()
|
google/gazoo-device
|
gazoo_device/tests/unit_tests/nrf_matter_device_test.py
|
Python
|
apache-2.0
| 4,549
|
"""Test suite for the custom import logic."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import require
def test_patch_replaces_and_restores():
"""Ensure the import function is patched and restored correctly."""
i = __import__
require.patch_import()
assert i is not __import__
require.unpatch_import()
assert i is __import__
def test_require_gets_local():
"""Check if require finds the local pymodule."""
t1_import_test = require.require('import_test')
assert '.pymodules' in repr(t1_import_test)
def test_require_uses_module_cache():
"""Check if modules are cached when working with pymodules."""
t2_import_test = require.require('import_test')
t3_import_test = require.require('import_test')
assert t2_import_test is t3_import_test
def test_require_not_conflict_with_import():
"""Ensure that using require does not interfere with normal imports."""
setuptools = require.require('setuptools')
import setuptools as setuptools2
assert setuptools2 is not setuptools
def test_require_not_conflict_site_py():
"""Ensure require does not clobber pre-loaded builtins."""
re2 = require.require('re')
assert '.pymodules' not in repr(re2)
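# --- Hedged sketch (editor's addition, not part of the original test suite) ---
# Illustrates the patch/unpatch pairing exercised above, wrapped in try/finally
# so the builtin __import__ is always restored even if the body raises.
def _example_patched_import():
    require.patch_import()
    try:
        return require.require('import_test')
    finally:
        require.unpatch_import()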
|
kevinconway/require.py
|
tests/test_imports.py
|
Python
|
apache-2.0
| 1,350
|
# -*- coding: UTF-8 -*-
import pigpio
class Car(object):
PINS = ['left_pin', 'right_pin', 'forward_pin', 'backward_pin',
'enable_moving', 'enable_turning']
def __init__(self, left_pin, right_pin, forward_pin, backward_pin,
enable_moving, enable_turning, start_power=65):
self._left_pin = left_pin
self._right_pin = right_pin
self._forward_pin = forward_pin
self._backward_pin = backward_pin
self._enable_moving = enable_moving
self._enable_turning = enable_turning
self._setup_gpio()
self._moving_pwm_started = False
self._power = start_power
def _setup_gpio(self):
self._pi = pigpio.pi()
self._pi.set_mode(self._left_pin, pigpio.OUTPUT)
self._pi.set_mode(self._right_pin, pigpio.OUTPUT)
self._pi.set_mode(self._forward_pin, pigpio.OUTPUT)
self._pi.set_mode(self._backward_pin, pigpio.OUTPUT)
self._pi.set_mode(self._enable_moving, pigpio.OUTPUT)
self._pi.set_mode(self._enable_turning, pigpio.OUTPUT)
self._pi.set_PWM_range(self._enable_moving, 100)
        self._pi.set_PWM_frequency(self._enable_moving, 100)  # gpio pin, frequency in Hz
def turn_left(self):
self._pi.write(self._enable_turning, True)
self._pi.write(self._right_pin, False)
self._pi.write(self._left_pin, True)
def turn_right(self):
self._pi.write(self._enable_turning, True)
self._pi.write(self._left_pin, False)
self._pi.write(self._right_pin, True)
def straight(self):
self._pi.write(self._left_pin, False)
self._pi.write(self._right_pin, False)
self._pi.write(self._enable_turning, False)
def move_forward(self):
self._pi.write(self._backward_pin, False)
self._pi.write(self._forward_pin, True)
self._start_moving_pwm()
def move_backward(self):
self._pi.write(self._forward_pin, False)
self._pi.write(self._backward_pin, True)
self._start_moving_pwm()
def faster(self, change_value=15):
if self._power + change_value > 100:
self._power = 100
else:
self._power += change_value
self._change_power()
def slower(self, change_value=15):
if self._power - change_value < 30:
self._power = 30
else:
self._power -= change_value
self._change_power()
def stop_moving(self):
        self._pi.set_PWM_dutycycle(self._enable_moving, 0)
self._pi.write(self._backward_pin, False)
self._pi.write(self._forward_pin, False)
self._moving_pwm_started = False
def stop(self):
self.stop_moving()
self._pi.write(self._left_pin, False)
self._pi.write(self._right_pin, False)
self._pi.write(self._enable_turning, False)
def _start_moving_pwm(self):
if self._moving_pwm_started:
return
self._pi.set_PWM_dutycycle(self._enable_moving, self._power)
self._moving_pwm_started = True
def _change_power(self):
self._pi.set_PWM_dutycycle(self._enable_moving, self._power)
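# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Assumes the pigpio daemon is running and that the BCM pin numbers below are
# placeholders; the real wiring depends on the workshop hardware.
def _example_drive():
    car = Car(left_pin=17, right_pin=27, forward_pin=22, backward_pin=23,
              enable_moving=24, enable_turning=25)
    car.move_forward()  # start driving at the default power (65% duty cycle)
    car.faster()        # raise power by 15 percentage points (capped at 100)
    car.turn_left()
    car.straight()
    car.stop()          # stop moving and release the turning pins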
|
lukaszo/picar_worhshop
|
picar/car.py
|
Python
|
apache-2.0
| 3,163
|
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from mapproxy.compat.image import Image, transform_uses_center
from mapproxy.image import ImageSource, image_filter
from mapproxy.srs import make_lin_transf, bbox_equals
class ImageTransformer(object):
"""
Transform images between different bbox and spatial reference systems.
    :note: The transformation doesn't transform each pixel individually,
           but uses a mesh transformation (see `PIL Image.transform`_).
It will divide the target image into rectangles (a mesh). The
source coordinates for each rectangle vertex will be calculated.
The quadrilateral will then be transformed with the source coordinates
into the destination quad (affine).
The number of quads is calculated dynamically to keep the deviation in
the image transformation below one pixel.
.. _PIL Image.transform:
http://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.transform
::
src quad dst quad
.----. <- coord- .----.
/ / transformation | |
/ / | |
.----. img-transformation -> .----.----
| | |
---------------.
large src image large dst image
"""
def __init__(self, src_srs, dst_srs, max_px_err=1):
"""
:param src_srs: the srs of the source image
:param dst_srs: the srs of the target image
        :param max_px_err: the maximum allowed deviation (in pixels) when
            approximating the transformation with a mesh
"""
self.src_srs = src_srs
self.dst_srs = dst_srs
self.dst_bbox = self.dst_size = None
self.max_px_err = max_px_err
def transform(self, src_img, src_bbox, dst_size, dst_bbox, image_opts):
"""
Transforms the `src_img` between the source and destination SRS
of this ``ImageTransformer`` instance.
When the ``src_srs`` and ``dst_srs`` are equal the image will be cropped
and not transformed. If the `src_bbox` and `dst_bbox` are equal,
the `src_img` itself will be returned.
:param src_img: the source image for the transformation
:param src_bbox: the bbox of the src_img
        :param dst_size: the size of the result image (in pixels)
:type dst_size: ``(int(width), int(height))``
:param dst_bbox: the bbox of the result image
:return: the transformed image
:rtype: `ImageSource`
"""
if self._no_transformation_needed(src_img.size, src_bbox, dst_size, dst_bbox):
return src_img
if self.src_srs == self.dst_srs:
result = self._transform_simple(src_img, src_bbox, dst_size, dst_bbox,
image_opts)
else:
result = self._transform(src_img, src_bbox, dst_size, dst_bbox, image_opts)
result.cacheable = src_img.cacheable
return result
def _transform_simple(self, src_img, src_bbox, dst_size, dst_bbox, image_opts):
"""
Do a simple crop/extent transformation.
"""
src_quad = (0, 0, src_img.size[0], src_img.size[1])
to_src_px = make_lin_transf(src_bbox, src_quad)
minx, miny = to_src_px((dst_bbox[0], dst_bbox[3]))
maxx, maxy = to_src_px((dst_bbox[2], dst_bbox[1]))
src_res = ((src_bbox[0]-src_bbox[2])/src_img.size[0],
(src_bbox[1]-src_bbox[3])/src_img.size[1])
dst_res = ((dst_bbox[0]-dst_bbox[2])/dst_size[0],
(dst_bbox[1]-dst_bbox[3])/dst_size[1])
tenth_px_res = (abs(dst_res[0]/(dst_size[0]*10)),
abs(dst_res[1]/(dst_size[1]*10)))
if (abs(src_res[0]-dst_res[0]) < tenth_px_res[0] and
abs(src_res[1]-dst_res[1]) < tenth_px_res[1]):
# rounding might result in subpixel inaccuracy
            # this exact resolution match should only happen in clients with
# fixed resolutions like OpenLayers
minx = int(round(minx))
miny = int(round(miny))
result = src_img.as_image().crop((minx, miny,
minx+dst_size[0], miny+dst_size[1]))
else:
img = img_for_resampling(src_img.as_image(), image_opts.resampling)
result = img.transform(dst_size, Image.EXTENT,
(minx, miny, maxx, maxy),
image_filter[image_opts.resampling])
return ImageSource(result, size=dst_size, image_opts=image_opts)
def _transform(self, src_img, src_bbox, dst_size, dst_bbox, image_opts):
"""
Do a 'real' transformation with a transformed mesh (see above).
"""
meshes = transform_meshes(
src_size=src_img.size,
src_bbox=src_bbox,
src_srs=self.src_srs,
dst_size=dst_size,
dst_bbox=dst_bbox,
dst_srs=self.dst_srs,
max_px_err=self.max_px_err,
)
img = img_for_resampling(src_img.as_image(), image_opts.resampling)
result = img.transform(dst_size, Image.MESH, meshes,
image_filter[image_opts.resampling])
if False:
            # draw mesh for debugging
from PIL import ImageDraw
draw = ImageDraw.Draw(result)
for g, _ in meshes:
draw.rectangle(g, fill=None, outline=(255, 0, 0))
return ImageSource(result, size=dst_size, image_opts=image_opts)
def _no_transformation_needed(self, src_size, src_bbox, dst_size, dst_bbox):
"""
>>> src_bbox = (-2504688.5428486541, 1252344.271424327,
... -1252344.271424327, 2504688.5428486541)
>>> dst_bbox = (-2504688.5431999983, 1252344.2704,
... -1252344.2719999983, 2504688.5416000001)
>>> from mapproxy.srs import SRS
>>> t = ImageTransformer(SRS(900913), SRS(900913))
>>> t._no_transformation_needed((256, 256), src_bbox, (256, 256), dst_bbox)
True
"""
xres = (dst_bbox[2]-dst_bbox[0])/dst_size[0]
yres = (dst_bbox[3]-dst_bbox[1])/dst_size[1]
return (src_size == dst_size and
self.src_srs == self.dst_srs and
bbox_equals(src_bbox, dst_bbox, xres/10, yres/10))
def transform_meshes(src_size, src_bbox, src_srs, dst_size, dst_bbox, dst_srs, max_px_err=1):
"""
transform_meshes creates a list of QUAD transformation parameters for PIL's
MESH image transformation.
Each QUAD is a rectangle in the destination image, like ``(0, 0, 100, 100)`` and
a list of four pixel coordinates in the source image that match the destination rectangle.
    The four points form a quadrilateral (i.e. not necessarily a rectangle).
    PIL's image transform uses an affine transformation to fill each rectangle in the
    destination image with data from the source quadrilateral.
    The number of QUADs is calculated dynamically to keep the deviation in the image
    transformation below one pixel. Images at large map scales can usually be transformed
    with 1-4 QUADs. At low scales, transform_meshes can generate a few hundred QUADs.
    It generates at most one QUAD per 50 pixels.
"""
src_bbox = src_srs.align_bbox(src_bbox)
dst_bbox = dst_srs.align_bbox(dst_bbox)
src_rect = (0, 0, src_size[0], src_size[1])
dst_rect = (0, 0, dst_size[0], dst_size[1])
to_src_px = make_lin_transf(src_bbox, src_rect)
to_src_w = make_lin_transf(src_rect, src_bbox)
to_dst_w = make_lin_transf(dst_rect, dst_bbox)
meshes = []
    # more recent versions of Pillow use center coordinates for
    # transformations; otherwise we need to add half a pixel manually
if transform_uses_center():
px_offset = 0.0
else:
px_offset = 0.5
def dst_quad_to_src(quad):
src_quad = []
for dst_px in [(quad[0], quad[1]), (quad[0], quad[3]),
(quad[2], quad[3]), (quad[2], quad[1])]:
dst_w = to_dst_w(
(dst_px[0] + px_offset, dst_px[1] + px_offset))
src_w = dst_srs.transform_to(src_srs, dst_w)
src_px = to_src_px(src_w)
src_quad.extend(src_px)
return quad, src_quad
res = (dst_bbox[2] - dst_bbox[0]) / dst_size[0]
max_err = max_px_err * res
def is_good(quad, src_quad):
w = quad[2] - quad[0]
h = quad[3] - quad[1]
if w < 50 or h < 50:
return True
xc = quad[0] + w / 2.0 - 0.5
yc = quad[1] + h / 2.0 - 0.5
        # destination coordinate of the quad center (exact transformation)
        dst_w = to_dst_w((xc, yc))
        # destination coordinate the affine mesh approximation would sample
        src_px = center_quad_transform(quad, src_quad)
        real_dst_w = src_srs.transform_to(dst_srs, to_src_w(src_px))
err = max(abs(dst_w[0] - real_dst_w[0]), abs(dst_w[1] - real_dst_w[1]))
return err < max_err
    # recursively add meshes: divide each quad into sub-quads until the
    # accuracy is good enough
def add_meshes(quads):
for quad in quads:
quad, src_quad = dst_quad_to_src(quad)
if is_good(quad, src_quad):
meshes.append((quad, src_quad))
else:
add_meshes(divide_quad(quad))
add_meshes([(0, 0, dst_size[0], dst_size[1])])
return meshes
def center_quad_transform(quad, src_quad):
"""
    center_quad_transform transforms the center pixel coordinates
    from ``quad`` to ``src_quad`` using the same affine transformation
    that PIL.Image.transform uses.
"""
w = quad[2] - quad[0]
h = quad[3] - quad[1]
nw = src_quad[0:2]
sw = src_quad[2:4]
se = src_quad[4:6]
ne = src_quad[6:8]
x0, y0 = nw
As = 1.0 / w
At = 1.0 / h
a0 = x0
a1 = (ne[0] - x0) * As
a2 = (sw[0] - x0) * At
a3 = (se[0] - sw[0] - ne[0] + x0) * As * At
a4 = y0
a5 = (ne[1] - y0) * As
a6 = (sw[1] - y0) * At
a7 = (se[1] - sw[1] - ne[1] + y0) * As * At
x = w / 2.0 - 0.5
y = h / 2.0 - 0.5
return (
a0 + a1*x + a2*y + a3*x*y,
a4 + a5*x + a6*y + a7*x*y
)
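# --- Hedged sketch (editor's addition, not part of the original module) ---
# Sanity check for center_quad_transform: when the source quad is the same
# axis-aligned rectangle as the destination quad (given in nw, sw, se, ne
# order), the affine mapping should return the destination center unchanged.
def _example_center_quad_identity():
    quad = (0, 0, 100, 100)
    src_quad = [0, 0, 0, 100, 100, 100, 100, 0]
    return center_quad_transform(quad, src_quad)  # -> (49.5, 49.5)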
def img_for_resampling(img, resampling):
"""
Convert P images to RGB(A) for non-NEAREST resamplings.
"""
resampling = image_filter[resampling]
if img.mode == 'P' and resampling != Image.NEAREST:
img.load() # load to get actual palette mode
if img.palette is not None:
# palette can still be None for cropped images
img = img.convert(img.palette.mode)
else:
img = img.convert('RGBA')
return img
def divide_quad(quad):
"""
    Divide a quad into up to four sub-quads. Only divide horizontally if the quad is more
    than twice as wide as it is high, and vertically vice versa.
PIL.Image.transform expects that the lower-right corner
of a quad overlaps by one pixel.
>>> divide_quad((0, 0, 500, 500))
[(0, 0, 250, 250), (250, 0, 500, 250), (0, 250, 250, 500), (250, 250, 500, 500)]
>>> divide_quad((0, 0, 2000, 500))
[(0, 0, 1000, 500), (1000, 0, 2000, 500)]
>>> divide_quad((100, 200, 200, 500))
[(100, 200, 200, 350), (100, 350, 200, 500)]
"""
w = quad[2] - quad[0]
h = quad[3] - quad[1]
xc = int(quad[0] + w/2)
yc = int(quad[1] + h/2)
if w > 2*h:
return [
(quad[0], quad[1], xc, quad[3]),
(xc, quad[1], quad[2], quad[3]),
]
if h > 2*w:
return [
(quad[0], quad[1], quad[2], yc),
(quad[0], yc, quad[2], quad[3]),
]
return [
(quad[0], quad[1], xc, yc),
(xc, quad[1], quad[2], yc),
(quad[0], yc, xc, quad[3]),
(xc, yc, quad[2], quad[3]),
]
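# --- Hedged sketch (editor's addition, not part of the original module) ---
# divide_quad() is pure arithmetic, so its behaviour is easy to check in
# isolation: a square quad is split into four sub-quads, while an elongated
# quad is only split along its long side.
def _example_divide_quad():
    assert len(divide_quad((0, 0, 500, 500))) == 4
    assert len(divide_quad((0, 0, 2000, 500))) == 2
    assert len(divide_quad((100, 200, 200, 500))) == 2
    return divide_quad((0, 0, 500, 500))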
|
olt/mapproxy
|
mapproxy/image/transform.py
|
Python
|
apache-2.0
| 12,700
|
import numpy as np
# Inner (or dot) product
a = np.array([1,2])
b = np.array([3,4])
np.inner(a, b)
a.dot(b)
# Outer product
a = np.array([1,2])
b = np.array([3,4])
np.outer(a, b)
# Inverse
m = np.array([[1,2], [3,4]])
np.linalg.inv(m)
# A matrix multiplied by its inverse gives the identity
m = np.array([[1,2], [3,4]])
minv = np.linalg.inv(m)
m.dot(minv)
# Diagonal
m = np.array([[1,2], [3,4]])
np.diag(m)
m = np.array([1,2])
np.diag(m)
# Determinant
m = np.array([[1,2], [3,4]])
np.linalg.det(m)
# Trace - sum of elements of the diagonal
m = np.array([[1,2], [3,4]])
np.diag(m)
np.diag(m).sum()
np.trace(m)
# Transpose
m = np.array([ [1,2], [3,4] ])
m.T
# Gaussian distribution
m = np.random.randn(2,3)
m
# Covariance
X = np.random.randn(100,3)
np.cov(X.T)
# Eigenvalues and eigenvectors
# For a symmetric matrix (m == m.T) or a Hermitian matrix (m == m.H) we use eigh.
m = np.array([
[ 0.89761228, 0.00538701, -0.03229084],
[ 0.00538701, 1.04860676, -0.25001666],
[-0.03229084, -0.25001666, 0.81116126]])
# The result is a tuple: the first element contains the three eigenvalues,
# the second contains the eigenvectors, stored as columns.
np.linalg.eigh(m)
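# Hedged check (editor's addition): the decomposition should satisfy
# m @ v == v * w, i.e. each eigenvector column scaled by its eigenvalue.
w, v = np.linalg.eigh(m)
np.allclose(m.dot(v), v * w)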
# Solving linear systems
# The admission fee at a small fair is $1.50 for children and $4.00 for adults.
# On a certain day 2,200 people enter the fair and $5050 is collected.
# How many children and how many adults attended?
#
# Let X1 = number of children
# Let X2 = number of adults
# X1 + X2 = 2200
# 1.5X1 + 4X2 = 5050
a = np.array([ [1,1], [1.5,4] ])
b = np.array( [ 2200, 5050] )
np.linalg.solve(a, b)
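# Hedged check (editor's addition): substituting the solution back in should
# reproduce the right-hand side (1500 children and 700 adults attended).
x = np.linalg.solve(a, b)
np.allclose(a.dot(x), b)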
|
martinahogg/machinelearning
|
tools/numpy-examples.py
|
Python
|
apache-2.0
| 1,533
|
# Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import inspect
import os
import shutil
import tempfile
import fixtures
import mock
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_utils import units
from oslo_utils import uuidutils
from nova import context
from nova import exception
from nova import keymgr
from nova import objects
from nova.openstack.common import imageutils
from nova import test
from nova.tests.unit import fake_processutils
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import rbd_utils
CONF = cfg.CONF
CONF.import_opt('fixed_key', 'nova.keymgr.conf_key_mgr', group='keymgr')
class _ImageTestCase(object):
def mock_create_image(self, image):
def create_image(fn, base, size, *args, **kwargs):
fn(target=base, *args, **kwargs)
image.create_image = create_image
def setUp(self):
super(_ImageTestCase, self).setUp()
self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
self.INSTANCES_PATH = tempfile.mkdtemp(suffix='instances')
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instances_path=self.INSTANCES_PATH)
self.INSTANCE = objects.Instance(id=1, uuid=uuidutils.generate_uuid())
self.DISK_INFO_PATH = os.path.join(self.INSTANCES_PATH,
self.INSTANCE['uuid'], 'disk.info')
self.NAME = 'fake.vm'
self.TEMPLATE = 'template'
self.CONTEXT = context.get_admin_context()
self.OLD_STYLE_INSTANCE_PATH = \
fake_libvirt_utils.get_instance_path(self.INSTANCE, forceold=True)
self.PATH = os.path.join(
fake_libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
# TODO(mikal): rename template_dir to base_dir and template_path
# to cached_image_path. This will be less confusing.
self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
def tearDown(self):
super(_ImageTestCase, self).tearDown()
shutil.rmtree(self.INSTANCES_PATH)
def test_prealloc_image(self):
CONF.set_override('preallocate_images', 'space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(os, 'access', lambda p, w: True)
        # Call cache() twice to verify the fallocate capability test only runs once.
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(),
['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
def test_prealloc_image_without_write_access(self):
CONF.set_override('preallocate_images', 'space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stubs.Set(image, 'check_image_exists', lambda: True)
self.stubs.Set(image, '_can_fallocate', lambda: True)
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(os, 'access', lambda p, w: False)
        # The fallocate capability test only runs when the user has write access.
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
def test_libvirt_fs_info(self):
image = self.image_class(self.INSTANCE, self.NAME)
fs = image.libvirt_fs_info("/mnt")
        # check that no exception was raised and that the method
        # returned the correct object
self.assertIsInstance(fs, vconfig.LibvirtConfigGuestFilesys)
self.assertEqual(fs.target_dir, "/mnt")
if image.is_block_dev:
self.assertEqual(fs.source_type, "block")
self.assertEqual(fs.source_dev, image.path)
else:
self.assertEqual(fs.source_type, "file")
self.assertEqual(fs.source_file, image.path)
@mock.patch('nova.virt.disk.api.get_disk_size')
def test_get_disk_size(self, get_disk_size):
get_disk_size.return_value = 2361393152
image = self.image_class(self.INSTANCE, self.NAME)
self.assertEqual(2361393152, image.get_disk_size(image.path))
get_disk_size.assert_called_once_with(image.path)
class RawTestCase(_ImageTestCase, test.NoDBTestCase):
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Raw
super(RawTestCase, self).setUp()
self.stubs.Set(imagebackend.Raw, 'correct_format', lambda _: None)
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(imagebackend.utils.synchronized,
'__call__')
self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
self.mox.StubOutWithMock(imagebackend.disk, 'extend')
return fn
def test_cache(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_image_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.cache(None, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_base_dir_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_template_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(None, self.TEMPLATE)
self.mox.VerifyAll()
def test_create_image(self):
fn = self.prepare_mocks()
fn(target=self.TEMPLATE_PATH, max_size=None, image_id=None)
imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None)
self.mox.VerifyAll()
def test_create_image_generated(self):
fn = self.prepare_mocks()
fn(target=self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
self.mox.VerifyAll()
@mock.patch.object(images, 'qemu_img_info',
return_value=imageutils.QemuImgInfo())
def test_create_image_extend(self, fake_qemu_img_info):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH, image_id=None)
imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=False)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE, image_id=None)
self.mox.VerifyAll()
def test_correct_format(self):
self.stubs.UnsetAll()
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(imagebackend.images, 'qemu_img_info')
os.path.exists(self.PATH).AndReturn(True)
os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
info = self.mox.CreateMockAnything()
info.file_format = 'foo'
imagebackend.images.qemu_img_info(self.PATH).AndReturn(info)
os.path.exists(CONF.instances_path).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME, path=self.PATH)
self.assertEqual(image.driver_format, 'foo')
self.mox.VerifyAll()
@mock.patch.object(images, 'qemu_img_info',
side_effect=exception.InvalidDiskInfo(
reason='invalid path'))
def test_resolve_driver_format(self, fake_qemu_img_info):
image = self.image_class(self.INSTANCE, self.NAME)
driver_format = image.resolve_driver_format()
self.assertEqual(driver_format, 'raw')
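# --- Hedged sketch (editor's addition, not part of the original test module) ---
# The mox-based tests above follow a record/replay/verify cycle.  The same kind
# of expectation, written with the `mock` library that is already imported in
# this module, looks roughly like this (illustrative only, not wired into any
# test class):
def _example_mock_equivalent():
    with mock.patch.object(os.path, 'exists', return_value=True) as exists:
        assert os.path.exists('/some/template/path')
    exists.assert_called_once_with('/some/template/path')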
class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
SIZE = units.Gi
def setUp(self):
self.image_class = imagebackend.Qcow2
super(Qcow2TestCase, self).setUp()
self.QCOW2_BASE = (self.TEMPLATE_PATH +
'_%d' % (self.SIZE / units.Gi))
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(imagebackend.utils.synchronized,
'__call__')
self.mox.StubOutWithMock(imagebackend.libvirt_utils,
'create_cow_image')
self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
self.mox.StubOutWithMock(imagebackend.disk, 'extend')
return fn
def test_cache(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
os.path.exists(CONF.instances_path).AndReturn(True)
os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
os.path.exists(self.INSTANCES_PATH).AndReturn(True)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_image_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
os.path.exists(self.INSTANCES_PATH).AndReturn(True)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.cache(None, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_base_dir_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
os.path.exists(self.INSTANCES_PATH).AndReturn(True)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_template_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
os.path.exists(self.INSTANCES_PATH).AndReturn(True)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(None, self.TEMPLATE)
self.mox.VerifyAll()
def test_create_image(self):
fn = self.prepare_mocks()
fn(max_size=None, target=self.TEMPLATE_PATH)
imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
self.mox.VerifyAll()
def test_create_image_with_size(self):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(imagebackend.Image,
'verify_base_size')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
os.path.exists(self.INSTANCES_PATH).AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(False)
imagebackend.Image.verify_base_size(self.TEMPLATE_PATH, self.SIZE)
imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
self.PATH)
imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
def test_create_image_too_small(self):
fn = self.prepare_mocks()
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(imagebackend.Qcow2, 'get_disk_size')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
os.path.exists(self.INSTANCES_PATH).AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
imagebackend.Qcow2.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.SIZE)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(exception.FlavorDiskTooSmall,
image.create_image, fn, self.TEMPLATE_PATH, 1)
self.mox.VerifyAll()
def test_generate_resized_backing_files(self):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(imagebackend.libvirt_utils,
'get_disk_backing_file')
self.mox.StubOutWithMock(imagebackend.Image,
'verify_base_size')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
os.path.exists(CONF.instances_path).AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(True)
imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
.AndReturn(self.QCOW2_BASE)
os.path.exists(self.QCOW2_BASE).AndReturn(False)
imagebackend.Image.verify_base_size(self.TEMPLATE_PATH, self.SIZE)
imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH,
self.QCOW2_BASE)
imagebackend.disk.extend(self.QCOW2_BASE, self.SIZE, use_cow=True)
os.path.exists(self.PATH).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
def test_qcow2_exists_and_has_no_backing_file(self):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(imagebackend.libvirt_utils,
'get_disk_backing_file')
self.mox.StubOutWithMock(imagebackend.Image,
'verify_base_size')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
os.path.exists(self.INSTANCES_PATH).AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(True)
imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
.AndReturn(None)
imagebackend.Image.verify_base_size(self.TEMPLATE_PATH, self.SIZE)
os.path.exists(self.PATH).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
def test_resolve_driver_format(self):
image = self.image_class(self.INSTANCE, self.NAME)
driver_format = image.resolve_driver_format()
self.assertEqual(driver_format, 'qcow2')
class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
VG = 'FakeVG'
TEMPLATE_SIZE = 512
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Lvm
super(LvmTestCase, self).setUp()
self.flags(images_volume_group=self.VG, group='libvirt')
self.flags(enabled=False, group='ephemeral_storage_encryption')
self.INSTANCE['ephemeral_key_uuid'] = None
self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
self.OLD_STYLE_INSTANCE_PATH = None
self.PATH = os.path.join('/dev', self.VG, self.LV)
self.disk = imagebackend.disk
self.utils = imagebackend.utils
self.lvm = imagebackend.lvm
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(self.disk, 'resize2fs')
self.mox.StubOutWithMock(self.lvm, 'create_volume')
self.mox.StubOutWithMock(self.disk, 'get_disk_size')
self.mox.StubOutWithMock(self.utils, 'execute')
return fn
def _create_image(self, sparse):
fn = self.prepare_mocks()
fn(max_size=None, target=self.TEMPLATE_PATH)
self.lvm.create_volume(self.VG,
self.LV,
self.TEMPLATE_SIZE,
sparse=sparse)
self.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.TEMPLATE_SIZE)
cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
self.PATH)
self.utils.execute(*cmd, run_as_root=True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
self.mox.VerifyAll()
def _create_image_generated(self, sparse):
fn = self.prepare_mocks()
self.lvm.create_volume(self.VG, self.LV,
self.SIZE, sparse=sparse)
fn(target=self.PATH, ephemeral_size=None)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH,
self.SIZE, ephemeral_size=None)
self.mox.VerifyAll()
def _create_image_resize(self, sparse):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
self.lvm.create_volume(self.VG, self.LV,
self.SIZE, sparse=sparse)
self.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.TEMPLATE_SIZE)
cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
self.PATH)
self.utils.execute(*cmd, run_as_root=True)
self.disk.resize2fs(self.PATH, run_as_root=True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
def test_cache(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_image_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.cache(None, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_base_dir_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_create_image(self):
self._create_image(False)
def test_create_image_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image(True)
def test_create_image_generated(self):
self._create_image_generated(False)
def test_create_image_generated_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image_generated(True)
def test_create_image_resize(self):
self._create_image_resize(False)
def test_create_image_resize_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image_resize(True)
def test_create_image_negative(self):
fn = self.prepare_mocks()
fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
self.lvm.create_volume(self.VG,
self.LV,
self.SIZE,
sparse=False
).AndRaise(RuntimeError())
self.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.TEMPLATE_SIZE)
self.mox.StubOutWithMock(self.lvm, 'remove_volumes')
self.lvm.remove_volumes([self.PATH])
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(RuntimeError, image.create_image, fn,
self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
def test_create_image_generated_negative(self):
fn = self.prepare_mocks()
fn(target=self.PATH,
ephemeral_size=None).AndRaise(RuntimeError())
self.lvm.create_volume(self.VG,
self.LV,
self.SIZE,
sparse=False)
self.mox.StubOutWithMock(self.lvm, 'remove_volumes')
self.lvm.remove_volumes([self.PATH])
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(RuntimeError, image.create_image, fn,
self.TEMPLATE_PATH, self.SIZE,
ephemeral_size=None)
self.mox.VerifyAll()
def test_prealloc_image(self):
CONF.set_override('preallocate_images', 'space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(image, 'check_image_exists', lambda: True)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class EncryptedLvmTestCase(_ImageTestCase, test.NoDBTestCase):
VG = 'FakeVG'
TEMPLATE_SIZE = 512
SIZE = 1024
def setUp(self):
super(EncryptedLvmTestCase, self).setUp()
self.image_class = imagebackend.Lvm
self.flags(enabled=True, group='ephemeral_storage_encryption')
self.flags(cipher='aes-xts-plain64',
group='ephemeral_storage_encryption')
self.flags(key_size=512, group='ephemeral_storage_encryption')
self.flags(fixed_key='00000000000000000000000000000000'
'00000000000000000000000000000000',
group='keymgr')
self.flags(images_volume_group=self.VG, group='libvirt')
self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
self.OLD_STYLE_INSTANCE_PATH = None
self.LV_PATH = os.path.join('/dev', self.VG, self.LV)
self.PATH = os.path.join('/dev/mapper',
imagebackend.dmcrypt.volume_name(self.LV))
self.key_manager = keymgr.API()
self.INSTANCE['ephemeral_key_uuid'] =\
self.key_manager.create_key(self.CONTEXT)
self.KEY = self.key_manager.get_key(self.CONTEXT,
self.INSTANCE['ephemeral_key_uuid']).get_encoded()
self.lvm = imagebackend.lvm
self.disk = imagebackend.disk
self.utils = imagebackend.utils
self.libvirt_utils = imagebackend.libvirt_utils
self.dmcrypt = imagebackend.dmcrypt
def _create_image(self, sparse):
with contextlib.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch.object(self.libvirt_utils, 'create_lvm_image',
mock.Mock()),
mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
mock.Mock()),
mock.patch.object(self.utils, 'execute', mock.Mock())):
fn = mock.Mock()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.TEMPLATE_SIZE,
context=self.CONTEXT)
fn.assert_called_with(context=self.CONTEXT,
max_size=self.TEMPLATE_SIZE,
target=self.TEMPLATE_PATH)
self.lvm.create_volume.assert_called_with(self.VG,
self.LV,
self.TEMPLATE_SIZE,
sparse=sparse)
self.dmcrypt.create_volume.assert_called_with(
self.PATH.rpartition('/')[2],
self.LV_PATH,
CONF.ephemeral_storage_encryption.cipher,
CONF.ephemeral_storage_encryption.key_size,
self.KEY)
cmd = ('qemu-img',
'convert',
'-O',
'raw',
self.TEMPLATE_PATH,
self.PATH)
self.utils.execute.assert_called_with(*cmd, run_as_root=True)
def _create_image_generated(self, sparse):
with contextlib.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch.object(self.libvirt_utils, 'create_lvm_image',
mock.Mock()),
mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
mock.Mock()),
mock.patch.object(self.utils, 'execute', mock.Mock())):
fn = mock.Mock()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH,
self.SIZE,
ephemeral_size=None,
context=self.CONTEXT)
self.lvm.create_volume.assert_called_with(
self.VG,
self.LV,
self.SIZE,
sparse=sparse)
self.dmcrypt.create_volume.assert_called_with(
self.PATH.rpartition('/')[2],
self.LV_PATH,
CONF.ephemeral_storage_encryption.cipher,
CONF.ephemeral_storage_encryption.key_size,
self.KEY)
fn.assert_called_with(target=self.PATH,
ephemeral_size=None, context=self.CONTEXT)
def _create_image_resize(self, sparse):
with contextlib.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch.object(self.libvirt_utils, 'create_lvm_image',
mock.Mock()),
mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
mock.Mock()),
mock.patch.object(self.utils, 'execute', mock.Mock())):
fn = mock.Mock()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE,
context=self.CONTEXT)
fn.assert_called_with(context=self.CONTEXT, max_size=self.SIZE,
target=self.TEMPLATE_PATH)
self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
self.lvm.create_volume.assert_called_with(
self.VG,
self.LV,
self.SIZE,
sparse=sparse)
self.dmcrypt.create_volume.assert_called_with(
self.PATH.rpartition('/')[2],
self.LV_PATH,
CONF.ephemeral_storage_encryption.cipher,
CONF.ephemeral_storage_encryption.key_size,
self.KEY)
cmd = ('qemu-img',
'convert',
'-O',
'raw',
self.TEMPLATE_PATH,
self.PATH)
self.utils.execute.assert_called_with(*cmd, run_as_root=True)
self.disk.resize2fs.assert_called_with(self.PATH, run_as_root=True)
def test_create_image(self):
self._create_image(False)
def test_create_image_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image(True)
def test_create_image_generated(self):
self._create_image_generated(False)
def test_create_image_generated_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image_generated(True)
def test_create_image_resize(self):
self._create_image_resize(False)
def test_create_image_resize_sparsed(self):
self.flags(sparse_logical_volumes=True, group='libvirt')
self._create_image_resize(True)
def test_create_image_negative(self):
with contextlib.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch.object(self.libvirt_utils, 'create_lvm_image',
mock.Mock()),
mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
mock.Mock()),
mock.patch.object(self.utils, 'execute', mock.Mock())):
fn = mock.Mock()
self.lvm.create_volume.side_effect = RuntimeError()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(
RuntimeError,
image.create_image,
fn,
self.TEMPLATE_PATH,
self.SIZE,
context=self.CONTEXT)
fn.assert_called_with(
context=self.CONTEXT,
max_size=self.SIZE,
target=self.TEMPLATE_PATH)
self.disk.get_disk_size.assert_called_with(
self.TEMPLATE_PATH)
self.lvm.create_volume.assert_called_with(
self.VG,
self.LV,
self.SIZE,
sparse=False)
self.dmcrypt.delete_volume.assert_called_with(
self.PATH.rpartition('/')[2])
self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
def test_create_image_encrypt_negative(self):
with contextlib.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch.object(self.libvirt_utils, 'create_lvm_image',
mock.Mock()),
mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
mock.Mock()),
mock.patch.object(self.utils, 'execute', mock.Mock())):
fn = mock.Mock()
self.dmcrypt.create_volume.side_effect = RuntimeError()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(
RuntimeError,
image.create_image,
fn,
self.TEMPLATE_PATH,
self.SIZE,
context=self.CONTEXT)
fn.assert_called_with(
context=self.CONTEXT,
max_size=self.SIZE,
target=self.TEMPLATE_PATH)
self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
self.lvm.create_volume.assert_called_with(
self.VG,
self.LV,
self.SIZE,
sparse=False)
self.dmcrypt.create_volume.assert_called_with(
self.dmcrypt.volume_name(self.LV),
self.LV_PATH,
CONF.ephemeral_storage_encryption.cipher,
CONF.ephemeral_storage_encryption.key_size,
self.KEY)
self.dmcrypt.delete_volume.assert_called_with(
self.PATH.rpartition('/')[2])
self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
def test_create_image_generated_negative(self):
with contextlib.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch.object(self.libvirt_utils, 'create_lvm_image',
mock.Mock()),
mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
mock.Mock()),
mock.patch.object(self.utils, 'execute', mock.Mock())):
fn = mock.Mock()
fn.side_effect = RuntimeError()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(RuntimeError,
image.create_image,
fn,
self.TEMPLATE_PATH,
self.SIZE,
ephemeral_size=None,
context=self.CONTEXT)
self.lvm.create_volume.assert_called_with(
self.VG,
self.LV,
self.SIZE,
sparse=False)
self.dmcrypt.create_volume.assert_called_with(
self.PATH.rpartition('/')[2],
self.LV_PATH,
CONF.ephemeral_storage_encryption.cipher,
CONF.ephemeral_storage_encryption.key_size,
self.KEY)
fn.assert_called_with(
target=self.PATH,
ephemeral_size=None,
context=self.CONTEXT)
self.dmcrypt.delete_volume.assert_called_with(
self.PATH.rpartition('/')[2])
self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
def test_create_image_generated_encrypt_negative(self):
with contextlib.nested(
mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
mock.patch.object(self.disk, 'get_disk_size',
mock.Mock(return_value=self.TEMPLATE_SIZE)),
mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
mock.patch.object(self.libvirt_utils, 'create_lvm_image',
mock.Mock()),
mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
mock.Mock()),
mock.patch.object(self.utils, 'execute', mock.Mock())):
fn = mock.Mock()
fn.side_effect = RuntimeError()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(
RuntimeError,
image.create_image,
fn,
self.TEMPLATE_PATH,
self.SIZE,
ephemeral_size=None,
context=self.CONTEXT)
self.lvm.create_volume.assert_called_with(
self.VG,
self.LV,
self.SIZE,
sparse=False)
self.dmcrypt.create_volume.assert_called_with(
self.PATH.rpartition('/')[2],
self.LV_PATH,
CONF.ephemeral_storage_encryption.cipher,
CONF.ephemeral_storage_encryption.key_size,
self.KEY)
self.dmcrypt.delete_volume.assert_called_with(
self.PATH.rpartition('/')[2])
self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
def test_prealloc_image(self):
self.flags(preallocate_images='space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(image, 'check_image_exists', lambda: True)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
POOL = "FakePool"
USER = "FakeUser"
CONF = "FakeConf"
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Rbd
super(RbdTestCase, self).setUp()
self.flags(images_rbd_pool=self.POOL,
rbd_user=self.USER,
images_rbd_ceph_conf=self.CONF,
group='libvirt')
self.libvirt_utils = imagebackend.libvirt_utils
self.utils = imagebackend.utils
self.mox.StubOutWithMock(rbd_utils, 'rbd')
self.mox.StubOutWithMock(rbd_utils, 'rados')
def test_cache(self):
image = self.image_class(self.INSTANCE, self.NAME)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(image, 'check_image_exists')
os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
image.check_image_exists().AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
self.mox.ReplayAll()
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_base_dir_exists(self):
fn = self.mox.CreateMockAnything()
image = self.image_class(self.INSTANCE, self.NAME)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(image, 'check_image_exists')
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
image.check_image_exists().AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
self.mox.ReplayAll()
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_image_exists(self):
image = self.image_class(self.INSTANCE, self.NAME)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(image, 'check_image_exists')
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
image.check_image_exists().AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
self.mox.ReplayAll()
image.cache(None, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_template_exists(self):
image = self.image_class(self.INSTANCE, self.NAME)
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(image, 'check_image_exists')
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
image.check_image_exists().AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
self.mox.ReplayAll()
self.mock_create_image(image)
image.cache(None, self.TEMPLATE)
self.mox.VerifyAll()
def test_create_image(self):
fn = self.mox.CreateMockAnything()
fn(max_size=None, target=self.TEMPLATE_PATH)
rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
image = self.image_class(self.INSTANCE, self.NAME)
self.mox.StubOutWithMock(image, 'check_image_exists')
image.check_image_exists().AndReturn(False)
image.check_image_exists().AndReturn(False)
self.mox.ReplayAll()
image.create_image(fn, self.TEMPLATE_PATH, None)
rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
rbd_name, '--new-format', '--id', self.USER,
'--conf', self.CONF)
self.assertEqual(fake_processutils.fake_execute_get_log(),
[' '.join(cmd)])
self.mox.VerifyAll()
def test_create_image_resize(self):
fn = self.mox.CreateMockAnything()
full_size = self.SIZE * 2
fn(max_size=full_size, target=self.TEMPLATE_PATH)
rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
image = self.image_class(self.INSTANCE, self.NAME)
self.mox.StubOutWithMock(image, 'check_image_exists')
image.check_image_exists().AndReturn(False)
image.check_image_exists().AndReturn(False)
rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
rbd_name, '--new-format', '--id', self.USER,
'--conf', self.CONF)
self.mox.StubOutWithMock(image, 'get_disk_size')
image.get_disk_size(rbd_name).AndReturn(self.SIZE)
self.mox.StubOutWithMock(image.driver, 'resize')
image.driver.resize(rbd_name, full_size)
self.mox.StubOutWithMock(image, 'verify_base_size')
image.verify_base_size(self.TEMPLATE_PATH, full_size)
self.mox.ReplayAll()
image.create_image(fn, self.TEMPLATE_PATH, full_size)
self.assertEqual(fake_processutils.fake_execute_get_log(),
[' '.join(cmd)])
self.mox.VerifyAll()
def test_create_image_already_exists(self):
rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
image = self.image_class(self.INSTANCE, self.NAME)
self.mox.StubOutWithMock(image, 'check_image_exists')
image.check_image_exists().AndReturn(True)
self.mox.StubOutWithMock(image, 'get_disk_size')
image.get_disk_size(self.TEMPLATE_PATH).AndReturn(self.SIZE)
image.check_image_exists().AndReturn(True)
rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
image.get_disk_size(rbd_name).AndReturn(self.SIZE)
self.mox.ReplayAll()
fn = self.mox.CreateMockAnything()
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
def test_prealloc_image(self):
CONF.set_override('preallocate_images', 'space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(image, 'check_image_exists', lambda: True)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
def test_parent_compatible(self):
self.assertEqual(inspect.getargspec(imagebackend.Image.libvirt_info),
inspect.getargspec(self.image_class.libvirt_info))
def test_image_path(self):
conf = "FakeConf"
pool = "FakePool"
user = "FakeUser"
self.flags(images_rbd_pool=pool, group='libvirt')
self.flags(images_rbd_ceph_conf=conf, group='libvirt')
self.flags(rbd_user=user, group='libvirt')
image = self.image_class(self.INSTANCE, self.NAME)
rbd_path = "rbd:%s/%s:id=%s:conf=%s" % (pool, image.rbd_name,
user, conf)
self.assertEqual(image.path, rbd_path)
def test_get_disk_size(self):
image = self.image_class(self.INSTANCE, self.NAME)
with mock.patch.object(image.driver, 'size') as size_mock:
size_mock.return_value = 2361393152
self.assertEqual(2361393152, image.get_disk_size(image.path))
size_mock.assert_called_once_with(image.rbd_name)
def test_create_image_too_small(self):
image = self.image_class(self.INSTANCE, self.NAME)
with mock.patch.object(image, 'driver') as driver_mock:
driver_mock.exists.return_value = True
driver_mock.size.return_value = 2
self.assertRaises(exception.FlavorDiskTooSmall,
image.create_image, mock.MagicMock(),
self.TEMPLATE_PATH, 1)
driver_mock.size.assert_called_once_with(image.rbd_name)
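# Tests for the Ploop image backend (the Virtuozzo/Parallels ploop disk format).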
class PloopTestCase(_ImageTestCase, test.NoDBTestCase):
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Ploop
super(PloopTestCase, self).setUp()
self.utils = imagebackend.utils
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(imagebackend.utils.synchronized,
'__call__')
self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
self.mox.StubOutWithMock(self.utils, 'execute')
return fn
def test_cache(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_create_image(self):
self.stubs.Set(imagebackend.Ploop, 'get_disk_size', lambda a, b: 2048)
fn = self.prepare_mocks()
fn(target=self.TEMPLATE_PATH, max_size=2048, image_id=None)
img_path = os.path.join(self.PATH, "root.hds")
imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, img_path)
self.utils.execute("ploop", "restore-descriptor", "-f", "raw",
self.PATH, img_path)
self.utils.execute("ploop", "grow", '-s', "2K",
os.path.join(self.PATH, "DiskDescriptor.xml"),
run_as_root=True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, 2048, image_id=None)
self.mox.VerifyAll()
def test_prealloc_image(self):
self.flags(preallocate_images='space')
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(image, 'check_image_exists', lambda: True)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
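# Tests for imagebackend.Backend, which maps a configured image type string
# ('raw', 'qcow2', 'lvm', 'rbd', 'default', ...) to a concrete Image subclass,
# and for how the preallocate_images flag is interpreted.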
class BackendTestCase(test.NoDBTestCase):
INSTANCE = objects.Instance(id=1, uuid=uuidutils.generate_uuid())
NAME = 'fake-name.suffix'
def setUp(self):
super(BackendTestCase, self).setUp()
self.flags(enabled=False, group='ephemeral_storage_encryption')
self.INSTANCE['ephemeral_key_uuid'] = None
def get_image(self, use_cow, image_type):
return imagebackend.Backend(use_cow).image(self.INSTANCE,
self.NAME,
image_type)
def _test_image(self, image_type, image_not_cow, image_cow):
image1 = self.get_image(False, image_type)
image2 = self.get_image(True, image_type)
def assertIsInstance(instance, class_object):
failure = ('Expected %s,' +
' but got %s.') % (class_object.__name__,
instance.__class__.__name__)
self.assertIsInstance(instance, class_object, msg=failure)
assertIsInstance(image1, image_not_cow)
assertIsInstance(image2, image_cow)
def test_image_raw(self):
self._test_image('raw', imagebackend.Raw, imagebackend.Raw)
def test_image_raw_preallocate_images(self):
flags = ('space', 'Space', 'SPACE')
for f in flags:
self.flags(preallocate_images=f)
raw = imagebackend.Raw(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertTrue(raw.preallocate)
def test_image_raw_preallocate_images_bad_conf(self):
self.flags(preallocate_images='space1')
raw = imagebackend.Raw(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertFalse(raw.preallocate)
def test_image_qcow2(self):
self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2)
def test_image_qcow2_preallocate_images(self):
flags = ('space', 'Space', 'SPACE')
for f in flags:
self.flags(preallocate_images=f)
qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertTrue(qcow.preallocate)
def test_image_qcow2_preallocate_images_bad_conf(self):
self.flags(preallocate_images='space1')
qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertFalse(qcow.preallocate)
def test_image_lvm(self):
self.flags(images_volume_group='FakeVG', group='libvirt')
self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)
def test_image_rbd(self):
conf = "FakeConf"
pool = "FakePool"
self.flags(images_rbd_pool=pool, group='libvirt')
self.flags(images_rbd_ceph_conf=conf, group='libvirt')
self.mox.StubOutWithMock(rbd_utils, 'rbd')
self.mox.StubOutWithMock(rbd_utils, 'rados')
self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd)
def test_image_default(self):
self._test_image('default', imagebackend.Raw, imagebackend.Qcow2)
class UtilTestCase(test.NoDBTestCase):
def test_get_hw_disk_discard(self):
self.assertEqual('unmap', imagebackend.get_hw_disk_discard("unmap"))
self.assertEqual('ignore', imagebackend.get_hw_disk_discard("ignore"))
self.assertIsNone(imagebackend.get_hw_disk_discard(None))
self.assertRaises(RuntimeError, imagebackend.get_hw_disk_discard,
"fake")
|
adelina-t/nova
|
nova/tests/unit/virt/libvirt/test_imagebackend.py
|
Python
|
apache-2.0
| 59,792
|
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from luigi.contrib.ssh import RemoteContext
import unittest
import subprocess
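# These tests cover luigi.contrib.ssh.RemoteContext: the first inspects the ssh
# command line it builds by temporarily replacing subprocess.Popen, the second
# checks that connecting to a non-existent host raises CalledProcessError.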
class TestMockedRemoteContext(unittest.TestCase):
def test_subprocess_delegation(self):
""" Test subprocess call structure using mock module """
orig_Popen = subprocess.Popen
self.last_test = None
def Popen(cmd, **kwargs):
self.last_test = cmd
subprocess.Popen = Popen
context = RemoteContext(
"some_host",
username="luigi",
key_file="/some/key.pub"
)
context.Popen(["ls"])
self.assertTrue("ssh" in self.last_test)
self.assertTrue("-i" in self.last_test)
self.assertTrue("/some/key.pub" in self.last_test)
self.assertTrue("luigi@some_host" in self.last_test)
self.assertTrue("ls" in self.last_test)
subprocess.Popen = orig_Popen
def test_check_output_fail_connect(self):
""" Test check_output to a non-existing host """
context = RemoteContext("__NO_HOST_LIKE_THIS__")
self.assertRaises(
subprocess.CalledProcessError,
context.check_output, ["ls"]
)
|
mortardata/luigi
|
test/test_ssh.py
|
Python
|
apache-2.0
| 1,733
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""remove_issue tests."""
import unittest
import flask
import webtest
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
from clusterfuzz._internal.tests.test_libs import test_utils
from handlers.testcase_detail import remove_issue
from libs import form
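# The handler is exercised against the datastore emulator; authentication and
# the testcase-detail rendering helpers are patched out so only the
# issue-removal logic itself is tested.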
@test_utils.with_cloud_emulators('datastore')
class HandlerTest(unittest.TestCase):
"""Test Handler."""
def setUp(self):
test_helpers.patch(self, [
'handlers.testcase_detail.show.get_testcase_detail',
'libs.auth.get_current_user',
'libs.auth.is_current_user_admin',
])
self.mock.is_current_user_admin.return_value = True
self.mock.get_testcase_detail.return_value = {'testcase': 'yes'}
self.mock.get_current_user().email = 'test@user.com'
flaskapp = flask.Flask('testflask')
flaskapp.add_url_rule('/', view_func=remove_issue.Handler.as_view('/'))
self.app = webtest.TestApp(flaskapp)
def test_succeed(self):
"""Remove issue from a testcase."""
testcase = data_types.Testcase()
testcase.bug_information = '1234'
testcase.put()
resp = self.app.post_json('/', {
'testcaseId': testcase.key.id(),
'csrf_token': form.generate_csrf_token(),
})
self.assertEqual(200, resp.status_int)
self.assertEqual('yes', resp.json['testcase'])
self.assertEqual('', testcase.key.get().bug_information)
|
google/clusterfuzz
|
src/clusterfuzz/_internal/tests/appengine/handlers/testcase_detail/remove_issue_test.py
|
Python
|
apache-2.0
| 2,010
|
import logging
import secrets
from typing import List, Optional, Tuple
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect, render
from django.utils.cache import patch_cache_control
from zerver.context_processors import get_valid_realm_from_request
from zerver.decorator import web_public_view, zulip_login_required
from zerver.forms import ToSForm
from zerver.lib.actions import do_change_tos_version, realm_user_count
from zerver.lib.compatibility import is_outdated_desktop_app, is_unsupported_browser
from zerver.lib.home import build_page_params_for_home_page_load, get_user_permission_info
from zerver.lib.request import RequestNotes
from zerver.lib.streams import access_stream_by_name
from zerver.lib.subdomains import get_subdomain
from zerver.lib.utils import statsd
from zerver.models import PreregistrationUser, Realm, Stream, UserProfile
from zerver.views.portico import hello_view
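# A user must (re-)accept the Terms of Service only when TERMS_OF_SERVICE and
# TOS_VERSION are configured and the configured major version is newer than the
# one the user last accepted.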
def need_accept_tos(user_profile: Optional[UserProfile]) -> bool:
if user_profile is None:
return False
if settings.TERMS_OF_SERVICE is None: # nocoverage
return False
if settings.TOS_VERSION is None:
return False
return int(settings.TOS_VERSION.split(".")[0]) > user_profile.major_tos_version()
@zulip_login_required
def accounts_accept_terms(request: HttpRequest) -> HttpResponse:
assert request.user.is_authenticated
if request.method == "POST":
form = ToSForm(request.POST)
if form.is_valid():
do_change_tos_version(request.user, settings.TOS_VERSION)
return redirect(home)
else:
form = ToSForm()
email = request.user.delivery_email
special_message_template = None
if request.user.tos_version is None and settings.FIRST_TIME_TOS_TEMPLATE is not None:
special_message_template = "zerver/" + settings.FIRST_TIME_TOS_TEMPLATE
return render(
request,
"zerver/accounts_accept_terms.html",
context={
"form": form,
"email": email,
"special_message_template": special_message_template,
},
)
def detect_narrowed_window(
request: HttpRequest, user_profile: Optional[UserProfile]
) -> Tuple[List[List[str]], Optional[Stream], Optional[str]]:
"""This function implements Zulip's support for a mini Zulip window
that just handles messages from a single narrow"""
if user_profile is None:
return [], None, None
narrow: List[List[str]] = []
narrow_stream = None
narrow_topic = request.GET.get("topic")
if request.GET.get("stream"):
try:
# TODO: We should support stream IDs and PMs here as well.
narrow_stream_name = request.GET.get("stream")
(narrow_stream, ignored_sub) = access_stream_by_name(user_profile, narrow_stream_name)
narrow = [["stream", narrow_stream.name]]
except Exception:
logging.warning("Invalid narrow requested, ignoring", extra=dict(request=request))
if narrow_stream is not None and narrow_topic is not None:
narrow.append(["topic", narrow_topic])
return narrow, narrow_stream, narrow_topic
def update_last_reminder(user_profile: Optional[UserProfile]) -> None:
"""Reset our don't-spam-users-with-email counter since the
user has since logged in
"""
if user_profile is None:
return
if user_profile.last_reminder is not None: # nocoverage
# TODO: Look into the history of last_reminder; we may have
# eliminated that as a useful concept for non-bot users.
user_profile.last_reminder = None
user_profile.save(update_fields=["last_reminder"])
def home(request: HttpRequest) -> HttpResponse:
if not settings.ROOT_DOMAIN_LANDING_PAGE:
return home_real(request)
    # If settings.ROOT_DOMAIN_LANDING_PAGE is set, send the user to the landing
    # page, not the login form, on the root domain.
subdomain = get_subdomain(request)
if subdomain != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
return home_real(request)
return hello_view(request)
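# home_real serves the main web app: it rejects banned/outdated desktop apps and
# unsupported browsers, resolves the user and realm (logged-in or web-public),
# handles Terms of Service and narrowing, and finally builds the page_params
# needed to bootstrap the single-page app.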
@web_public_view
def home_real(request: HttpRequest) -> HttpResponse:
# Before we do any real work, check if the app is banned.
client_user_agent = request.META.get("HTTP_USER_AGENT", "")
(insecure_desktop_app, banned_desktop_app, auto_update_broken) = is_outdated_desktop_app(
client_user_agent
)
if banned_desktop_app:
return render(
request,
"zerver/insecure_desktop_app.html",
context={
"auto_update_broken": auto_update_broken,
},
)
(unsupported_browser, browser_name) = is_unsupported_browser(client_user_agent)
if unsupported_browser:
return render(
request,
"zerver/unsupported_browser.html",
context={
"browser_name": browser_name,
},
)
# We need to modify the session object every two weeks or it will expire.
# This line makes reloading the page a sufficient action to keep the
# session alive.
request.session.modified = True
if request.user.is_authenticated:
user_profile = request.user
realm = user_profile.realm
else:
# user_profile=None corresponds to the logged-out "web_public" visitor case.
user_profile = None
realm = get_valid_realm_from_request(request)
update_last_reminder(user_profile)
statsd.incr("views.home")
# If a user hasn't signed the current Terms of Service, send them there
if need_accept_tos(user_profile):
return accounts_accept_terms(request)
narrow, narrow_stream, narrow_topic = detect_narrowed_window(request, user_profile)
if user_profile is not None:
first_in_realm = realm_user_count(user_profile.realm) == 1
# If you are the only person in the realm and you didn't invite
# anyone, we'll continue to encourage you to do so on the frontend.
prompt_for_invites = (
first_in_realm
and not PreregistrationUser.objects.filter(referred_by=user_profile).count()
)
needs_tutorial = user_profile.tutorial_status == UserProfile.TUTORIAL_WAITING
else:
first_in_realm = False
prompt_for_invites = False
# The current tutorial doesn't super make sense for logged-out users.
needs_tutorial = False
queue_id, page_params = build_page_params_for_home_page_load(
request=request,
user_profile=user_profile,
realm=realm,
insecure_desktop_app=insecure_desktop_app,
narrow=narrow,
narrow_stream=narrow_stream,
narrow_topic=narrow_topic,
first_in_realm=first_in_realm,
prompt_for_invites=prompt_for_invites,
needs_tutorial=needs_tutorial,
)
log_data = RequestNotes.get_notes(request).log_data
assert log_data is not None
log_data["extra"] = f"[{queue_id}]"
csp_nonce = secrets.token_hex(24)
user_permission_info = get_user_permission_info(user_profile)
response = render(
request,
"zerver/app/index.html",
context={
"user_profile": user_profile,
"page_params": page_params,
"csp_nonce": csp_nonce,
"color_scheme": user_permission_info.color_scheme,
},
)
patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
return response
@zulip_login_required
def desktop_home(request: HttpRequest) -> HttpResponse:
return redirect(home)
|
rht/zulip
|
zerver/views/home.py
|
Python
|
apache-2.0
| 7,645
|
import json
from typing import TYPE_CHECKING, Optional
from boxsdk.util.text_enum import TextEnum
from boxsdk.exception import BoxAPIException
from .base_object import BaseObject
if TYPE_CHECKING:
from boxsdk.object.user import User
from boxsdk.object.terms_of_service_user_status import TermsOfServiceUserStatus
class TermsOfServiceType(TextEnum):
"""An enum of possible terms of service types"""
MANAGED = 'managed'
EXTERNAL = 'external'
class TermsOfServiceStatus(TextEnum):
"""An enum of possible terms of service status"""
ENABLED = 'enabled'
DISABLED = 'disabled'
class TermsOfService(BaseObject):
"""Represents a Box terms of service."""
_item_type = 'terms_of_service'
def get_user_status(self, user: Optional['User'] = None) -> 'TermsOfServiceUserStatus':
"""
Get the terms of service user status.
:param user:
This is the user to get the status of the terms of service for. This defaults to current
user.
:returns:
A :class:`TermsOfServiceUserStatus` object
"""
url = self._session.get_url('terms_of_service_user_statuses')
additional_params = {
'tos_id': self.object_id,
}
if user is not None:
additional_params['user_id'] = user.object_id
box_response = self._session.get(url, params=additional_params)
response_object = box_response.json()
response = response_object['entries'][0]
return self.translator.translate(
session=self._session,
response_object=response,
)
def accept(self, user: Optional['User'] = None) -> 'TermsOfServiceUserStatus':
"""
Accept a terms of service.
:param user:
The :class:`User` to assign the terms of service to.
:returns:
A newly created :class:`TermsOfServiceUserStatus` object
"""
return self.set_user_status(is_accepted=True, user=user)
def reject(self, user: Optional['User'] = None) -> 'TermsOfServiceUserStatus':
"""
Reject a terms of service.
:param user:
The :class:`User` to assign the terms of service to.
:returns:
A newly created :class:`TermsOfServiceUserStatus` object
"""
return self.set_user_status(is_accepted=False, user=user)
def set_user_status(self, is_accepted: bool, user: Optional['User'] = None) -> 'TermsOfServiceUserStatus':
"""
Create a terms of service user status.
:param is_accepted:
            Indicates whether a user has accepted or rejected the terms of service.
:param user:
The :class:`User` to assign the terms of service to.
:returns:
A newly created :class:`TermsOfServiceUserStatus` object
"""
url = self._session.get_url('terms_of_service_user_statuses')
body = {
'tos': {
'type': self.object_type,
'id': self.object_id,
},
'is_accepted': is_accepted,
}
if user is not None:
body['user'] = {
'type': user.object_type,
'id': user.object_id,
}
translated_response = None
try:
box_response = self._session.post(url, data=json.dumps(body))
response = box_response.json()
translated_response = self.translator.translate(
session=self._session,
response_object=response,
)
except BoxAPIException as err:
if err.status == 409:
user_status = self.get_user_status(user)
translated_response = user_status.update_info(data={'is_accepted': is_accepted})
return translated_response
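# Minimal usage sketch (illustrative only, not part of this module). It assumes
# an authenticated boxsdk `client`; the accessor name follows the SDK's usual
# client.<resource>(...) pattern:
#
#     tos = client.terms_of_service(tos_id='12345')
#     status = tos.accept()                                      # current user accepts
#     status = tos.set_user_status(is_accepted=False, user=some_user)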
|
box/box-python-sdk
|
boxsdk/object/terms_of_service.py
|
Python
|
apache-2.0
| 3,849
|
# -*- coding: utf-8 -*-
import json
import os
import time
import psutil
import pyautogui
pubg_url = 'steam://rungameid/578080'
PROCNAME = "TslGame.exe"
CRASH_PROCNAME = "BroCrashReporter.exe"
debug_directory = "debug_screenshots"
start_state = "HELLO"
play_state = "PLAYING"
play_timer_max = 60 * 3
matching_state = "MATCHING"
matching_timer_max = 60 * 3
loading_state = "LOADING"
loading_timer_max = 60 * 3
gameloading_state = "GAME IS LOADING"
gameloading_timer_max = 60 * 3
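# Simple screen-scraping state machine: HELLO -> LOADING -> MATCHING ->
# "GAME IS LOADING" -> PLAYING, falling back to HELLO whenever a per-state
# timeout expires (see checkTimer below).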
state = start_state
takeScrenshot = True
timer = 0.0
def getConfig():
with open('config.json', encoding='UTF-8') as data_file:
data = json.load(data_file)
return data
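# Pixel helpers: the bot never reads game memory; it only takes screenshots with
# pyautogui and compares individual pixel colors (within a tolerance) against
# reference colors from config.json.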
def getpixel(x, y):
return pyautogui.screenshot().getpixel((x, y))
def pixelMatchesColor(x, y, expectedRGBColor, tolerance=0):
pix = getpixel(x,y)
if len(pix) == 3 or len(expectedRGBColor) == 3: # RGB mode
r, g, b = pix[:3]
exR, exG, exB = expectedRGBColor[:3]
return (abs(r - exR) <= tolerance) and (abs(g - exG) <= tolerance) and (abs(b - exB) <= tolerance)
elif len(pix) == 4 and len(expectedRGBColor) == 4: # RGBA mode
r, g, b, a = pix
exR, exG, exB, exA = expectedRGBColor
return (abs(r - exR) <= tolerance) and (abs(g - exG) <= tolerance) and (abs(b - exB) <= tolerance) and (
abs(a - exA) <= tolerance)
else:
assert False, 'Color mode was expected to be length 3 (RGB) or 4 (RGBA), but pixel is length %s and expectedRGBColor is length %s' % (
len(pix), len(expectedRGBColor))
def printScreen(message):
if takeScrenshot:
if not os.path.exists(debug_directory):
os.makedirs(debug_directory)
pyautogui.screenshot('{}/{}{}.png'.format(debug_directory, time.strftime("%m.%d %H.%M.%S", time.gmtime()), message))
def changeState(value):
global state, timer
state = value
timer = 0
def killGame():
for proc in psutil.process_iter():
# check whether the process name matches
if proc.name() == PROCNAME:
proc.kill()
def matchesButton(position):
    x, y = position[0], position[1]
    return (pixelMatchesColor(x, y, white_button, tolerance=color_tolerance)
            or pixelMatchesColor(x, y, gray_button, tolerance=color_tolerance)
            or pixelMatchesColor(x, y, super_white_button, tolerance=color_tolerance)
            or pixelMatchesColor(x, y, golden_button, tolerance=color_tolerance))
def isGameRunning():
    # check whether any running process matches the PUBG process name
    return any(proc.name() == PROCNAME for proc in psutil.process_iter())
def checkTimer():
global state
if state == loading_state and timer > loading_timer_max:
printScreen('Timeout')
print('Timeout. Restarting the game')
changeState(start_state)
elif state == matching_state and timer > matching_timer_max:
printScreen('Timeout')
print('Timeout. Restarting the game')
changeState(start_state)
elif state == play_state and timer > play_timer_max:
printScreen('Timeout')
print('Timeout. Restarting the game')
changeState(start_state)
elif state == gameloading_state and timer > gameloading_timer_max:
printScreen('Timeout')
print('Timeout. Restarting the game')
changeState(start_state)
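# Interactive setup: load config.json, ask the user which server and mode to
# queue for, then resolve the screen coordinates, timers and colors used by the
# main loop below.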
config = getConfig()
# Menu
print('By using this software you agree to the license! You can find it in the code.')
print('Choose a server:')
number = 1
for server in config['servers']:
print('{}. {}'.format(number, server['title']))
number += 1
inp = int(input('Type number: '))
inp -= 1
server_position = (config['servers'][inp]['x'], config['servers'][inp]['y'], config['servers'][inp]['title'])
print('Choose a mode:')
number = 1
for server in config['modes']:
print('{}. {}'.format(number, server['title']))
number += 1
inp = int(input('Type number: '))
inp -= 1
print('Can I take screenshots if something goes wrong? (y/N)')
if input().lower() == 'y':
print('Thanks')
else:
print("Well, if something will go wrong, then I can't help you")
takeScrenshot = False
# Position init
mode_position = (config['modes'][inp]['x'], config['modes'][inp]['y'], config['modes'][inp]['title'])
mode_tick_position = (config['modes'][inp]['tick']['x'], config['modes'][inp]['tick']['y'])
play_button_position = (config['play_button']['x'], config['play_button']['y'])
play_state_position = (config['play_state']['x'], config['play_state']['y'])
text_position = (config['text']['x'], config['text']['y'])
exit_position = (config['exit_to_lobby']['x'], config['exit_to_lobby']['y'])
error_position_check = (config['error_position']['x'], config['error_position']['y'])
error_ok_position = (config['error_ok_position']['x'], config['error_ok_position']['y'])
game_message_position = (config['game_message_position']['x'], config['game_message_position']['y'])
exit_button_position = (config['exit_button_position']['x'], config['exit_button_position']['y'])
reconnect_button_position = (config['reconnect_button_position']['x'], config['reconnect_button_position']['y'])
# Reading timings
refresh_rate = config["timers"]["refresh_rate"]
wait_after_killing_a_game = config["timers"]["wait_after_killing_a_game"]
start_delay = config["timers"]["start_delay"]
animation_delay = config["timers"]["animation_delay"]
wait_for_players = config["timers"]["wait_for_players"]
wait_for_plain = config["timers"]["wait_for_plain"]
exit_animation_delay = config["timers"]["exit_animation_delay"]
loading_delay = config["timers"]["loading_delay"]
# Colors
def getColor(config, name):
return (config["colors"][name]["r"], config["colors"][name]["g"], config["colors"][name]["b"])
color_tolerance = config["color_tolerance"]
dark_play_color = getColor(config, "dark_play_color")
play_color = getColor(config, "play_color")
matching_color = getColor(config, "matching_color")
matching_tick_color = getColor(config, "matching_tick_color")
text_start_color = getColor(config, "text_start_color")
white_button = getColor(config, "white_button")
gray_button = getColor(config, "gray_button")
golden_button = getColor(config, "golden_button")
super_white_button = getColor(config, "super_white_button")
windows_background = getColor(config, "windows_background")
exit_button_color = getColor(config, "exit_button_color")
reconnect_button_color = getColor(config, "reconnect_button_color")
# Game info
print('Server: {}. Mode: {}'.format(server_position[2], mode_position[2]))
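# Main loop: poll the screen every refresh_rate seconds, advance the state
# machine, and restart the game when the crash reporter appears or a timeout hits.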
while (1):
try:
for proc in psutil.process_iter():
# check whether the process name matches
if proc.name() == CRASH_PROCNAME:
                print('PUBG crash reporter detected. Trying to recover!')
proc.kill()
killGame()
time.sleep(wait_after_killing_a_game)
changeState(start_state)
except Exception as ex:
print('Something went wrong while killing bug reporter... Error message: {}'.format(ex))
if state == start_state:
if pixelMatchesColor(error_position_check[0], error_position_check[1], windows_background,
tolerance=color_tolerance):
pyautogui.press('enter')
pyautogui.click(error_ok_position[0], error_ok_position[1])
killGame()
time.sleep(wait_after_killing_a_game)
try:
os.startfile(pubg_url)
changeState(loading_state)
time.sleep(start_delay)
print('Loading PUBG')
except Exception as ex:
            print('Something went wrong while starting PUBG... Error message: {}'.format(ex))
elif state == loading_state:
if pixelMatchesColor(play_state_position[0], play_state_position[1], play_color,
tolerance=color_tolerance) or pixelMatchesColor(play_state_position[0],
play_state_position[1],
dark_play_color,
tolerance=color_tolerance):
pyautogui.moveTo(play_button_position[0], play_button_position[1])
time.sleep(animation_delay)
# Pick a server
pyautogui.click(server_position[0], server_position[1])
time.sleep(animation_delay)
pyautogui.click(mode_position[0], mode_position[1])
time.sleep(animation_delay)
if pixelMatchesColor(mode_tick_position[0], mode_tick_position[1], matching_tick_color,
tolerance=color_tolerance):
pyautogui.click(mode_tick_position[0], mode_tick_position[1])
pyautogui.click(play_button_position[0], play_button_position[1])
changeState(matching_state)
time.sleep(loading_delay)
print('Starting matchmaking...')
elif pixelMatchesColor(text_position[0], text_position[1], text_start_color, tolerance=color_tolerance):
print('I see text, so the game is probably ready...')
changeState(play_state)
elif pixelMatchesColor(reconnect_button_position[0], reconnect_button_position[1], reconnect_button_color, tolerance=color_tolerance):
print('Nice orange button? I\'ll press it!')
pyautogui.click(reconnect_button_position[0], reconnect_button_position[1])
time.sleep(animation_delay)
elif matchesButton(game_message_position):
print("Game's message was denied")
pyautogui.click(game_message_position[0], game_message_position[1])
elif not pixelMatchesColor(exit_button_position[0], exit_button_position[1], exit_button_color, tolerance=color_tolerance) \
and not pixelMatchesColor(exit_button_position[0], exit_button_position[1], matching_tick_color, tolerance=color_tolerance)\
and timer > 30 and isGameRunning():
print('I can\'t see exit button, so the game is probably ready...')
time.sleep(wait_for_players)
changeState(play_state)
elif state == matching_state:
if pixelMatchesColor(play_state_position[0], play_state_position[1], play_color,
tolerance=color_tolerance) or pixelMatchesColor(play_state_position[0],
play_state_position[1],
dark_play_color,
tolerance=color_tolerance):
changeState(loading_state)
time.sleep(loading_delay)
if not pixelMatchesColor(play_state_position[0], play_state_position[1], matching_color,
tolerance=color_tolerance):
if pixelMatchesColor(play_state_position[0], play_state_position[1], matching_tick_color,
tolerance=color_tolerance):
changeState(gameloading_state)
time.sleep(loading_delay)
print('Session is loading')
elif state == gameloading_state:
if not pixelMatchesColor(play_state_position[0], play_state_position[1], matching_tick_color,
tolerance=color_tolerance):
print('Loading is complete')
time.sleep(wait_for_players)
changeState(play_state)
elif state == play_state:
# print(text_position[0], text_position[1])
if not pixelMatchesColor(text_position[0], text_position[1], text_start_color, tolerance=color_tolerance):
time.sleep(wait_for_plain)
pyautogui.press('esc')
time.sleep(animation_delay)
pyautogui.click(exit_position[0], exit_position[1])
time.sleep(exit_animation_delay)
pyautogui.click(exit_position[0], exit_position[1])
changeState(loading_state)
print('Going in menu. Loading again')
time.sleep(10)
time.sleep(refresh_rate)
timer += refresh_rate
checkTimer()
|
fuckpubg/AHK
|
botpubg-mpgh/SuperBot.py
|
Python
|
apache-2.0
| 12,651
|
# file test_fedora/test_models.py
#
# Copyright 2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timedelta
import logging
from lxml import etree
from mock import patch, Mock
import os
from rdflib import URIRef, Graph as RdfGraph, XSD, Literal
from rdflib.namespace import Namespace
import re
import tempfile
import six
from eulfedora import models
from eulfedora.api import ApiFacade
from eulfedora.rdfns import relsext, model as modelns
from eulfedora.util import RequestFailed, fedoratime_to_datetime, md5sum, \
force_bytes, force_text
from eulfedora.xml import ObjectDatastream, FEDORA_MANAGE_NS, FoxmlDigitalObject, \
AuditTrail, AuditTrailRecord
from eulxml.xmlmap.dc import DublinCore
from test.test_fedora.base import FedoraTestCase, FIXTURE_ROOT
from test.testsettings import FEDORA_PIDSPACE
logger = logging.getLogger(__name__)
ONE_SEC = timedelta(seconds=1)
TWO_SECS = timedelta(seconds=2)
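# Test DigitalObject subclasses: MyDigitalObject declares plain-text, managed
# XML and binary file datastreams so the tests below can exercise each
# Datastream descriptor flavor against a running Fedora repository.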
class MyDigitalObject(models.DigitalObject):
CONTENT_MODELS = ['info:fedora/%s:ExampleCModel' % FEDORA_PIDSPACE,
'info:fedora/%s:AnotherCModel' % FEDORA_PIDSPACE]
# extend digital object with datastreams for testing
text = models.Datastream("TEXT", "Text datastream", defaults={
'mimetype': 'text/plain',
})
extradc = models.XmlDatastream("EXTRADC", "Managed DC XML datastream", DublinCore,
defaults={
'mimetype': 'application/xml',
'versionable': True,
})
image = models.FileDatastream('IMAGE', 'managed binary image datastream', defaults={
'mimetype': 'image/png',
})
class SimpleDigitalObject(models.DigitalObject):
CONTENT_MODELS = ['info:fedora/%s:SimpleObject' % FEDORA_PIDSPACE]
# extend digital object with datastreams for testing
text = models.Datastream("TEXT", "Text datastream", defaults={
'mimetype': 'text/plain',
})
extradc = models.XmlDatastream("EXTRADC", "Managed DC XML datastream", DublinCore)
TEXT_CONTENT = "Here is some text content for a non-xml datastream."
def _add_text_datastream(obj):
# add a text datastream to the current test object
FILE = tempfile.NamedTemporaryFile(mode="w", suffix=".txt")
FILE.write(TEXT_CONTENT)
FILE.flush()
# info for calling addDatastream, and return
ds = {'id': 'TEXT', 'label': 'text datastream', 'mimeType': 'text/plain',
'controlGroup': 'M', 'logMessage': "creating new datastream",
'versionable': False, 'checksumType': 'MD5'}
with open(FILE.name) as tmpfile:
obj.api.addDatastream(obj.pid, ds['id'], ds['label'],
ds['mimeType'], ds['logMessage'], ds['controlGroup'], content=tmpfile,
checksumType=ds['checksumType'], versionable=ds['versionable'])
FILE.close()
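# Datastream-level tests: fixtures are ingested into a real Fedora test
# instance (via FedoraTestCase), so these assertions round-trip through the
# Fedora REST API.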
class TestDatastreams(FedoraTestCase):
fixtures = ['object-with-pid.foxml']
pidspace = FEDORA_PIDSPACE
def setUp(self):
super(TestDatastreams, self).setUp()
self.pid = self.fedora_fixtures_ingested[-1] # get the pid for the last object
self.obj = MyDigitalObject(self.api, self.pid)
# add a text datastream to the current test object
_add_text_datastream(self.obj)
# get fixture ingest time from the server (the hard way) for testing
r = self.obj.api.getDatastream(self.pid, "DC")
dsprofile_node = etree.fromstring(r.content, base_url=r.url)
created_s = dsprofile_node.xpath('string(m:dsCreateDate)',
namespaces={'m': FEDORA_MANAGE_NS})
self.ingest_time = fedoratime_to_datetime(created_s)
def test_get_ds_content(self):
dc = self.obj.dc.content
self.assert_(isinstance(self.obj.dc, models.XmlDatastreamObject))
self.assert_(isinstance(dc, DublinCore))
self.assertEqual(dc.title, "A partially-prepared test object")
self.assertEqual(dc.identifier, self.pid)
self.assert_(isinstance(self.obj.text, models.DatastreamObject))
self.assertEqual(force_text(self.obj.text.content), TEXT_CONTENT)
def test_get_ds_info(self):
self.assertEqual(self.obj.dc.label, "Dublin Core")
self.assertEqual(self.obj.dc.mimetype, "text/xml")
self.assertEqual(self.obj.dc.state, "A")
self.assertEqual(self.obj.dc.versionable, True)
self.assertEqual(self.obj.dc.control_group, "X")
# there may be micro-second variation between these two
# ingest/creation times, but they should probably be less than
# a second or two apart
try:
self.assertAlmostEqual(self.ingest_time, self.obj.dc.created,
delta=TWO_SECS)
except TypeError:
# delta keyword unavailable before python 2.7
self.assert_(abs(self.ingest_time - self.obj.dc.created) < TWO_SECS)
# short-cut to datastream size
self.assertEqual(self.obj.dc.info.size, self.obj.dc.size)
self.assertEqual(self.obj.text.label, "text datastream")
self.assertEqual(self.obj.text.mimetype, "text/plain")
self.assertEqual(self.obj.text.state, "A")
self.assertEqual(self.obj.text.versionable, False)
self.assertEqual(self.obj.text.control_group, "M")
try:
self.assertAlmostEqual(self.ingest_time, self.obj.text.created,
delta=TWO_SECS)
except TypeError:
# delta keyword unavailable before python 2.7
self.assert_(abs(self.ingest_time - self.obj.text.created) < TWO_SECS)
# bootstrap info from defaults for a new object
newobj = MyDigitalObject(self.api)
self.assertEqual('Text datastream', newobj.text.label,
'default label should be set on new datastream')
self.assertEqual('text/plain', newobj.text.mimetype,
                         'default mimetype should be set on new datastream')
self.assertEqual('MD5', newobj.text.checksum_type,
'default checksum type should be set on new datastream')
def test_savedatastream(self):
new_text = "Here is some totally new text content."
self.obj.text.content = new_text
self.obj.text.label = "new ds label"
self.obj.text.mimetype = "text/other"
self.obj.text.versionable = False
self.obj.text.state = "I"
self.obj.text.format = "some.format.uri"
saved = self.obj.text.save("changed text")
self.assertTrue(saved, "saving TEXT datastream should return true")
self.assertEqual(self.obj.text.content, new_text)
# compare with the datastream pulled directly from Fedora
r = self.obj.api.getDatastreamDissemination(self.pid, self.obj.text.id)
self.assertEqual(r.text, new_text)
r = self.obj.api.getDatastream(self.pid, self.obj.text.id)
dsinfo = r.text
self.assert_("<dsLabel>new ds label</dsLabel>" in dsinfo)
self.assert_("<dsMIME>text/other</dsMIME>" in dsinfo)
self.assert_("<dsVersionable>false</dsVersionable>" in dsinfo)
self.assert_("<dsState>I</dsState>" in dsinfo)
self.assert_("<dsFormatURI>some.format.uri</dsFormatURI>" in dsinfo)
# checksum not sent - fedora should calculate one for us
self.assert_("<dsChecksum>%s</dsChecksum>" % md5sum(force_bytes(new_text))
in dsinfo)
# look for log message ?
self.obj.dc.content.title = "this is a new title"
saved = self.obj.dc.save("changed DC title")
self.assertTrue(saved, "saving DC datastream should return true")
r = self.obj.api.getDatastreamDissemination(self.pid, self.obj.dc.id)
self.assert_("<dc:title>this is a new title</dc:title>" in r.text)
def test_save_by_location(self):
file_uri = 'file:///tmp/rsk-test.txt'
# since we can't put or guarantee a test file on the fedora server,
# patch the api with Mock to check api call
with patch.object(ApiFacade, 'modifyDatastream') as mock_mod_ds:
mock_mod_ds.return_value = Mock(status_code=200, content='saved')
self.obj.text.ds_location = file_uri
self.obj.text.content = 'this content should be ignored'
logmsg = 'text content from file uri'
saved = self.obj.text.save(logmsg)
self.assertTrue(saved)
mock_mod_ds.assert_called_with(self.obj.pid, self.obj.text.id,
mimeType='text/plain', dsLocation=file_uri,
logMessage=logmsg)
self.assertEqual(None, self.obj.text.ds_location,
'ds_location should be None after successful save')
# simulate save failure (without an exception)
mock_mod_ds.return_value = Mock(status_code=304)
self.obj.text.ds_location = file_uri
saved = self.obj.text.save(logmsg)
self.assertFalse(saved)
self.assertNotEqual(None, self.obj.text.ds_location,
'ds_location should not be None after failed save')
# purge ds and test addDatastream
self.obj.api.purgeDatastream(self.obj.pid, self.obj.text.id)
# load a new version that knows text ds doesn't exist
obj = MyDigitalObject(self.api, self.pid)
with patch.object(ApiFacade, 'addDatastream') as mock_add_ds:
mock_add_ds.return_value = Mock(status_code=201, content='added')
obj.text.ds_location = file_uri
obj.text.content = 'this content should be ignored'
logmsg = 'text content from file uri'
saved = obj.text.save(logmsg)
self.assertTrue(saved)
mock_add_ds.assert_called_with(self.obj.pid, self.obj.text.id,
mimeType='text/plain', dsLocation=file_uri,
logMessage=logmsg, controlGroup='M')
self.assertEqual(None, obj.text.ds_location,
'ds_location should be None after successful save (add)')
def test_ds_isModified(self):
self.assertFalse(self.obj.text.isModified(),
"isModified should return False for unchanged DC datastream")
self.assertFalse(self.obj.dc.isModified(),
"isModified should return False for unchanged DC datastream")
self.obj.text.label = "next text label"
self.assertTrue(self.obj.text.isModified(),
"isModified should return True when text datastream label has been updated")
self.obj.dc.content.description = "new datastream contents"
self.assertTrue(self.obj.dc.isModified(),
"isModified should return True when DC datastream content has changed")
self.obj.text.save()
self.obj.dc.save()
self.assertFalse(self.obj.text.isModified(),
"isModified should return False after text datastream has been saved")
self.assertFalse(self.obj.dc.isModified(),
"isModified should return False after DC datastream has been saved")
# empty xml should also show as not modified
self.assertFalse(self.obj.extradc.isModified())
def test_rdf_datastream(self):
# add a relationship to test RELS-EXT/rdf datastreams
foo123 = "info:fedora/foo:123"
self.obj.add_relationship(relsext.isMemberOf, foo123)
self.assert_(isinstance(self.obj.rels_ext, models.RdfDatastreamObject))
self.assert_(isinstance(self.obj.rels_ext.content, RdfGraph))
self.assert_((self.obj.uriref, relsext.isMemberOf, URIRef(foo123)) in
self.obj.rels_ext.content)
def test_file_datastream(self):
# confirm the image datastream does not exist, so we can test adding it
self.assertFalse(self.obj.image.exists)
# add file datastream to test object
filename = os.path.join(FIXTURE_ROOT, 'test.png')
with open(filename, mode='rb') as imgfile:
self.obj.image.content = imgfile
imgsaved = self.obj.save()
self.assertTrue(imgsaved)
# datastream should exist now
self.assertTrue(self.obj.image.exists)
# file content should be reset
self.assertEqual(None, self.obj.image._raw_content())
self.assertFalse(self.obj.image.isModified(),
"isModified should return False for image datastream after it has been saved")
# access via file datastream descriptor
self.assert_(isinstance(self.obj.image, models.FileDatastreamObject))
self.assertEqual(self.obj.image.content.read(), open(filename, mode='rb').read())
# update via descriptor
new_file = os.path.join(FIXTURE_ROOT, 'test.jpeg')
self.obj.image.content = open(new_file, mode='rb')
self.obj.image.checksum = 'aaa'
self.assertTrue(self.obj.image.isModified())
#Saving with incorrect checksum should fail.
expected_error = None
try:
self.obj.save()
except models.DigitalObjectSaveFailure as e:
#Error should go here
expected_error = e
self.assert_(str(expected_error).endswith('successfully backed out '), 'Incorrect checksum should back out successfully.')
#Now try with correct checksum
self.obj.image.content = open(new_file, mode='rb')
self.obj.image.checksum = '57d5eb11a19cf6f67ebd9e8673c9812e'
return_status = self.obj.save()
self.fedora_fixtures_ingested.append(self.obj.pid)
self.assertEqual(True, return_status)
# grab a new copy from fedora, confirm contents match
obj = MyDigitalObject(self.api, self.pid)
self.assertEqual(obj.image.content.read(), open(new_file, mode='rb').read())
self.assertEqual(obj.image.checksum, '57d5eb11a19cf6f67ebd9e8673c9812e')
def test_undo_last_save(self):
# test undoing profile and content changes
# unversioned datastream
self.obj.text.label = "totally new label"
self.obj.text.content = "and totally new content, too"
self.obj.text.save()
self.append_pid(self.obj.pid)
self.assertTrue(self.obj.text.undo_last_save())
history = self.obj.text.history()
self.assertEqual("text datastream", history.versions[0].label)
r = self.obj.api.getDatastreamDissemination(self.pid, self.obj.text.id)
self.assertEqual(TEXT_CONTENT, r.text)
# versioned datastream
self.obj.dc.label = "DC 2.0"
self.obj.dc.title = "my new DC"
self.obj.dc.save()
self.assertTrue(self.obj.dc.undo_last_save())
history = self.obj.dc.history()
self.assertEqual(1, len(history.versions)) # new datastream added, then removed - back to 1 version
self.assertEqual("Dublin Core", history.versions[0].label)
r = self.obj.api.getDatastreamDissemination(self.pid, self.obj.dc.id)
self.assert_('<dc:title>A partially-prepared test object</dc:title>' in r.text)
# unversioned - profile change only
self.obj = MyDigitalObject(self.api, self.pid)
self.obj.text.label = "totally new label"
self.obj.text.save()
self.assertTrue(self.obj.text.undo_last_save())
history = self.obj.text.history()
self.assertEqual("text datastream", history.versions[0].label)
r = self.obj.api.getDatastreamDissemination(self.pid, self.obj.text.id)
self.assertEqual(TEXT_CONTENT, r.text)
def test_get_chunked_content(self):
# get chunks - chunksize larger than entire text content
chunks = list(self.obj.text.get_chunked_content(1024))
self.assertEqual(self.obj.text.content, chunks[0])
# smaller chunksize
chunks = list(self.obj.text.get_chunked_content(10))
self.assertEqual(self.obj.text.content[:10], chunks[0])
self.assertEqual(self.obj.text.content[10:20], chunks[1])
def test_datastream_version(self):
# modify dc & save to create a second version
self.obj.dc.content.description = "new datastream contents"
self.obj.dc.save()
# get the two versions ds obj
dc_v0 = self.obj.getDatastreamObject(self.obj.dc.id,
as_of_date=self.obj.dc.history().versions[0].created)
dc_v1 = self.obj.getDatastreamObject(self.obj.dc.id,
as_of_date=self.obj.dc.history().versions[1].created)
# ds info should be different
self.assertNotEqual(dc_v0.created, dc_v1.created)
self.assertNotEqual(dc_v0.size, dc_v1.size)
self.assertNotEqual(dc_v0.checksum, dc_v1.checksum)
# ds content should be different
self.assertNotEqual(dc_v0.content, dc_v1.content)
# saving a historical version is not allowed
self.assertRaises(RuntimeError, dc_v0.save)
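# Ingest tests: TestNewObject creates brand-new objects and verifies their
# default and modified profiles and datastreams after a round trip through
# Fedora.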
class TestNewObject(FedoraTestCase):
pidspace = FEDORA_PIDSPACE
def test_basic_ingest(self):
self.repo.default_pidspace = self.pidspace
obj = self.repo.get_object(type=MyDigitalObject)
self.assertFalse(isinstance(obj.pid, six.string_types))
obj.save()
self.append_pid(obj.pid)
self.assertTrue(isinstance(obj.pid, six.string_types))
self.append_pid(obj.pid)
fetched = self.repo.get_object(obj.pid, type=MyDigitalObject)
self.assertEqual(fetched.dc.content.identifier, obj.pid)
# confirm that fedora generates a checksum for us
r = obj.api.getDatastream(obj.pid, obj.dc.id)
dsinfo = r.text
self.assert_(re.search("<dsChecksum>[0-9a-f]+</dsChecksum>", dsinfo),
                     'Fedora should automatically generate a datastream checksum on ingest ' +
'(requires auto-checksum enabled and Fedora 3.7+)')
def test_ingest_content_uri(self):
obj = self.repo.get_object(type=MyDigitalObject)
obj.pid = 'test:1'
obj.text.ds_location = 'file:///tmp/some/local/file.txt'
# don't actually save, since we can't put a test file on the fedora test server
foxml = obj._build_foxml_doc()
# inspect TEXT datastream contentLocation in the generated foxml
text_dsloc = foxml.xpath('.//f:datastream[@ID="TEXT"]/' +
'f:datastreamVersion/f:contentLocation',
namespaces={'f': obj.FOXML_NS})[0]
self.assertEqual(obj.text.ds_location, text_dsloc.get('REF'))
self.assertEqual('URL', text_dsloc.get('TYPE'))
def test_modified_profile(self):
obj = self.repo.get_object(type=MyDigitalObject)
obj.label = 'test label'
obj.owner = 'tester'
obj.state = 'I'
obj.save()
self.append_pid(obj.pid)
self.assertEqual(obj.label, 'test label')
self.assertEqual(obj.owner, 'tester')
self.assertEqual(obj.state, 'I')
fetched = self.repo.get_object(obj.pid, type=MyDigitalObject)
self.assertEqual(fetched.label, 'test label')
self.assertEqual(fetched.owner, 'tester')
self.assertEqual(fetched.state, 'I')
def test_multiple_owners(self):
obj = self.repo.get_object(type=MyDigitalObject)
obj.owner = 'thing1, thing2'
self.assert_(isinstance(obj.owners, list))
self.assertEqual(['thing1', 'thing2'], obj.owners)
obj.owner = ' thing1, thing2 '
self.assertEqual(['thing1', 'thing2'], obj.owners)
def test_default_datastreams(self):
"""If we just create and save an object, verify that DigitalObject
initializes its datastreams appropriately."""
obj = self.repo.get_object(type=MyDigitalObject)
obj.save()
self.append_pid(obj.pid)
# verify some datastreams on the original object
# fedora treats dc specially
self.assertEqual(obj.dc.label, 'Dublin Core')
self.assertEqual(obj.dc.mimetype, 'text/xml')
self.assertEqual(obj.dc.versionable, False)
self.assertEqual(obj.dc.state, 'A')
self.assertEqual(obj.dc.format, 'http://www.openarchives.org/OAI/2.0/oai_dc/')
self.assertEqual(obj.dc.control_group, 'X')
self.assertEqual(obj.dc.content.identifier, obj.pid) # fedora sets this automatically
# test rels-ext as an rdf datastream
self.assertEqual(obj.rels_ext.label, 'External Relations')
self.assertEqual(obj.rels_ext.mimetype, 'application/rdf+xml')
self.assertEqual(obj.rels_ext.versionable, False)
self.assertEqual(obj.rels_ext.state, 'A')
self.assertEqual(obj.rels_ext.format, 'info:fedora/fedora-system:FedoraRELSExt-1.0')
self.assertEqual(obj.rels_ext.control_group, 'X')
self.assertTrue(isinstance(obj.rels_ext.content, RdfGraph))
self.assert_((obj.uriref, modelns.hasModel, URIRef(MyDigitalObject.CONTENT_MODELS[0])) in
obj.rels_ext.content)
self.assert_((obj.uriref, modelns.hasModel, URIRef(MyDigitalObject.CONTENT_MODELS[0])) in
obj.rels_ext.content)
# test managed xml datastreams
self.assertEqual(obj.extradc.label, 'Managed DC XML datastream')
self.assertEqual(obj.extradc.mimetype, 'application/xml')
self.assertEqual(obj.extradc.versionable, True)
self.assertEqual(obj.extradc.state, 'A')
self.assertEqual(obj.extradc.control_group, 'M')
self.assertTrue(isinstance(obj.extradc.content, DublinCore))
# verify those datastreams on a new version fetched fresh from the
# repo
fetched = self.repo.get_object(obj.pid, type=MyDigitalObject)
self.assertEqual(fetched.dc.label, 'Dublin Core')
self.assertEqual(fetched.dc.mimetype, 'text/xml')
self.assertEqual(fetched.dc.versionable, False)
self.assertEqual(fetched.dc.state, 'A')
self.assertEqual(fetched.dc.format, 'http://www.openarchives.org/OAI/2.0/oai_dc/')
self.assertEqual(fetched.dc.control_group, 'X')
self.assertEqual(fetched.dc.content.identifier, fetched.pid)
self.assertEqual(fetched.rels_ext.label, 'External Relations')
self.assertEqual(fetched.rels_ext.mimetype, 'application/rdf+xml')
self.assertEqual(fetched.rels_ext.versionable, False)
self.assertEqual(fetched.rels_ext.state, 'A')
self.assertEqual(fetched.rels_ext.format, 'info:fedora/fedora-system:FedoraRELSExt-1.0')
self.assertEqual(fetched.rels_ext.control_group, 'X')
self.assert_((obj.uriref, modelns.hasModel, URIRef(MyDigitalObject.CONTENT_MODELS[0])) in
fetched.rels_ext.content)
self.assert_((obj.uriref, modelns.hasModel, URIRef(MyDigitalObject.CONTENT_MODELS[1])) in
fetched.rels_ext.content)
self.assertEqual(fetched.extradc.label, 'Managed DC XML datastream')
self.assertEqual(fetched.extradc.mimetype, 'application/xml')
self.assertEqual(fetched.extradc.versionable, True)
self.assertEqual(fetched.extradc.state, 'A')
self.assertEqual(fetched.extradc.control_group, 'M')
self.assertTrue(isinstance(fetched.extradc.content, DublinCore))
def test_modified_datastreams(self):
"""Verify that we can modify a new object's datastreams before
ingesting it."""
obj = MyDigitalObject(self.api, pid=self.getNextPid(), create=True)
# modify content for dc (metadata should be covered by other tests)
obj.dc.content.description = 'A test object'
obj.dc.content.rights = 'Rights? Sure, copy our test object.'
# modify managed xml content (more metadata in text, below)
obj.extradc.content.description = 'Still the same test object'
# rewrite info and content for a managed binary datastream
obj.text.label = 'The outer limits of testing'
obj.text.mimetype = 'text/x-test'
obj.text.versionable = True
obj.text.state = 'I'
obj.text.format = 'http://example.com/'
obj.text.content = 'We are controlling transmission.'
# save and verify in the same object
obj.save()
self.append_pid(obj.pid)
self.assertEqual(obj.dc.content.description, 'A test object')
self.assertEqual(obj.dc.content.rights, 'Rights? Sure, copy our test object.')
self.assertEqual(obj.extradc.content.description, 'Still the same test object')
self.assertEqual(obj.text.label, 'The outer limits of testing')
self.assertEqual(obj.text.mimetype, 'text/x-test')
self.assertEqual(obj.text.versionable, True)
self.assertEqual(obj.text.state, 'I')
self.assertEqual(obj.text.format, 'http://example.com/')
self.assertEqual(obj.text.content, b'We are controlling transmission.')
# re-fetch and verify
fetched = MyDigitalObject(self.api, obj.pid)
self.assertEqual(fetched.dc.content.description, 'A test object')
self.assertEqual(fetched.dc.content.rights, 'Rights? Sure, copy our test object.')
self.assertEqual(fetched.extradc.content.description, 'Still the same test object')
self.assertEqual(fetched.text.label, 'The outer limits of testing')
self.assertEqual(fetched.text.mimetype, 'text/x-test')
self.assertEqual(fetched.text.versionable, True)
self.assertEqual(fetched.text.state, 'I')
self.assertEqual(fetched.text.format, 'http://example.com/')
self.assertEqual(fetched.text.content, b'We are controlling transmission.')
def test_modify_multiple(self):
obj = self.repo.get_object(type=MyDigitalObject)
obj.label = 'test label'
obj.dc.content.title = 'test dc title'
obj.image.content = open(os.path.join(FIXTURE_ROOT, 'test.png'), mode='rb')
obj.save()
self.append_pid(obj.pid)
# update and save multiple pieces, including filedatastream metadata
obj.label = 'new label'
obj.dc.content.title = 'new dc title'
obj.image.label = 'testimage.png'
saved = obj.save()
self.assertTrue(saved)
updated_obj = self.repo.get_object(obj.pid, type=MyDigitalObject)
self.assertEqual(obj.label, updated_obj.label)
self.assertEqual(obj.dc.content.title, updated_obj.dc.content.title)
self.assertEqual(obj.image.label, updated_obj.image.label)
def test_new_file_datastream(self):
obj = self.repo.get_object(type=MyDigitalObject)
obj.image.content = open(os.path.join(FIXTURE_ROOT, 'test.png'), mode='rb')
obj.save()
self.append_pid(obj.pid)
fetched = self.repo.get_object(obj.pid, type=MyDigitalObject)
file = open(os.path.join(FIXTURE_ROOT, 'test.png'), mode='rb')
self.assertEqual(fetched.image.content.read(), file.read())
def test_new_getdatastream(self):
# use getDatastreamObject to add a datastream not defined
# on the digital object
self.repo.default_pidspace = self.pidspace
obj = self.repo.get_object(type=MyDigitalObject)
dsid = 'new_ds'
content = 'here is some simple text content'
label = 'my ad-hoc datastream'
new_ds = obj.getDatastreamObject(dsid)
new_ds.content = content
new_ds.label = label
new_ds.mimetype = 'text/plain'
obj.save()
self.append_pid(obj.pid)
# fetch fresh copy from repo for inspection
fetched = self.repo.get_object(obj.pid, type=MyDigitalObject)
self.assert_(dsid in fetched.ds_list)
dsobj = fetched.getDatastreamObject(dsid)
self.assertEqual(label, dsobj.label)
self.assertEqual('text/plain', dsobj.mimetype)
self.assertEqual(content, force_text(dsobj.content))
# add new datastream to existing object using the same method
dsid2 = 'newer_ds'
content = 'totally different content here'
label = 'yet another ad-hoc datastream'
newer_ds = fetched.getDatastreamObject(dsid2)
newer_ds.content = content
newer_ds.label = label
newer_ds.mimetype = 'text/plain'
fetched.save()
# re-fetch for inspect
fetched = self.repo.get_object(obj.pid, type=MyDigitalObject)
self.assert_(dsid2 in fetched.ds_list)
dsobj = fetched.getDatastreamObject(dsid2)
self.assertEqual(label, dsobj.label)
self.assertEqual('text/plain', dsobj.mimetype)
self.assertEqual(content, force_text(dsobj.content))
class TestDigitalObject(FedoraTestCase):
fixtures = ['object-with-pid.foxml']
pidspace = FEDORA_PIDSPACE
def setUp(self):
super(TestDigitalObject, self).setUp()
self.pid = self.fedora_fixtures_ingested[-1] # get the pid for the last object
self.obj = MyDigitalObject(self.api, self.pid)
_add_text_datastream(self.obj)
# get fixture ingest time from the server (the hard way) for testing
r = self.obj.api.getDatastream(self.pid, "DC")
dsprofile_node = etree.fromstring(r.content, base_url=r.url)
created_s = dsprofile_node.xpath('string(m:dsCreateDate)',
namespaces={'m': FEDORA_MANAGE_NS})
self.ingest_time = fedoratime_to_datetime(created_s)
def test_properties(self):
self.assertEqual(self.pid, self.obj.pid)
self.assertTrue(self.obj.uri.startswith("info:fedora/"))
self.assertTrue(self.obj.uri.endswith(self.pid))
def test_get_object_info(self):
self.assertEqual(self.obj.label, "A partially-prepared test object")
self.assertEqual(self.obj.owner, "tester")
self.assertEqual(self.obj.state, "A")
try:
self.assertAlmostEqual(self.ingest_time, self.obj.created,
delta=ONE_SEC)
except TypeError:
# delta keyword unavailable before python 2.7
self.assert_(abs(self.ingest_time - self.obj.created) < ONE_SEC)
self.assert_(self.ingest_time < self.obj.modified)
def test_save_object_info(self):
self.obj.label = "An updated test object"
self.obj.owner = "notme"
self.obj.state = "I"
saved = self.obj._saveProfile("saving test object profile")
self.assertTrue(saved, "DigitalObject saveProfile should return True on successful update")
profile = self.obj.getProfile() # get fresh from fedora to confirm updated
self.assertEqual(profile.label, "An updated test object")
self.assertEqual(profile.owner, "notme")
self.assertEqual(profile.state, "I")
self.assertNotEqual(profile.created, profile.modified,
"object create date should not equal modified after updating object profile")
def test_object_label(self):
# object label set method has special functionality
self.obj.label = ' '.join('too long' for i in range(50))
self.assertEqual(self.obj.label_max_size, len(self.obj.label),
'object label should be truncated to 255 characters')
self.assertTrue(self.obj.info_modified, 'object info modified when object label has changed')
self.obj.info_modified = False
self.obj.label = str(self.obj.label)
self.assertFalse(self.obj.info_modified,
'object info should not be considered modified after setting label to its current value')
def test_object_owner(self):
self.obj.owner = ','.join('userid' for i in range(14))
self.assertTrue(len(self.obj.owner) <= self.obj.owner_max_size,
'object owner should be truncated to 64 characters or less')
self.assertTrue(self.obj.info_modified,
'object info modified when object owner has changed')
# last value should not be truncated
self.assertTrue(self.obj.owner.endswith('userid'))
# non-delimited value should just be truncated
self.obj.owner = ''.join('longestownernameever' for i in range(10))
self.assertEqual(self.obj.owner_max_size, len(self.obj.owner),
'object owner should be truncated to 64 characters or less')
def test_save(self):
# unmodified object - save should do nothing
self.obj.save()
self.append_pid(self.obj.pid)
# modify object profile, datastream content, datastream info
self.obj.label = "new label"
self.obj.dc.content.title = "new dublin core title"
self.obj.text.label = "text content"
self.obj.text.checksum_type = "MD5"
self.obj.text.checksum = "avcd"
# Saving with incorrect checksum should fail.
expected_error = None
try:
self.obj.save()
except models.DigitalObjectSaveFailure as err:
# Error should go here
expected_error = err
self.assert_('successfully backed out' in str(expected_error),
'Incorrect checksum should back out successfully.')
# re-initialize the object. do it with a unicode pid to test a regression.
self.obj = MyDigitalObject(self.api, force_text(self.pid))
# modify object profile, datastream content, datastream info
self.obj.label = u"new label\u2014with unicode"
self.obj.dc.content.title = u"new dublin core title\u2014also with unicode"
self.obj.text.label = "text content"
self.obj.text.checksum_type = "MD5"
self.obj.text.checksum = "1c83260ff729265470c0d349e939c755"
return_status = self.obj.save()
#Correct checksum should modify correctly.
self.assertEqual(True, return_status)
# confirm all changes were saved to fedora
profile = self.obj.getProfile()
self.assertEqual(profile.label, u"new label\u2014with unicode")
r = self.obj.api.getDatastreamDissemination(self.pid, self.obj.dc.id)
self.assert_(u'<dc:title>new dublin core title\u2014also with unicode</dc:title>' in force_text(r.content))
text_info = self.obj.getDatastreamProfile(self.obj.text.id)
self.assertEqual(text_info.label, "text content")
self.assertEqual(text_info.checksum_type, "MD5")
# force an error on saving DC to test backing out text datastream
self.obj.text.content = "some new text"
self.obj.dc.content = "this is not dublin core!" # NOTE: setting xml content like this could change...
# catch the exception so we can inspect it
try:
self.obj.save()
except models.DigitalObjectSaveFailure as f:
save_error = f
self.assert_(isinstance(save_error, models.DigitalObjectSaveFailure))
self.assertEqual(save_error.obj_pid, self.obj.pid,
"save failure exception should include object pid %s, got %s" % (self.obj.pid, save_error.obj_pid))
self.assertEqual(save_error.failure, "DC", )
self.assertEqual(set(['TEXT', 'DC']), set(save_error.to_be_saved))
self.assertEqual(['TEXT'], save_error.saved)
self.assertEqual(['TEXT'], save_error.cleaned)
self.assertEqual([], save_error.not_cleaned)
self.assertTrue(save_error.recovered)
r = self.obj.api.getDatastreamDissemination(self.pid, self.obj.text.id)
self.assertEqual(TEXT_CONTENT, r.text)
# force an error updating the profile, should back out both datastreams
self.obj = MyDigitalObject(self.api, self.pid)
self.obj.text.content = "some new text"
self.obj.dc.content.description = "happy happy joy joy"
# object label is limited in length - force an error with a label that exceeds it
# NOTE: bypassing the label property because label set method now truncates to 255 characters
self.obj.info.label = ' '.join('too long' for i in range(50))
self.obj.info_modified = True
try:
self.obj.save()
except models.DigitalObjectSaveFailure as f:
profile_save_error = f
self.assert_(isinstance(profile_save_error, models.DigitalObjectSaveFailure))
self.assertEqual(profile_save_error.obj_pid, self.obj.pid,
"save failure exception should include object pid %s, got %s" % (self.obj.pid, save_error.obj_pid))
self.assertEqual(profile_save_error.failure, "object profile", )
all_datastreams = set(['TEXT', 'DC'])
self.assertEqual(all_datastreams, set(profile_save_error.to_be_saved))
self.assertEqual(all_datastreams, set(profile_save_error.saved))
self.assertEqual(all_datastreams, set(profile_save_error.cleaned))
self.assertEqual([], profile_save_error.not_cleaned)
self.assertTrue(profile_save_error.recovered)
# confirm datastreams were reverted back to previous contents
r = self.obj.api.getDatastreamDissemination(self.pid, self.obj.text.id)
self.assertEqual(TEXT_CONTENT, r.text)
r = self.obj.api.getDatastreamDissemination(self.pid, self.obj.dc.id)
self.assert_("<dc:description>This object has more data in it than a basic-object.</dc:description>" in r.text)
# how to force an error that can't be backed out?
def test_datastreams_list(self):
self.assert_("DC" in self.obj.ds_list.keys())
self.assert_(isinstance(self.obj.ds_list["DC"], ObjectDatastream))
dc = self.obj.ds_list["DC"]
self.assertEqual("DC", dc.dsid)
self.assertEqual("Dublin Core", dc.label)
self.assertEqual("text/xml", dc.mimeType)
self.assert_("TEXT" in self.obj.ds_list.keys())
text = self.obj.ds_list["TEXT"]
self.assertEqual("text datastream", text.label)
self.assertEqual("text/plain", text.mimeType)
def test_get_datastream_object(self):
# NOTE: this is not an exhaustive test of getDatastreamObject, but
# is a test for a particular error introduced somewhere between eulfedora
# 0.20 and 0.23
# error when using defined datastreams - e.g. returns xmldatastream instead of
# XmlDatastreamObject
ds = self.obj.getDatastreamObject('extradc')
self.assert_(isinstance(ds, models.DatastreamObject))
self.assertFalse(ds.exists)
def test_history(self):
self.assert_(isinstance(self.obj.history, list))
self.assert_(isinstance(self.obj.history[0], datetime))
self.assertEqual(self.ingest_time, self.obj.history[0])
def test_object_xml(self):
self.assert_(isinstance(self.obj.object_xml, FoxmlDigitalObject))
# uningested object has none
newobj = MyDigitalObject(self.api)
self.assertEqual(None, newobj.object_xml)
def test_audit_trail(self):
self.assert_(isinstance(self.obj.audit_trail, AuditTrail))
self.assert_(isinstance(self.obj.audit_trail.records[0], AuditTrailRecord))
# inspect the audit trail by adding text datastream in setup
audit = self.obj.audit_trail.records[0]
self.assertEqual('AUDREC1', audit.id)
self.assertEqual('Fedora API-M', audit.process_type)
self.assertEqual('addDatastream', audit.action)
self.assertEqual('TEXT', audit.component)
self.assertEqual('fedoraAdmin', audit.user)
self.assert_(isinstance(audit.date, datetime))
self.assertEqual('creating new datastream', audit.message)
# uningested object has none
newobj = MyDigitalObject(self.api)
self.assertEqual(None, newobj.audit_trail)
# test audit-trail derived properties
# no ingest message set, therefore no ingest user in audit trail
self.assertEqual(None, self.obj.ingest_user)
self.assertEqual(set(['fedoraAdmin']), self.obj.audit_trail_users)
# tweak xml in the audit trail to test
self.obj.audit_trail.records[0].action = 'ingest'
self.obj.audit_trail.records.extend([AuditTrailRecord(user='editor'),
AuditTrailRecord(user='manager'),
AuditTrailRecord(user='editor')])
self.assertEqual('fedoraAdmin', self.obj.ingest_user)
self.assertEqual(set(['fedoraAdmin', 'editor', 'manager']),
self.obj.audit_trail_users)
# should not error when audit trail is not available
newobj = MyDigitalObject(self.api)
self.assertEqual(None, newobj.ingest_user)
self.assertEqual(set(), newobj.audit_trail_users)
def test_methods(self):
methods = self.obj.methods
self.assert_('fedora-system:3' in methods) # standard system sdef
self.assert_('viewMethodIndex' in methods['fedora-system:3'])
def test_has_model(self):
cmodel_uri = "info:fedora/control:ContentType"
# FIXME: checking when rels-ext datastream does not exist causes an error
self.assertFalse(self.obj.has_model(cmodel_uri))
self.obj.add_relationship(modelns.hasModel, cmodel_uri)
self.assertTrue(self.obj.has_model(cmodel_uri))
self.assertFalse(self.obj.has_model(self.obj.uri))
def test_get_models(self):
cmodel_uri = "info:fedora/control:ContentType"
# FIXME: checking when rels-ext datastream does not exist causes an error
self.assertEqual(self.obj.get_models(), [])
self.obj.add_relationship(modelns.hasModel, cmodel_uri)
self.assertEqual(self.obj.get_models(), [URIRef(cmodel_uri)])
def test_has_requisite_content_models(self):
# fixture has no content models
# init fixture as generic object
obj = models.DigitalObject(self.api, self.pid)
# should have all required content models because there are none
self.assertTrue(obj.has_requisite_content_models)
# init fixture as test digital object with cmodels
obj = MyDigitalObject(self.api, self.pid)
# initially false since fixture has no cmodels
self.assertFalse(obj.has_requisite_content_models)
# add first cmodel
obj.rels_ext.content.add((obj.uriref, modelns.hasModel,
URIRef(MyDigitalObject.CONTENT_MODELS[0])))
# should still be false since both are required
self.assertFalse(obj.has_requisite_content_models)
# add second cmodel
obj.rels_ext.content.add((obj.uriref, modelns.hasModel,
URIRef(MyDigitalObject.CONTENT_MODELS[1])))
# now all cmodels should be present
self.assertTrue(obj.has_requisite_content_models)
# add an additional, extraneous cmodel
obj.rels_ext.content.add((obj.uriref, modelns.hasModel,
URIRef(SimpleDigitalObject.CONTENT_MODELS[0])))
# should still be true
self.assertTrue(obj.has_requisite_content_models)
def test_add_relationships(self):
# add relation to a resource, by digital object
related = models.DigitalObject(self.api, "foo:123")
added = self.obj.add_relationship(relsext.isMemberOf, related)
self.assertTrue(added, "add relationship should return True on success, got %s" % added)
r = self.obj.api.getDatastreamDissemination(self.pid, "RELS-EXT")
self.assert_("isMemberOf" in r.text)
self.assert_(related.uri in r.text) # should be full uri, not just pid
# add relation to a resource, by string
collection_uri = "info:fedora/foo:456"
self.obj.add_relationship(relsext.isMemberOfCollection, collection_uri)
r = self.obj.api.getDatastreamDissemination(self.pid, "RELS-EXT")
self.assert_("isMemberOfCollection" in r.text)
self.assert_('rdf:resource="%s"' % collection_uri in r.text,
'string uri should be added to rels-ext as a resource')
# add relation to a resource, by string
collection_uri = u"info:fedora/foo:457"
self.obj.add_relationship(relsext.isMemberOfCollection, collection_uri)
r = self.obj.api.getDatastreamDissemination(self.pid, "RELS-EXT")
self.assert_("isMemberOfCollection" in r.text)
self.assert_('rdf:resource="%s"' % collection_uri in r.text,
'unicode uri should be added to rels-ext as a resource')
# add relation to a literal
self.obj.add_relationship('info:fedora/example:owner', "testuser")
r = self.obj.api.getDatastreamDissemination(self.pid, "RELS-EXT")
self.assert_("owner" in r.text)
self.assert_("testuser" in r.text)
rels = self.obj.rels_ext.content
# convert first added relationship to rdflib statement to check that it is in the rdf graph
st = (self.obj.uriref, relsext.isMemberOf, related.uriref)
self.assertTrue(st in rels)
def test_purge_relationships(self):
# purge relation from a resource, by digital object
related = models.DigitalObject(self.api, "foo:123")
self.obj.add_relationship(relsext.isMemberOf, related)
purged = self.obj.purge_relationship(relsext.isMemberOf, related)
self.assertTrue(purged, "add relationship should return True on success, got %s" % purged)
r = self.obj.api.getDatastreamDissemination(self.pid, "RELS-EXT")
self.assert_("isMemberOf" not in r.text)
self.assert_(related.uri not in r.text) # should be full uri, not just pid
# purge relation from a resource, by string
collection_uri = "info:fedora/foo:456"
self.obj.add_relationship(relsext.isMemberOfCollection, collection_uri)
self.obj.purge_relationship(relsext.isMemberOfCollection, collection_uri)
r = self.obj.api.getDatastreamDissemination(self.pid, "RELS-EXT")
self.assert_("isMemberOfCollection" not in r.text)
self.assert_(collection_uri not in r.text)
# purge relation to a literal
self.obj.add_relationship('info:fedora/example:owner', "testuser")
self.obj.purge_relationship('info:fedora/example:owner', "testuser")
r = self.obj.api.getDatastreamDissemination(self.pid, "RELS-EXT")
self.assert_("owner" not in r.text)
self.assert_("testuser" not in r.text)
rels = self.obj.rels_ext.content
# convert first added relationship to rdflib statement to check that it is NOT in the rdf graph
st = (self.obj.uriref, relsext.isMemberOf, related.uriref)
self.assertTrue(st not in rels)
def test_modify_relationships(self):
# modify a pre-existing relation to a resource, by digital object
old_related = models.DigitalObject(self.api, "foo:1234")
new_related = models.DigitalObject(self.api, "foo:5678")
self.obj.add_relationship(relsext.isMemberOf, old_related)
modified = self.obj.modify_relationship(relsext.isMemberOf, old_related, new_related)
self.assertTrue(modified, "modify relationship should return True on success, got %s" % modified)
r = self.obj.api.getDatastreamDissemination(self.pid, "RELS-EXT")
self.assert_("isMemberOf" in r.text)
self.assert_(new_related.uri in r.text) # should be full uri, not just pid
# modify a pre-existing relation, by string
old_collection_uri = "info:fedora/foo:8765"
new_collection_uri = "info:fedora/foo:4321"
self.obj.add_relationship(relsext.isMemberOfCollection, old_collection_uri)
self.obj.modify_relationship(relsext.isMemberOfCollection, old_collection_uri, new_collection_uri)
r = self.obj.api.getDatastreamDissemination(self.pid, "RELS-EXT")
self.assert_("isMemberOfCollection" in r.text)
self.assert_(new_collection_uri in r.text)
# modify a relation to a literal
self.obj.add_relationship('info:fedora/example:owner', "old_testuser")
self.obj.modify_relationship('info:fedora/example:owner', "old_testuser", "new_testuser")
r = self.obj.api.getDatastreamDissemination(self.pid, "RELS-EXT")
self.assert_("owner" in r.text)
self.assert_("new_testuser" in r.text)
rels = self.obj.rels_ext.content
# convert first modified relationship to rdflib statement to check that it is in the rdf graph
st = (self.obj.uriref, relsext.isMemberOf, new_related.uriref)
self.assertTrue(st in rels)
def test_registry(self):
self.assert_('test.test_fedora.test_models.MyDigitalObject' in
models.DigitalObject.defined_types)
def test_index_data(self):
indexdata = self.obj.index_data()
# check that top-level object properties are included in index data
# (implicitly checking types)
self.assertEqual(self.obj.pid, indexdata['pid'])
self.assertEqual(self.obj.owners, indexdata['owner'])
self.assertEqual(self.obj.label, indexdata['label'])
self.assertEqual(self.obj.modified.isoformat(), indexdata['last_modified'])
self.assertEqual(self.obj.created.isoformat(), indexdata['created'])
self.assertEqual(self.obj.state, indexdata['state'])
for cm in self.obj.get_models():
self.assert_(str(cm) in indexdata['content_model'])
# descriptive data included in index data
self.assert_(self.obj.dc.content.title in indexdata['title'])
self.assert_(self.obj.dc.content.description in indexdata['description'])
self.assertEqual(set(['TEXT', 'DC']), set(indexdata['dsids']))
def test_index_data_relations(self):
# add a few rels-ext relations to test
partof = 'something bigger'
self.obj.rels_ext.content.add((self.obj.uriref, relsext.isPartOf, URIRef(partof)))
member1 = 'foo'
member2 = 'bar'
self.obj.rels_ext.content.add((self.obj.uriref, relsext.hasMember, URIRef(member1)))
self.obj.rels_ext.content.add((self.obj.uriref, relsext.hasMember, URIRef(member2)))
indexdata = self.obj.index_data_relations()
self.assertEqual([partof], indexdata['isPartOf'])
self.assert_(member1 in indexdata['hasMember'])
self.assert_(member2 in indexdata['hasMember'])
# rels-ext data included in main index data
indexdata = self.obj.index_data()
self.assert_('isPartOf' in indexdata)
self.assert_('hasMember' in indexdata)
def test_get_object(self):
obj = MyDigitalObject(self.api)
otherobj = obj.get_object(self.pid)
self.assert_(isinstance(otherobj, MyDigitalObject),
'if type is not specified, get_object should return current type')
self.assertEqual(self.api, otherobj.api,
'get_object should pass existing api connection')
otherobj = obj.get_object(self.pid, type=SimpleDigitalObject)
self.assert_(isinstance(otherobj, SimpleDigitalObject),
'get_object should return an object of the requested type')
class TestContentModel(FedoraTestCase):
def tearDown(self):
super(TestContentModel, self).tearDown()
cmodels = list(MyDigitalObject.CONTENT_MODELS)
cmodels.extend(SimpleDigitalObject.CONTENT_MODELS)
for pid in cmodels:
try:
self.repo.purge_object(pid)
except RequestFailed as rf:
logger.warn('Error purging %s: %s' % (pid, rf))
# patch ContentModel to avoid actually ingesting into fedora
@patch.object(models.ContentModel, '_ingest', new=Mock())
def test_for_class(self):
CMODEL_URI = models.ContentModel.CONTENT_MODELS[0]
# first: create a cmodel for SimpleDigitalObject, the simple case
cmodel = models.ContentModel.for_class(SimpleDigitalObject, self.repo)
expect_uri = SimpleDigitalObject.CONTENT_MODELS[0]
self.assertEqual(cmodel.uri, expect_uri)
self.assertTrue(cmodel.has_model(CMODEL_URI))
dscm = cmodel.ds_composite_model.content
typemodel = dscm.get_type_model('TEXT')
self.assertEqual(typemodel.mimetype, 'text/plain')
typemodel = dscm.get_type_model('EXTRADC')
self.assertEqual(typemodel.mimetype, 'text/xml')
# try ContentModel itself. Content model objects have the "content
# model" content model. That content model should already be in
# every repo, so for_class shouldn't need to make anything.
cmodel = models.ContentModel.for_class(models.ContentModel, self.repo)
expect_uri = models.ContentModel.CONTENT_MODELS[0]
self.assertEqual(cmodel.uri, expect_uri)
self.assertTrue(cmodel.has_model(CMODEL_URI))
dscm = cmodel.ds_composite_model.content
typemodel = dscm.get_type_model('DS-COMPOSITE-MODEL')
self.assertEqual(typemodel.mimetype, 'text/xml')
self.assertEqual(typemodel.format_uri, 'info:fedora/fedora-system:FedoraDSCompositeModel-1.0')
# try MyDigitalObject. this should fail, as MyDigitalObject has two
# CONTENT_MODELS: we support only one
self.assertRaises(ValueError, models.ContentModel.for_class,
MyDigitalObject, self.repo)
# using DC namespace to test RDF literal values
DCNS = Namespace(URIRef('http://purl.org/dc/elements/1.1/'))
class SiblingObject(models.DigitalObject):
pass
class RelatorObject(MyDigitalObject):
# related object
parent = models.Relation(relsext.isMemberOfCollection, type=SimpleDigitalObject)
# literal
dctitle = models.Relation(DCNS.title)
# literal with explicit type and namespace prefix
dcid = models.Relation(DCNS.identifier, ns_prefix={'dcns': DCNS}, rdf_type=XSD.int)
# type of "self"
recursive_rel = models.Relation(relsext.isMemberOf, type='self')
# test variant options for automatic reverse relations
other = models.Relation(relsext.isMemberOfCollection, type=SimpleDigitalObject,
related_name='related_items', related_order=DCNS.title)
parent1 = models.Relation(relsext.isMemberOfCollection, type=models.DigitalObject,
related_name='my_custom_rel')
sib = models.Relation(relsext.isMemberOf, type=SiblingObject,
related_name='+')
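# A hedged sketch of how these Relation descriptors are expected to behave (inferred from
# the TestRelation cases below, not from the descriptor implementation itself): setting
# `obj.parent = coll` should add the triple
# (obj.uriref, relsext.isMemberOfCollection, coll.uriref) to obj.rels_ext.content, and
# reading `obj.parent` should resolve that triple back to a SimpleDigitalObject instance.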
class ReverseRelator(MyDigitalObject):
member = models.ReverseRelation(relsext.isMemberOfCollection, type=RelatorObject)
members = models.ReverseRelation(relsext.isMemberOfCollection,
type=RelatorObject, multiple=True)
sorted_members = models.ReverseRelation(relsext.isMemberOfCollection,
type=RelatorObject, multiple=True, order_by=DCNS.title)
class TestRelation(FedoraTestCase):
fixtures = ['object-with-pid.foxml']
def setUp(self):
super(TestRelation, self).setUp()
self.pid = self.fedora_fixtures_ingested[-1] # get the pid for the last object
self.obj = RelatorObject(self.api)
def test_object_relation(self):
# get - not yet set
self.assertEqual(None, self.obj.parent)
# set via descriptor
newobj = models.DigitalObject(self.api)
newobj.pid = 'foo:2' # test pid for convenience/distinguish temp pids
self.obj.parent = newobj
self.assert_((self.obj.uriref, relsext.isMemberOfCollection, newobj.uriref)
in self.obj.rels_ext.content,
'isMemberOfCollection should be set in RELS-EXT after updating via descriptor')
# access via descriptor
self.assertEqual(newobj.pid, self.obj.parent.pid)
self.assert_(isinstance(self.obj.parent, SimpleDigitalObject),
'Relation descriptor returns configured type of DigitalObject')
# set existing property
otherobj = models.DigitalObject(self.api)
otherobj.pid = 'bar:none'
self.obj.parent = otherobj
self.assert_((self.obj.uriref, relsext.isMemberOfCollection, otherobj.uriref)
in self.obj.rels_ext.content,
'isMemberOfCollection should be updated in RELS-EXT after update')
self.assert_((self.obj.uriref, relsext.isMemberOfCollection, newobj.uriref)
not in self.obj.rels_ext.content,
'previous isMemberOfCollection value should not be in RELS-EXT after update')
# delete
del self.obj.parent
self.assertEqual(None, self.obj.rels_ext.content.value(subject=self.obj.uriref,
predicate=relsext.isMemberOfCollection),
'isMemberOfCollection should not be set in rels-ext after delete')
def test_recursive_relation(self):
self.assertEqual(None, self.obj.recursive_rel)
# set via descriptor
newobj = models.DigitalObject(self.api)
newobj.pid = 'foo:3' # test pid for convenience/distinguish temp pids
self.obj.recursive_rel = newobj
# access to check type
self.assert_(isinstance(self.obj.recursive_rel, RelatorObject))
def test_literal_relation(self):
# get - not set
self.assertEqual(None, self.obj.dcid)
self.assertEqual(None, self.obj.dctitle)
# set via descriptor
# - integer, with type specified
self.obj.dcid = 1234
self.assert_((self.obj.uriref, DCNS.identifier, Literal(1234, datatype=XSD.int))
in self.obj.rels_ext.content,
'literal value should be set in RELS-EXT after updating via descriptor')
# check namespace prefix
self.assert_('dcns:identifier' in force_text(self.obj.rels_ext.content.serialize()),
'configured namespace prefix should be used for serialization')
# check type
self.assert_('XMLSchema#int' in force_text(self.obj.rels_ext.content.serialize()),
'configured RDF type should be used for serialization')
# - simpler case
self.obj.dctitle = 'foo'
self.assert_((self.obj.uriref, DCNS.title, Literal('foo'))
in self.obj.rels_ext.content,
'literal value should be set in RELS-EXT after updating via descriptor')
self.assertEqual('foo', self.obj.dctitle)
# get
self.assertEqual(1234, self.obj.dcid)
# update
self.obj.dcid = 987
self.assertEqual(987, self.obj.dcid)
# delete
del self.obj.dcid
self.assertEqual(None, self.obj.rels_ext.content.value(subject=self.obj.uriref,
predicate=DCNS.identifier),
'dc:identifier should not be set in rels-ext after delete')
def test_reverse_relation(self):
rev = ReverseRelator(self.api, pid=self.getNextPid())
# add a relation to the object and save so we can query risearch
self.obj.parent = rev
self.obj.dc.content.title = 'title b'
self.obj.save()
# run an risearch query with flush updates true
# so that tests do not require syncUpdates to be enabled
self.repo.risearch.count_statements('<%s> * *' % self.obj.pid,
flush=True)
self.fedora_fixtures_ingested.append(self.obj.pid) # save pid for cleanup in tearDown
self.assertEqual(rev.member.pid, self.obj.pid,
'ReverseRelation returns correct object based on risearch query')
self.assert_(isinstance(rev.member, RelatorObject),
'ReverseRelation returns correct object type')
obj2 = RelatorObject(self.api)
obj2.parent = rev
obj2.dc.content.title = 'title a'
obj2.save()
# run an risearch query with flush updates true
# so that tests do not require syncUpdates to be enabled
self.repo.risearch.count_statements('<%s> * *' % self.obj.pid,
flush=True)
self.assert_(isinstance(rev.members, list),
'ReverseRelation returns list when multiple=True')
pids = [m.pid for m in rev.members]
self.assertTrue(self.obj.pid in pids,
'ReverseRelation list includes expected object')
self.assertTrue(obj2.pid in pids,
'ReverseRelation list includes expected object')
self.assert_(isinstance(rev.members[0], RelatorObject),
'ReverseRelation list items initialized as correct object type')
# test order by
self.assert_(isinstance(rev.sorted_members, list),
'ReverseRelation returns list for multiple=True with order_by')
pids = [m.pid for m in rev.sorted_members]
self.assertTrue(self.obj.pid in pids,
'ReverseRelation list includes expected object')
self.assertTrue(obj2.pid in pids,
'ReverseRelation list includes expected object')
self.assert_(isinstance(rev.sorted_members[0], RelatorObject),
'ReverseRelation list items initialized as correct object type')
self.assertEqual(obj2.pid, rev.sorted_members[0].pid,
'ReverseRelation items are sorted correctly by specified field')
def test_auto_reverse_relation(self):
# default reverse name based on classname
self.assert_(hasattr(SimpleDigitalObject, 'relatorobject_set'))
self.assert_(isinstance(SimpleDigitalObject.relatorobject_set,
models.ReverseRelation))
# check reverse-rel is configured correctly
self.assertEqual(relsext.isMemberOfCollection,
SimpleDigitalObject.relatorobject_set.relation)
self.assertEqual(RelatorObject,
SimpleDigitalObject.relatorobject_set.object_type)
self.assertEqual(True,
SimpleDigitalObject.relatorobject_set.multiple)
# reverse order not set
self.assertEqual(None,
SimpleDigitalObject.relatorobject_set.order_by)
# explicitly named reverse rel
self.assert_(hasattr(SimpleDigitalObject, 'related_items'))
# reverse rel order passed through
self.assertEqual(DCNS.title,
SimpleDigitalObject.related_items.order_by)
# generic digital object should *NOT* get reverse rels
self.assertFalse(hasattr(models.DigitalObject, 'my_custom_rel'))
# related_name of + also means no reverse rel
self.assertFalse(hasattr(SiblingObject, 'relatorobject_set'))
|
WSULib/eulfedora
|
test/test_fedora/test_models.py
|
Python
|
apache-2.0
| 62,832
|
from __future__ import unicode_literals
from netaddr import EUI, AddrFormatError
from django import forms
from django.core.exceptions import ValidationError
#
# Form fields
#
class MACAddressFormField(forms.Field):
default_error_messages = {
'invalid': "Enter a valid MAC address.",
}
def to_python(self, value):
if not value:
return None
if isinstance(value, EUI):
return value
try:
return EUI(value, version=48)
except AddrFormatError:
raise ValidationError("Please specify a valid MAC address.")
|
snazy2000/netbox
|
netbox/dcim/formfields.py
|
Python
|
apache-2.0
| 607
|
#!/usr/bin/env python
# Copyright (c) Suchakra Sharma <suchakrapani.sharma@polymtl.ca>
# Licensed under the Apache License, Version 2.0 (the "License")
from bcc import BPF, _get_num_open_probes, TRACEFS
import os
import sys
from unittest import main, TestCase
class TestKprobeCnt(TestCase):
def setUp(self):
self.b = BPF(text="""
int wololo(void *ctx) {
return 0;
}
""")
self.b.attach_kprobe(event_re="^vfs_.*", fn_name="wololo")
def test_attach1(self):
actual_cnt = 0
with open("%s/available_filter_functions" % TRACEFS, "rb") as f:
for line in f:
if line.startswith(b"vfs_"):
actual_cnt += 1
open_cnt = self.b.num_open_kprobes()
self.assertEqual(actual_cnt, open_cnt)
def tearDown(self):
self.b.cleanup()
class TestProbeGlobalCnt(TestCase):
def setUp(self):
self.b1 = BPF(text="""int count(void *ctx) { return 0; }""")
self.b2 = BPF(text="""int count(void *ctx) { return 0; }""")
def test_probe_quota(self):
self.b1.attach_kprobe(event="schedule", fn_name="count")
self.b2.attach_kprobe(event="submit_bio", fn_name="count")
self.assertEqual(1, self.b1.num_open_kprobes())
self.assertEqual(1, self.b2.num_open_kprobes())
self.assertEqual(2, _get_num_open_probes())
self.b1.cleanup()
self.b2.cleanup()
self.assertEqual(0, _get_num_open_probes())
class TestAutoKprobe(TestCase):
def setUp(self):
self.b = BPF(text="""
int kprobe__schedule(void *ctx) { return 0; }
int kretprobe__schedule(void *ctx) { return 0; }
""")
def test_count(self):
self.assertEqual(2, self.b.num_open_kprobes())
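# The expected count of 2 follows from bcc's naming convention: functions named
# kprobe__<kernel_fn> / kretprobe__<kernel_fn> are attached automatically when the BPF
# program in setUp is loaded, so each of the two handlers above opens one probe on schedule().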
def tearDown(self):
self.b.cleanup()
class TestProbeQuota(TestCase):
def setUp(self):
self.b = BPF(text="""int count(void *ctx) { return 0; }""")
def test_probe_quota(self):
with self.assertRaises(Exception):
self.b.attach_kprobe(event_re=".*", fn_name="count")
def test_uprobe_quota(self):
with self.assertRaises(Exception):
self.b.attach_uprobe(name="c", sym_re=".*", fn_name="count")
def tearDown(self):
self.b.cleanup()
class TestProbeNotExist(TestCase):
def setUp(self):
self.b = BPF(text="""int count(void *ctx) { return 0; }""")
def test_not_exist(self):
with self.assertRaises(Exception):
b.attach_kprobe(event="___doesnotexist", fn_name="count")
def tearDown(self):
self.b.cleanup()
if __name__ == "__main__":
main()
|
mcaleavya/bcc
|
tests/python/test_probe_count.py
|
Python
|
apache-2.0
| 2,656
|
# Generated by Django 2.2.5 on 2019-12-26 20:45
from django.db import migrations, models
import django.db.models.deletion
def non_reversible_migration(apps, schema_editor):
"""Operation to "reverse" an unreversible change"""
pass
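# Using this no-op as the reverse callable keeps the migration reversible from Django's
# point of view: `manage.py migrate indicators 0081` will run without raising, even though
# the data changes applied by the forward functions below are not actually undone.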
def remove_non_sadd_disaggregations(apps, schema_editor):
DisaggregationType = apps.get_model('indicators', 'DisaggregationType')
standard_disaggregations = DisaggregationType.objects.filter(standard=True, is_archived=False)
for standard_disaggregation in standard_disaggregations:
if standard_disaggregation.disaggregation_type == '---':
standard_disaggregation.delete()
elif standard_disaggregation.country_id == 6:
standard_disaggregation.standard = False
standard_disaggregation.save()
elif standard_disaggregation.disaggregation_type == 'SADD - MC Standard':
standard_disaggregation.disaggregation_type = 'Sex and Age Disaggregated Data (SADD)'
standard_disaggregation.selected_by_default = True
standard_disaggregation.save()
def assign_sadd_to_all_existing_indicators(apps, schema_editor):
Indicator = apps.get_model('indicators', 'Indicator')
DisaggregationType = apps.get_model('indicators', 'DisaggregationType')
try:
sadd_disagg = DisaggregationType.objects.get(pk=109)
except DisaggregationType.DoesNotExist:
return
for indicator in Indicator.objects.all():
indicator.disaggregation.add(sadd_disagg)
class Migration(migrations.Migration):
dependencies = [
('indicators', '0081_remove_legacy_disaggregationvalue'),
]
operations = [
migrations.AlterField(
model_name='disaggregationtype',
name='country',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='workflow.Country', verbose_name='Country'),
),
migrations.RunPython(remove_non_sadd_disaggregations, non_reversible_migration),
migrations.RunPython(assign_sadd_to_all_existing_indicators, non_reversible_migration),
]
|
mercycorps/TolaActivity
|
indicators/migrations/0082_global_disaggregations_data_migration.py
|
Python
|
apache-2.0
| 2,118
|
# ===============================================================================
# Copyright 2018 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import logging
import os
import shutil
import subprocess
import time
from apptools.preferences.preference_binding import bind_preference
from pyface.confirmation_dialog import confirm
from pyface.constant import OK, YES
from pyface.directory_dialog import DirectoryDialog
from pyface.message_dialog import warning
from traits.api import Str, Enum, Bool, Int, Float, HasTraits, List, Instance, Button, CFloat
from traitsui.api import UItem, Item, View, TableEditor, VGroup, InstanceEditor, ListStrEditor, HGroup
from traitsui.table_column import ObjectColumn
from uncertainties import nominal_value, std_dev
from pychron.core.helpers.iterfuncs import groupby_group_id, groupby_key
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.pychron_traits import BorderVGroup
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.envisage.tasks.base_editor import grouped_name
from pychron.globals import globalv
from pychron.mdd import GEOMETRIES
from pychron.options.options_manager import MDDFigureOptionsManager
from pychron.paths import paths
from pychron.pipeline.nodes.base import BaseNode
from pychron.pipeline.nodes.figure import FigureNode
from pychron.processing.analyses.analysis_group import StepHeatAnalysisGroup
from pychron.regex import MDD_PARAM_REGEX
# EOF = 1
class MDDWorkspace(HasTraits):
roots = List
add_root_button = Button
def _add_root_button_fired(self):
dlg = DirectoryDialog(default_path=paths.mdd_data_dir)
if dlg.open() == OK and dlg.path:
name = os.path.basename(dlg.path)
if os.path.isfile(os.path.join(dlg.path, '{}.in'.format(name))):
self.roots.append(dlg.path)
else:
warning(None, 'Invalid MDD directory. {}. Directory must contain file '
'named {}.in'.format(dlg.path, name))
def traits_view(self):
w = BorderVGroup(UItem('roots', editor=ListStrEditor()), label='Workspaces')
b = HGroup(icon_button_editor('add_root_button', 'add',
tooltip='Add an MDD workspace (folder) to the available workspace list'))
v = View(VGroup(b, w), title='Select a MDD Workspace')
return v
def _roots_default(self):
r = []
if globalv.mdd_workspace_debug:
r = [
# os.path.join(paths.mdd_data_dir, '66208-01'),
os.path.join(paths.mdd_data_dir, '12H')
]
return r
class MDDWorkspaceNode(BaseNode):
name = 'MDD Workspace'
workspace = Instance(MDDWorkspace, ())
def run(self, state):
state.mdd_workspace = self.workspace
def traits_view(self):
g = VGroup(UItem('workspace', style='custom', editor=InstanceEditor()))
return okcancel_view(g)
fortranlogger = logging.getLogger('FortranProcess')
class MDDNode(BaseNode):
executable_name = ''
configuration_name = ''
_dumpables = None
root_dir = Str
executable_root = Str
def bind_preferences(self):
bind_preference(self, 'executable_root', 'pychron.mdd.executable_root')
def run(self, state):
if state.mdd_workspace:
for root in state.mdd_workspace.roots:
self.root_dir = root
self._write_configuration_file()
os.chdir(root)
fortranlogger.info('changing to workspace {}'.format(root))
self.run_fortan()
def _write_configuration_file(self):
with open(self.configuration_path, 'w') as wfile:
for d in self._dumpables:
if d.startswith('!'):
v = d[1:]
else:
v = getattr(self, d)
if isinstance(v, bool):
v = int(v)
line = '{}\n'.format(v)
wfile.write(line)
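# A hedged example of the file this produces: with _dumpables = ('npoints', 'rootname'),
# npoints = 10 and a workspace directory named '12H', the <configuration_name>.cl file
# would contain the two lines "10" and "12H". A dumpable starting with '!' writes the
# literal text after the '!' rather than an attribute value, and booleans are written as 0/1.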
def run_fortan(self):
path = self.executable_path
name = self.executable_name
fortranlogger.info('------ started {}'.format(name))
p = subprocess.Popen([path],
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
while p.poll() is None:
msg = p.stdout.readline().decode('utf8').strip()
if msg == 'PYCHRON_INTERRUPT':
self.handle_interrupt(p)
else:
self.handle_stdout(p, msg)
fortranlogger.info(msg)
time.sleep(1e-6)
fortranlogger.info('------ complete {}'.format(name))
self.post_fortran()
def handle_stdout(self, proc, msg):
pass
def handle_interrupt(self, proc):
pass
def post_fortran(self):
pass
@property
def configuration_path(self):
if not self.configuration_name:
raise NotImplementedError
return self.get_path('{}.cl'.format(self.configuration_name))
@property
def executable_path(self):
if not self.executable_name:
raise NotImplementedError
root = self.executable_root
if not root:
root = paths.clovera_root
return os.path.join(root, self.executable_name)
@property
def rootname(self):
return os.path.basename(self.root_dir)
def get_path(self, name):
return os.path.join(self.root_dir, name)
class MDDLabTableNode(MDDNode):
name = 'MDD Lab Table'
temp_offset = Float
time_offset = Float
def bind_preferences(self):
bind_preference(self, 'temp_offset', 'pychron.mdd.default_temp_offset')
def run(self, state):
# from the list of unknowns in the current state
# assemble the <sample>.in file
# for use with files_py
roots = []
state.mdd_workspace = MDDWorkspace()
for gid, unks in groupby_group_id(state.unknowns):
unks = list(unks)
unk = unks[0]
name = '{}-{:02n}'.format(unk.identifier, unk.aliquot)
root = os.path.join(paths.mdd_data_dir, name)
if os.path.isdir(root):
if confirm(None, '{} already exists. Backup existing?'.format(root)) == YES:
head, tail = os.path.split(root)
dest = os.path.join(head, '~{}'.format(tail))
if os.path.isdir(dest):
shutil.rmtree(dest)
shutil.move(root, dest)
if not os.path.isdir(root):
os.mkdir(root)
roots.append(root)
ag = StepHeatAnalysisGroup(analyses=unks)
with open(os.path.join(root, '{}.in'.format(name)), 'w') as wfile:
step = 0
for unk in ag.analyses:
if unk.age > 0:
cum39 = ag.cumulative_ar39(step)
line = self._assemble(step, unk, cum39)
wfile.write(line)
step += 1
state.mdd_workspace.roots = roots
def _assemble(self, step, unk, cum39):
"""
step, T(C), t(min), 39mol, %error 39, cum ar39, age, age_er, age_er_w_j, cl_age, cl_age_er
cl_age and cl_age_er are currently ignored. Ar/Ar age is used as a placeholder instead
:param unk:
:return:
"""
temp = unk.extract_value
time_at_temp = unk.extract_duration / 60.
molv = unk.moles_k39
mol_39, e = nominal_value(molv), std_dev(molv)
mol_39_perr = e / mol_39 * 100
age = unk.age
age_err = unk.age_err_wo_j
age_err_w_j = unk.age_err
cols = [step + 1, temp - self.temp_offset, time_at_temp - self.time_offset,
mol_39, mol_39_perr, cum39, age, age_err, age_err_w_j, age, age_err]
cols = ','.join([str(v) for v in cols])
return '{}\n'.format(cols)
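# A hedged illustration of one assembled row (all numbers invented for this example):
# for step 0, a 600 C / 12 min heating step with zero offsets this returns something like
# "1,600,12.0,1.2e-14,1.5,0.034,28.4,0.3,0.4,28.4,0.3\n" -- the eleven comma-separated
# columns listed in the docstring, with the Ar/Ar age repeated for cl_age and cl_age_er.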
def traits_view(self):
g = VGroup(Item('temp_offset', label='Temp. Offset (C)', tooltip='Subtract Temp Offset from nominal lab '
'extraction temperature. e.g '
'temp = lab_temp - temp_offset'),
Item('time_offset', label='Time Offset (Min)', tooltip='Subtract Time Offset from nominal lab '
'extraction time. e.g '
'time = lab_time - time_offset'))
return okcancel_view(g, title='Configure Lab Table')
class FilesNode(MDDNode):
name = 'Files'
configuration_name = 'files'
executable_name = 'files_py3'
configurable = False
_dumpables = ['rootname']
class GeometryMixin(HasTraits):
geometry = Enum(*GEOMETRIES)
def __init__(self, *args, **kw):
super(GeometryMixin, self).__init__(*args, **kw)
bind_preference(self, 'geometry', 'pychron.mdd.default_geometry')
@property
def _geometry(self):
idx = 0
if self.geometry in GEOMETRIES:
idx = GEOMETRIES.index(self.geometry)
return idx+1
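# e.g. when self.geometry is the first entry of GEOMETRIES this returns 1; the +1 suggests
# the Fortran executables expect a 1-based geometry index, with unrecognized values falling
# back to index 1. This reading is inferred from the code above, not from the MDD programs.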
class ArrMeNode(MDDNode, GeometryMixin):
name = 'Arrme'
configuration_name = 'arrme'
executable_name = 'arrme_py3'
_dumpables = ('_geometry',)
def traits_view(self):
v = okcancel_view(Item('geometry'), title='Configure ArrMe')
return v
class ArrMultiNode(MDDNode):
name = 'ArrMulti'
configuration_name = 'arrmulti'
executable_name = 'arrmulti_py3'
npoints = Int(10)
_dumpables = ('npoints', 'rootname')
e = CFloat
e_err = CFloat
ordinate = CFloat
ordinate_err = CFloat
def handle_stdout(self, proc, msg):
mt = MDD_PARAM_REGEX.match(msg)
if mt:
self.e = mt.group('E')
self.e_err = mt.group('Eerr')
self.ordinate = mt.group('O')
self.ordinate_err = mt.group('Oerr')
def handle_interrupt(self, proc):
fortranlogger.debug('starting interrupt')
v = okcancel_view(VGroup(HGroup(Item('e', format_str='%0.5f'),
UItem('e_err', format_str='%0.5f')),
HGroup(Item('ordinate', format_str='%0.5f'),
UItem('ordinate_err', format_str='%0.5f'))),
width=500,
title='Edit Model Parameters')
info = self.edit_traits(v, kind='livemodal')
if info.result:
# print([str(getattr(self, v)).encode() for v in ('e', 'e_err', 'ordinate', 'ordinate_err')])
line = ' '.join([str(getattr(self, v)) for v in ('e', 'e_err', 'ordinate', 'ordinate_err')])
proc.stdin.write(line.encode())
proc.stdin.flush()
proc.stdin.close()
fortranlogger.debug('finished interrupt')
def traits_view(self):
return okcancel_view(Item('npoints'),
title='Configure ArrMulti')
class AutoArrNode(MDDNode):
name = 'AutoArr'
configuration_name = 'autoarr'
executable_name = 'autoarr_py3'
use_defaults = Bool
n_max_domains = Int(8)
n_min_domains = Int(3)
use_do_fix = Bool
_dumpables = ('use_defaults', 'n_max_domains', 'n_min_domains', 'use_do_fix')
def traits_view(self):
v = okcancel_view(Item('use_defaults'),
Item('n_max_domains'),
Item('n_min_domains'),
Item('use_do_fix'), title='Configure AutoArr')
return v
class CoolingStep(HasTraits):
ctime = Float
ctemp = Float
def __init__(self, time, temp):
self.ctime = float(time)
self.ctemp = float(temp)
class CoolingHistory(HasTraits):
steps = List
kind = Enum('Linear')
start_age = Float(10)
stop_age = Float(0)
start_temp = Float(600)
stop_temp = Float(300)
nsteps = Int(10)
generate_curve = Button
def load(self):
steps = []
if os.path.isfile(self.path):
with open(self.path, 'r') as rfile:
for line in rfile:
try:
a, b = line.split(',')
step = CoolingStep(a, b)
steps.append(step)
except ValueError:
continue
else:
for i in range(10):
step = CoolingStep(10 - 0.5 * i, 500 - i * 50)
steps.append(step)
self.steps = steps
def dump(self):
with open(self.path, 'w') as wfile:
for c in self.steps:
wfile.write('{},{}\n'.format(c.ctime, c.ctemp))
@property
def path(self):
return os.path.join(paths.appdata_dir, 'cooling_history.txt')
def _generate_curve_fired(self):
s = []
tstep = (self.start_age - self.stop_age) / (self.nsteps - 1)
ttstep = (self.start_temp - self.stop_temp) / (self.nsteps - 1)
for i in range(self.nsteps):
ctime = self.start_age - i * tstep
ctemp = self.start_temp - i * ttstep
cs = CoolingStep(ctime, ctemp)
s.append(cs)
self.steps = s
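# Worked example with the defaults above (start_age=10, stop_age=0, start_temp=600,
# stop_temp=300, nsteps=10): tstep = 10/9 ~ 1.11 Ma and ttstep = 300/9 ~ 33.3 C, so the
# generated steps run linearly from (10.0 Ma, 600 C) down to (0.0 Ma, 300 C) inclusive.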
def traits_view(self):
cgrp = VGroup(HGroup(Item('start_age', label='Start'), Item('stop_age', label='Stop'), show_border=True,
label='Time'),
HGroup(Item('start_temp', label='Start'), Item('stop_temp', label='Stop'), show_border=True,
label='Temp.'),
HGroup(Item('nsteps', label='N Steps'), icon_button_editor('generate_curve', '')))
cols = [ObjectColumn(name='ctime', format='%0.1f', label='Time (Ma)'),
ObjectColumn(name='ctemp', format='%0.1f', label='Temp. (C)')]
v = View(VGroup(cgrp, UItem('steps', editor=TableEditor(columns=cols, sortable=False))))
return v
class AgesMeNode(MDDNode, GeometryMixin):
name = 'AgesMe'
configuration_name = 'agesme'
executable_name = 'agesme_py3'
cooling_history = Instance(CoolingHistory, ())
_dumpables = ('_geometry',)
def _pre_run_hook(self, state):
self.cooling_history.load()
def _finish_configure(self):
super(AgesMeNode, self)._finish_configure()
# write the agesme.in file from specified cooling history
p = self.get_path('agesme.in')
with open(p, 'w') as wfile:
steps = self.cooling_history.steps
wfile.write('{}\n'.format(len(steps)))
for ci in steps:
line = '{}\t{}\n'.format(ci.ctime, ci.ctemp)
wfile.write(line)
# include contents of arr-me file
pp = self.get_path('arr-me.in')
with open(pp, 'r') as rfile:
wfile.write(rfile.read())
self.cooling_history.dump()
def traits_view(self):
cool = VGroup(UItem('cooling_history', style='custom'), show_border=True)
geom = VGroup(Item('geometry'), show_border=True)
g = VGroup(geom, cool)
return okcancel_view(g, resizable=True, width=300, height=600, title='Configure AgesMe')
class AutoAgeFreeNode(MDDNode):
name = 'AutoAge Free'
configuration_name = 'autoagefree'
executable_name = 'autoagefree_py3'
class AutoAgeMonNode(MDDNode):
name = 'AutoAge Monotonic'
configuration_name = 'autoagemon'
executable_name = 'autoage-mon_py3'
nruns = Int(10)
max_age = Float(600)
_dumpables = ['nruns', 'max_age']
def traits_view(self):
g = VGroup(Item('nruns'),
Item('max_age'))
return okcancel_view(g, title='Configure AutoAgeMon')
class ConfIntNode(MDDNode):
name = 'Conf. Int'
configuration_name = 'confint'
executable_name = 'confint_py3'
agein = Float
agend = Float
nsteps = Int
_dumpables = ('agein', 'agend', 'nsteps')
def traits_view(self):
g = VGroup(Item('agein', label='Initial Age'),
Item('agend', label='Final Age'),
Item('nsteps', label='Number of Age Intervals'))
return okcancel_view(g)
class CorrFFTNode(MDDNode):
name = 'Corr. FFT'
executable_name = 'corrfft_py3'
class MDDFigureNode(FigureNode):
name = 'MDD Figure'
editor_klass = 'pychron.mdd.tasks.mdd_figure_editor,MDDFigureEditor'
plotter_options_manager_klass = MDDFigureOptionsManager
def run(self, state):
if not state.mdd_workspace:
state.canceled = True
return
editor = self._editor_factory()
editor.roots = state.mdd_workspace.roots
na = sorted((os.path.basename(ni) for ni in editor.roots))
na = grouped_name(na)
editor.name = '{} mdd'.format(na)
editor.replot()
state.editors.append(editor)
self.editor = editor
for name, es in groupby_key(state.editors, 'name'):
for i, ei in enumerate(es):
ei.name = '{} {:02n}'.format(ei.name, i + 1)
# ============= EOF =============================================
|
UManPychron/pychron
|
pychron/mdd/tasks/nodes.py
|
Python
|
apache-2.0
| 17,987
|
#!/usr/bin/env python
# http://ericholscher.com/blog/2009/jun/29/enable-setuppy-test-your-django-apps/
# http://www.travisswicegood.com/2010/01/17/django-virtualenv-pip-and-fabric/
# http://code.djangoproject.com/svn/django/trunk/tests/runtests.py
import os
import sys
# fix sys path so we don't need to setup PYTHONPATH
sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
os.environ['DJANGO_SETTINGS_MODULE'] = 'rest_framework_nested.runtests.settings'
import django
from django.conf import settings
from django.test.utils import get_runner
def usage():
return """
Usage: python runtests.py [UnitTestClass].[method]
You can pass the Class name of the `UnitTestClass` you want to test.
Append a method name if you only want to test a specific method of that class.
"""
def main():
TestRunner = get_runner(settings)
test_runner = TestRunner()
if len(sys.argv) == 2:
test_case = '.' + sys.argv[1]
elif len(sys.argv) == 1:
test_case = ''
else:
print(usage())
sys.exit(1)
test_module_name = 'rest_framework_nested.tests'
if django.VERSION[0] == 1 and django.VERSION[1] < 6:
test_module_name = 'tests'
failures = test_runner.run_tests([test_module_name + test_case])
sys.exit(failures)
if __name__ == '__main__':
main()
|
alanjds/drf-nested-routers
|
rest_framework_nested/runtests/runtests.py
|
Python
|
apache-2.0
| 1,379
|
# -*- coding: utf-8 -*-
# Copyright 2015 Spanish National Research Council
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
import uuid
import webob.dec
import webob.exc
from ooi import utils
import ooi.wsgi
application_url = "https://foo.example.org:8774/ooiv1"
tenants = {
"foo": {"id": uuid.uuid4().hex,
"name": "foo"},
"bar": {"id": uuid.uuid4().hex,
"name": "bar"},
"baz": {"id": uuid.uuid4().hex,
"name": "baz"},
}
flavors = {
1: {
"id": 1,
"name": "foo",
"vcpus": 2,
"ram": 256,
"disk": 10,
},
2: {
"id": 2,
"name": "bar",
"vcpus": 4,
"ram": 2014,
"disk": 20,
}
}
images = {
"foo": {
"id": "foo",
"name": "foo",
},
"bar": {
"id": "bar",
"name": "bar",
}
}
volumes = {
tenants["foo"]["id"]: [
{
"id": uuid.uuid4().hex,
"displayName": "foo",
"size": 2,
"status": "available",
"attachments": [],
},
{
"id": uuid.uuid4().hex,
"displayName": "bar",
"size": 3,
"status": "available",
"attachments": [],
},
{
"id": uuid.uuid4().hex,
"displayName": "baz",
"size": 5,
"status": "available",
"attachments": [],
},
],
tenants["bar"]["id"]: [],
tenants["baz"]["id"]: [
{
"id": uuid.uuid4().hex,
"displayName": "volume",
"size": 5,
"status": "in-use",
},
],
}
pools = {
tenants["foo"]["id"]: [
{
"id": "foo",
"name": "foo",
},
{
"id": "bar",
"name": "bar",
}
],
tenants["bar"]["id"]: [],
tenants["baz"]["id"]: [
{
"id": "public",
"name": "public",
},
],
}
linked_vm_id = uuid.uuid4().hex
allocated_ip = "192.168.253.23"
floating_ips = {
tenants["foo"]["id"]: [],
tenants["bar"]["id"]: [],
tenants["baz"]["id"]: [
{
"fixed_ip": "10.0.0.2",
"id": uuid.uuid4().hex,
"instance_id": linked_vm_id,
"ip": "192.168.253.1",
"pool": pools[tenants["baz"]["id"]][0]["name"],
},
{
"fixed_ip": None,
"id": uuid.uuid4().hex,
"instance_id": None,
"ip": "192.168.253.2",
"pool": pools[tenants["baz"]["id"]][0]["name"],
},
],
}
servers = {
tenants["foo"]["id"]: [
{
"id": uuid.uuid4().hex,
"name": "foo",
"flavor": {"id": flavors[1]["id"]},
"image": {"id": images["foo"]["id"]},
"status": "ACTIVE",
},
{
"id": uuid.uuid4().hex,
"name": "bar",
"flavor": {"id": flavors[2]["id"]},
"image": {"id": images["bar"]["id"]},
"status": "SHUTOFF",
},
{
"id": uuid.uuid4().hex,
"name": "baz",
"flavor": {"id": flavors[1]["id"]},
"image": {"id": images["bar"]["id"]},
"status": "ERROR",
},
],
tenants["bar"]["id"]: [],
tenants["baz"]["id"]: [
{
"id": linked_vm_id,
"name": "withvolume",
"flavor": {"id": flavors[1]["id"]},
"image": {"id": images["bar"]["id"]},
"status": "ACTIVE",
"os-extended-volumes:volumes_attached": [
{"id": volumes[tenants["baz"]["id"]][0]["id"]}
],
"addresses": {
"private": [
{"addr": floating_ips[tenants["baz"]["id"]][0]["fixed_ip"],
"OS-EXT-IPS:type": "fixed",
"OS-EXT-IPS-MAC:mac_addr": "1234"},
{"addr": floating_ips[tenants["baz"]["id"]][0]["ip"],
"OS-EXT-IPS:type": "floating",
"OS-EXT-IPS-MAC:mac_addr": "1234"},
]
}
}
],
}
# avoid circular definition of attachments
volumes[tenants["baz"]["id"]][0]["attachments"] = [{
# how consistent can OpenStack be!
# depending on using /servers/os-volume_attachments
# or /os-volumes it will return different field names
"server_id": servers[tenants["baz"]["id"]][0]["id"],
"serverId": servers[tenants["baz"]["id"]][0]["id"],
"attachment_id": uuid.uuid4().hex,
"volumeId": volumes[tenants["baz"]["id"]][0]["id"],
"volume_id": volumes[tenants["baz"]["id"]][0]["id"],
"device": "/dev/vdb",
"id": volumes[tenants["baz"]["id"]][0]["id"],
}]
def fake_query_results():
cats = []
# OCCI Core
cats.append(
'link; '
'scheme="http://schemas.ogf.org/occi/core#"; '
'class="kind"; title="link"')
cats.append(
'resource; '
'scheme="http://schemas.ogf.org/occi/core#"; '
'class="kind"; title="resource"; '
'rel="http://schemas.ogf.org/occi/core#entity"')
cats.append(
'entity; '
'scheme="http://schemas.ogf.org/occi/core#"; '
'class="kind"; title="entity"')
# OCCI Infrastructure Compute
cats.append(
'compute; '
'scheme="http://schemas.ogf.org/occi/infrastructure#"; '
'class="kind"; title="compute resource"; '
'rel="http://schemas.ogf.org/occi/core#resource"')
cats.append(
'start; '
'scheme="http://schemas.ogf.org/occi/infrastructure/compute/action#"; '
'class="action"; title="start compute instance"')
cats.append(
'stop; '
'scheme="http://schemas.ogf.org/occi/infrastructure/compute/action#"; '
'class="action"; title="stop compute instance"')
cats.append(
'restart; '
'scheme="http://schemas.ogf.org/occi/infrastructure/compute/action#"; '
'class="action"; title="restart compute instance"')
cats.append(
'suspend; '
'scheme="http://schemas.ogf.org/occi/infrastructure/compute/action#"; '
'class="action"; title="suspend compute instance"')
# OCCI Templates
cats.append(
'os_tpl; '
'scheme="http://schemas.ogf.org/occi/infrastructure#"; '
'class="mixin"; title="OCCI OS Template"')
cats.append(
'resource_tpl; '
'scheme="http://schemas.ogf.org/occi/infrastructure#"; '
'class="mixin"; title="OCCI Resource Template"')
# OpenStack Images
cats.append(
'bar; '
'scheme="http://schemas.openstack.org/template/os#"; '
'class="mixin"; title="bar"; '
'rel="http://schemas.ogf.org/occi/infrastructure#os_tpl"')
cats.append(
'foo; '
'scheme="http://schemas.openstack.org/template/os#"; '
'class="mixin"; title="foo"; '
'rel="http://schemas.ogf.org/occi/infrastructure#os_tpl"')
# OpenStack Flavors
cats.append(
'1; '
'scheme="http://schemas.openstack.org/template/resource#"; '
'class="mixin"; title="Flavor: foo"; '
'rel="http://schemas.ogf.org/occi/infrastructure#resource_tpl"')
cats.append(
'2; '
'scheme="http://schemas.openstack.org/template/resource#"; '
'class="mixin"; title="Flavor: bar"; '
'rel="http://schemas.ogf.org/occi/infrastructure#resource_tpl"')
# OCCI Infrastructure Network
cats.append(
'network; '
'scheme="http://schemas.ogf.org/occi/infrastructure#"; '
'class="kind"; title="network resource"; '
'rel="http://schemas.ogf.org/occi/core#resource"')
cats.append(
'ipnetwork; '
'scheme="http://schemas.ogf.org/occi/infrastructure/network#"; '
'class="mixin"; title="IP Networking Mixin"')
cats.append(
'up; '
'scheme="http://schemas.ogf.org/occi/infrastructure/network/action#"; '
'class="action"; title="up network instance"')
cats.append(
'down; '
'scheme="http://schemas.ogf.org/occi/infrastructure/network/action#"; '
'class="action"; title="down network instance"')
cats.append(
'networkinterface; '
'scheme="http://schemas.ogf.org/occi/infrastructure#"; '
'class="kind"; title="network link resource"; '
'rel="http://schemas.ogf.org/occi/core#link"')
cats.append(
'ipnetworkinterface; '
'scheme="http://schemas.ogf.org/occi/infrastructure/'
'networkinterface#"; '
'class="mixin"; title="IP Network interface Mixin"')
# OCCI Infrastructure Storage
cats.append(
'storage; '
'scheme="http://schemas.ogf.org/occi/infrastructure#"; '
'class="kind"; title="storage resource"; '
'rel="http://schemas.ogf.org/occi/core#resource"')
cats.append(
'storagelink; '
'scheme="http://schemas.ogf.org/occi/infrastructure#"; '
'class="kind"; title="storage link resource"; '
'rel="http://schemas.ogf.org/occi/core#link"')
cats.append(
'offline; '
'scheme="http://schemas.ogf.org/occi/infrastructure/storage/action#"; '
'class="action"; title="offline storage instance"')
cats.append(
'online; '
'scheme="http://schemas.ogf.org/occi/infrastructure/storage/action#"; '
'class="action"; title="online storage instance"')
cats.append(
'backup; '
'scheme="http://schemas.ogf.org/occi/infrastructure/storage/action#"; '
'class="action"; title="backup storage instance"')
cats.append(
'resize; '
'scheme="http://schemas.ogf.org/occi/infrastructure/storage/action#"; '
'class="action"; title="resize storage instance"')
cats.append(
'snapshot; '
'scheme="http://schemas.ogf.org/occi/infrastructure/storage/action#"; '
'class="action"; title="snapshot storage instance"')
# OpenStack contextualization
cats.append(
'user_data; '
'scheme="http://schemas.openstack.org/compute/instance#"; '
'class="mixin"; title="Contextualization extension - user_data"')
cats.append(
'public_key; '
'scheme="http://schemas.openstack.org/instance/credentials#"; '
'class="mixin"; title="Contextualization extension - public_key"')
result = []
for c in cats:
result.append(("Category", c))
return result
class FakeOpenStackFault(ooi.wsgi.Fault):
_fault_names = {
400: "badRequest",
401: "unauthorized",
403: "forbidden",
404: "itemNotFound",
405: "badMethod",
406: "notAceptable",
409: "conflictingRequest",
413: "overLimit",
415: "badMediaType",
429: "overLimit",
501: "notImplemented",
503: "serviceUnavailable"}
@webob.dec.wsgify()
def __call__(self, req):
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code)
explanation = self.wrapped_exc.explanation
fault_data = {
fault_name: {
'code': code,
'message': explanation}}
self.wrapped_exc.body = utils.utf8(json.dumps(fault_data))
self.wrapped_exc.content_type = "application/json"
return self.wrapped_exc
class FakeApp(object):
"""Poor man's fake application."""
def __init__(self):
self.routes = {}
for tenant in tenants.values():
path = "/%s" % tenant["id"]
self._populate(path, "server", servers[tenant["id"]], actions=True)
self._populate(path, "volume", volumes[tenant["id"]], "os-volumes")
self._populate(path, "floating_ip_pool", pools[tenant["id"]],
"os-floating-ip-pools")
self._populate(path, "floating_ip", floating_ips[tenant["id"]],
"os-floating-ips")
            # NOTE(aloga): dict_values in Py3 is not serializable in JSON
self._populate(path, "image", list(images.values()))
self._populate(path, "flavor", list(flavors.values()))
self._populate_attached_volumes(path, servers[tenant["id"]],
volumes[tenant["id"]])
def _populate(self, path_base, obj_name, obj_list,
objs_path=None, actions=[]):
objs_name = "%ss" % obj_name
if objs_path:
path = "%s/%s" % (path_base, objs_path)
else:
path = "%s/%s" % (path_base, objs_name)
objs_details_path = "%s/detail" % path
self.routes[path] = create_fake_json_resp({objs_name: obj_list})
self.routes[objs_details_path] = create_fake_json_resp(
{objs_name: obj_list})
for o in obj_list:
obj_path = "%s/%s" % (path, o["id"])
self.routes[obj_path] = create_fake_json_resp({obj_name: o})
if actions:
action_path = "%s/action" % obj_path
self.routes[action_path] = webob.Response(status=202)
def _populate_attached_volumes(self, path, server_list, vol_list):
for s in server_list:
attachments = []
if "os-extended-volumes:volumes_attached" in s:
for attach in s["os-extended-volumes:volumes_attached"]:
for v in vol_list:
if attach["id"] == v["id"]:
attachments.append(v["attachments"][0])
path_base = "%s/servers/%s/os-volume_attachments" % (path, s["id"])
self.routes[path_base] = create_fake_json_resp(
{"volumeAttachments": attachments}
)
for attach in attachments:
obj_path = "%s/%s" % (path_base, attach["id"])
self.routes[obj_path] = create_fake_json_resp(
{"volumeAttachment": attach})
@webob.dec.wsgify()
def __call__(self, req):
if req.method == "GET":
return self._do_get(req)
elif req.method == "POST":
return self._do_post(req)
elif req.method == "DELETE":
return self._do_delete(req)
def _do_create_server(self, req):
# TODO(enolfc): this should check the json is
# semantically correct
s = {"server": {"id": "foo",
"name": "foo",
"flavor": {"id": "1"},
"image": {"id": "2"},
"status": "ACTIVE"}}
return create_fake_json_resp(s)
def _do_create_volume(self, req):
# TODO(enolfc): this should check the json is
# semantically correct
s = {"volume": {"id": "foo",
"displayName": "foo",
"size": 1,
"status": "on-line"}}
return create_fake_json_resp(s)
def _do_create_attachment(self, req):
v = {"volumeAttachment": {"serverId": "foo",
"volumeId": "bar",
"device": "/dev/vdb"}}
return create_fake_json_resp(v, 202)
def _do_allocate_ip(self, req):
body = req.json_body.copy()
pool = body.popitem()
tenant = req.path_info.split('/')[1]
for p in pools[tenant]:
if p["name"] == pool[1]:
break
else:
exc = webob.exc.HTTPNotFound()
return FakeOpenStackFault(exc)
ip = {"floating_ip": {"ip": allocated_ip, "id": 1}}
return create_fake_json_resp(ip, 202)
def _do_post(self, req):
if req.path_info.endswith("servers"):
return self._do_create_server(req)
if req.path_info.endswith("os-volumes"):
return self._do_create_volume(req)
elif req.path_info.endswith("action"):
body = req.json_body.copy()
action = body.popitem()
if action[0] in ["os-start", "os-stop", "reboot",
"addFloatingIp", "removeFloatingIp"]:
return self._get_from_routes(req)
elif req.path_info.endswith("os-volume_attachments"):
return self._do_create_attachment(req)
elif req.path_info.endswith("os-floating-ips"):
return self._do_allocate_ip(req)
raise Exception
def _do_delete(self, req):
self._do_get(req)
tested_paths = {
r"/[^/]+/servers/[^/]+/os-volume_attachments/[^/]+$": 202,
r"/[^/]+/os-floating-ips/[^/]+$": 202,
r"/[^/]+/servers/[^/]+$": 204,
r"/[^/]+/os-volumes/[^/]+$": 204,
}
for p, st in tested_paths.items():
if re.match(p, req.path_info):
return create_fake_json_resp({}, st)
raise Exception
def _do_get(self, req):
return self._get_from_routes(req)
def _get_from_routes(self, req):
try:
ret = self.routes[req.path_info]
except KeyError:
exc = webob.exc.HTTPNotFound()
ret = FakeOpenStackFault(exc)
return ret
def create_fake_json_resp(data, status=200):
r = webob.Response()
r.headers["Content-Type"] = "application/json"
r.charset = "utf8"
r.body = json.dumps(data).encode("utf8")
r.status_code = status
return r
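# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): drive the fake
# WSGI app above with a plain webob request to show how the canned routes
# respond. Runs only when the module is executed directly.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    fake_app = FakeApp()
    demo_tenant = tenants["baz"]["id"]
    demo_req = webob.Request.blank("/%s/servers" % demo_tenant)
    demo_resp = demo_req.get_response(fake_app)
    # Expected: HTTP 200 with the single "withvolume" server defined above.
    print("%s %s" % (demo_resp.status_int,
                     json.loads(demo_resp.body)["servers"][0]["name"]))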
|
orviz/ooi
|
ooi/tests/fakes.py
|
Python
|
apache-2.0
| 17,912
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from nova.api.openstack.compute import admin_actions
from nova.compute import vm_states
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.policies import base
class AdminActionsPolicyTest(base.BasePolicyTest):
"""Test Admin Actions APIs policies with all possible context.
This class defines the set of context with different roles
which are allowed and not allowed to pass the policy checks.
With those set of context, it will call the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(AdminActionsPolicyTest, self).setUp()
self.controller = admin_actions.AdminActionsController()
self.req = fakes.HTTPRequest.blank('')
self.mock_get = self.useFixture(
fixtures.MockPatch('nova.compute.api.API.get')).mock
uuid = uuids.fake_id
self.instance = fake_instance.fake_instance_obj(
self.project_member_context,
id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
# Check that admin is able to change the service
self.admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
# Check that non-admin is not able to change the service
self.admin_unauthorized_contexts = [
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.other_project_member_context,
self.project_foo_context, self.project_reader_context
]
@mock.patch('nova.objects.Instance.save')
def test_reset_state_policy(self, mock_save):
rule_name = "os_compute_api:os-admin-actions:reset_state"
self.common_policy_check(self.admin_authorized_contexts,
self.admin_unauthorized_contexts,
rule_name, self.controller._reset_state,
self.req, self.instance.uuid,
body={'os-resetState': {'state': 'active'}})
def test_inject_network_info_policy(self):
rule_name = "os_compute_api:os-admin-actions:inject_network_info"
with mock.patch.object(self.controller.compute_api,
"inject_network_info"):
self.common_policy_check(self.admin_authorized_contexts,
self.admin_unauthorized_contexts,
rule_name,
self.controller._inject_network_info,
self.req, self.instance.uuid, body={})
def test_reset_network_policy(self):
rule_name = "os_compute_api:os-admin-actions:reset_network"
with mock.patch.object(self.controller.compute_api, "reset_network"):
self.common_policy_check(self.admin_authorized_contexts,
self.admin_unauthorized_contexts,
rule_name, self.controller._reset_network,
self.req, self.instance.uuid, body={})
class AdminActionsScopeTypePolicyTest(AdminActionsPolicyTest):
"""Test Admin Actions APIs policies with system scope enabled.
    This class sets the nova.conf [oslo_policy] enforce_scope to True
    so that we can switch on the scope checking on the oslo.policy side.
    It defines the set of contexts with scoped tokens
    which are allowed and not allowed to pass the policy checks.
    With that set of contexts, it will run the API operation and
    verify the expected behaviour.
"""
def setUp(self):
super(AdminActionsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
# Check that system admin is able to perform the system level actions
# on server.
self.admin_authorized_contexts = [
self.system_admin_context]
# Check that non-system or non-admin is not able to perform the system
# level actions on server.
self.admin_unauthorized_contexts = [
self.legacy_admin_context, self.system_member_context,
self.system_reader_context, self.system_foo_context,
self.project_admin_context, self.project_member_context,
self.other_project_member_context,
self.project_foo_context, self.project_reader_context
]
|
rahulunair/nova
|
nova/tests/unit/policies/test_admin_actions.py
|
Python
|
apache-2.0
| 5,313
|
# A comment, this is so you can read your program later.
# Anything after the # is ignored by python.
print "I could have code like this." # and the comment after is ignored
# You can also use a comment to "disable" or comment out a piece of code:
# print "This won't run."
print "This will run."
|
ChucklesZeClown/learn-python
|
Exercises-learn-python-the-hard-way/ex2-comments-and-pound-characters.py
|
Python
|
apache-2.0
| 301
|
#!/usr/bin/env python
# coding=utf-8
import commands
import sys
from docopt import docopt
#from handler import LogFileClient
from sdutil.log_util import getLogger
from sdutil.date_util import *
reload(sys)
sys.setdefaultencoding('utf-8')
from elasticsearch import Elasticsearch
from pdb import *
import requests
import json
import time
logger = getLogger(__name__, __file__)
"""
host like:"http://172.17.0.33:8081"
"""
def count_from_es(host,index,query_str,startTime,endTime,scroll=False):
logger.info('search_from_es startTime:%s,endTime:%s'%(startTime,endTime))
startTimeStamp = int(str2timestamp(startTime))*1000
endTimeStamp = int(str2timestamp(endTime))*1000+999
data_post_search = {"query":{"filtered":{"query":{"query_string":{"query":query_str,"analyze_wildcard":'true'}},"filter":{"bool":{"must":[{"range":{"@timestamp":{"gte":startTimeStamp,"lte":endTimeStamp,"format":"epoch_millis"}}}],"must_not":[]}}}}}
logger.info('search_from_es,post_data:%s'%(data_post_search))
es = Elasticsearch(host,timeout=120)
response = es.count(index=index, body=data_post_search)
return response
def do_search(host,index,query_str,startTimeStamp,endTimeStamp,scroll,_source,time_step):
es = Elasticsearch(host,timeout=120)
response ={}
data_post_search = {"query":{"filtered":{"query":{"query_string":{"query":query_str,"analyze_wildcard":'true'}},"filter":{"bool":{"must":[{"range":{"@timestamp":{"gte":startTimeStamp,"lte":endTimeStamp,"format":"epoch_millis"}}}],"must_not":[]}}}}}
logger.info('search_from_es,post_data:%s'%(data_post_search))
if not scroll:
if _source:
response = es.search(index=index, body=data_post_search,size=10000,_source=_source)
else:
response = es.search(index=index, body=data_post_search,size=10000)
else:
page_size=10000
scan_resp =None
if _source:
scan_resp = es.search(index=index, body=data_post_search,search_type="scan", scroll="5m",size=page_size,_source=_source)
else:
scan_resp = es.search(index=index, body=data_post_search,search_type="scan", scroll="5m",size=page_size)
scrollId= scan_resp['_scroll_id']
response={}
total = scan_resp['hits']['total']
response_list =[]
scrollId_list =[]
for page_num in range(total/page_size + 1):
response_tmp ={}
response_tmp = es.scroll(scroll_id=scrollId, scroll= "5m")
#es.clear_scroll([scrollId])
scrollId = response_tmp['_scroll_id']
scrollId_list.append(str(scrollId))
response_list.append(response_tmp)
if response.has_key('hits'):
_hits = response['hits']
_hits['hits']+=response_tmp['hits']['hits']
response['hits'] = _hits
else:
response['hits'] = response_tmp['hits']
return response
def search_from_es(host,index,query_str,startTime,endTime,scroll=False,_source=None,time_step=0):
logger.info('search_from_es startTime:%s,endTime:%s'%(startTime,endTime))
startTimeStamp = int(str2timestamp(startTime))*1000
endTimeStamp = int(str2timestamp(endTime))*1000+999
all_response={}
timegap = endTimeStamp-startTimeStamp
if time_step>0:
_s1=startTimeStamp
_s2=startTimeStamp+time_step
run_time =0
all_response = {}
time_count = {}
while(_s2<=endTimeStamp):
response_tmp = do_search(host,index,query_str,_s1,_s2,scroll,_source,time_step)
#response_tmp = do_search(_s1,_s2)
if all_response.has_key('hits'):
_hits = all_response['hits']
_hits['hits']+=response_tmp['hits']['hits']
all_response['hits'] = _hits
else:
all_response['hits'] = response_tmp['hits']
run_time+=1
_s1=_s1+time_step
_s2 = _s2+time_step
if time_count.has_key(_s1):
time_count[_s1]+=1
else:
time_count[_s1]=1
if time_count.has_key(_s2):
time_count[_s2]+=1
else:
time_count[_s2]=1
print '----run_time:',run_time,'_s1:',_s1,',_s2:',_s2,',len:',len(all_response['hits']['hits'])
print '-s1--',time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(_s1/1000))
print '-s2--',time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(_s2/1000))
print time_count
time.sleep(2)
else:
all_response = do_search(host,index,query_str,startTimeStamp,endTimeStamp,scroll,_source,time_step)
return all_response
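# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The host,
# index, query string and time-string format are placeholders/assumptions;
# a reachable Elasticsearch endpoint with an "@timestamp" field is required,
# and the time strings must be whatever sdutil's str2timestamp() accepts.
# ---------------------------------------------------------------------------
# es_host = 'http://172.17.0.33:8081'   # format described in the docstring above
# count_resp = count_from_es(es_host, 'logstash-2018.06.*', 'status:500',
#                            '2018-06-01 00:00:00', '2018-06-01 01:00:00')
# hits = search_from_es(es_host, 'logstash-2018.06.*', 'status:500',
#                       '2018-06-01 00:00:00', '2018-06-01 01:00:00',
#                       scroll=True, _source=['message'],
#                       time_step=10 * 60 * 1000)   # 10-minute slices in ms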
|
zhaochl/python-utils
|
es/elasticsearch_util.py
|
Python
|
apache-2.0
| 4,771
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
# pylint: disable=line-too-long
r"""Run training loop.
"""
# pylint: enable=line-too-long
import os
import random
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.specs.tensor_spec import TensorSpec
import tqdm
from representation_batch_rl.batch_rl import asac
from representation_batch_rl.batch_rl import awr
from representation_batch_rl.batch_rl import ddpg
from representation_batch_rl.batch_rl import evaluation
from representation_batch_rl.batch_rl import pcl
from representation_batch_rl.batch_rl import sac
from representation_batch_rl.batch_rl import sac_v1
from representation_batch_rl.batch_rl.image_utils import image_aug
from representation_batch_rl.twin_sac import utils
FLAGS = flags.FLAGS
flags.DEFINE_string('env_name', 'pixels-dm-cartpole-swingup',
'Environment for training/evaluation.')
flags.DEFINE_integer('seed', 42, 'Fixed random seed for training.')
flags.DEFINE_float('actor_lr', 3e-4, 'Actor learning rate.')
flags.DEFINE_float('alpha_lr', 3e-4, 'Temperature learning rate.')
flags.DEFINE_float('critic_lr', 3e-4, 'Critic learning rate.')
flags.DEFINE_integer('deployment_batch_size', 1, 'Batch size.')
flags.DEFINE_integer('sample_batch_size', 256, 'Batch size.')
flags.DEFINE_float('discount', 0.99, 'Discount used for returns.')
flags.DEFINE_float('tau', 0.005,
'Soft update coefficient for the target network.')
flags.DEFINE_integer('max_timesteps', 200_000, 'Max timesteps to train.')
flags.DEFINE_integer('max_length_replay_buffer', 100_000,
'Max replay buffer size (image observations use 100k).')
flags.DEFINE_integer('num_random_actions', 10_000,
'Fill replay buffer with N random actions.')
flags.DEFINE_integer('start_training_timesteps', 10_000,
'Start training when replay buffer contains N timesteps.')
flags.DEFINE_string('save_dir', '/tmp/save/', 'Directory to save results to.')
flags.DEFINE_integer('log_interval', 1_000, 'Log every N timesteps.')
flags.DEFINE_integer('eval_interval', 10_000, 'Evaluate every N timesteps.')
flags.DEFINE_integer('action_repeat', 8,
'(optional) action repeat used when instantiating env.')
flags.DEFINE_integer('frame_stack', 0,
'(optional) frame stack used when instantiating env.')
flags.DEFINE_enum('algo_name', 'sac', [
'ddpg',
'crossnorm_ddpg',
'sac',
'pc_sac',
'pcl',
'crossnorm_sac',
'crr',
'awr',
'sac_v1',
'asac',
], 'Algorithm.')
flags.DEFINE_boolean('eager', False, 'Execute functions eagerly.')
def main(_):
if FLAGS.eager:
tf.config.experimental_run_functions_eagerly(FLAGS.eager)
tf.random.set_seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
random.seed(FLAGS.seed)
action_repeat = FLAGS.action_repeat
_, _, domain_name, _ = FLAGS.env_name.split('-')
if domain_name in ['cartpole']:
FLAGS.set_default('action_repeat', 8)
elif domain_name in ['reacher', 'cheetah', 'ball_in_cup', 'hopper']:
FLAGS.set_default('action_repeat', 4)
elif domain_name in ['finger', 'walker']:
FLAGS.set_default('action_repeat', 2)
FLAGS.set_default('max_timesteps', FLAGS.max_timesteps // FLAGS.action_repeat)
env = utils.load_env(
FLAGS.env_name, FLAGS.seed, action_repeat, FLAGS.frame_stack)
eval_env = utils.load_env(
FLAGS.env_name, FLAGS.seed, action_repeat, FLAGS.frame_stack)
is_image_obs = (isinstance(env.observation_spec(), TensorSpec) and
len(env.observation_spec().shape) == 3)
spec = (
env.observation_spec(),
env.action_spec(),
env.reward_spec(),
env.reward_spec(), # discount spec
env.observation_spec() # next observation spec
)
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
spec, batch_size=1, max_length=FLAGS.max_length_replay_buffer)
@tf.function
def add_to_replay(state, action, reward, discount, next_states):
replay_buffer.add_batch((state, action, reward, discount, next_states))
hparam_str = utils.make_hparam_string(
FLAGS.xm_parameters, seed=FLAGS.seed, env_name=FLAGS.env_name,
algo_name=FLAGS.algo_name)
summary_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.save_dir, 'tb', hparam_str))
results_writer = tf.summary.create_file_writer(
os.path.join(FLAGS.save_dir, 'results', hparam_str))
if 'ddpg' in FLAGS.algo_name:
model = ddpg.DDPG(
env.observation_spec(),
env.action_spec(),
cross_norm='crossnorm' in FLAGS.algo_name)
elif 'crr' in FLAGS.algo_name:
model = awr.AWR(
env.observation_spec(),
env.action_spec(), f='bin_max')
elif 'awr' in FLAGS.algo_name:
model = awr.AWR(
env.observation_spec(),
env.action_spec(), f='exp_mean')
elif 'sac_v1' in FLAGS.algo_name:
model = sac_v1.SAC(
env.observation_spec(),
env.action_spec(),
target_entropy=-env.action_spec().shape[0])
elif 'asac' in FLAGS.algo_name:
model = asac.ASAC(
env.observation_spec(),
env.action_spec(),
target_entropy=-env.action_spec().shape[0])
elif 'sac' in FLAGS.algo_name:
model = sac.SAC(
env.observation_spec(),
env.action_spec(),
target_entropy=-env.action_spec().shape[0],
cross_norm='crossnorm' in FLAGS.algo_name,
pcl_actor_update='pc' in FLAGS.algo_name)
elif 'pcl' in FLAGS.algo_name:
model = pcl.PCL(
env.observation_spec(),
env.action_spec(),
target_entropy=-env.action_spec().shape[0])
initial_collect_policy = random_tf_policy.RandomTFPolicy(
env.time_step_spec(), env.action_spec())
dataset = replay_buffer.as_dataset(
num_parallel_calls=tf.data.AUTOTUNE,
sample_batch_size=FLAGS.sample_batch_size)
if is_image_obs:
# Augment images as in DRQ.
dataset = dataset.map(image_aug,
num_parallel_calls=tf.data.AUTOTUNE,
deterministic=False).prefetch(3)
else:
dataset = dataset.prefetch(3)
def repack(*data):
return data[0]
dataset = dataset.map(repack)
replay_buffer_iter = iter(dataset)
previous_time = time.time()
timestep = env.reset()
episode_return = 0
episode_timesteps = 0
step_mult = 1 if action_repeat < 1 else action_repeat
for i in tqdm.tqdm(range(FLAGS.max_timesteps)):
if i % FLAGS.deployment_batch_size == 0:
for _ in range(FLAGS.deployment_batch_size):
if timestep.is_last():
if episode_timesteps > 0:
current_time = time.time()
with summary_writer.as_default():
tf.summary.scalar(
'train/returns',
episode_return,
step=(i + 1) * step_mult)
tf.summary.scalar(
'train/FPS',
episode_timesteps / (current_time - previous_time),
step=(i + 1) * step_mult)
timestep = env.reset()
episode_return = 0
episode_timesteps = 0
previous_time = time.time()
if (replay_buffer.num_frames() < FLAGS.num_random_actions or
replay_buffer.num_frames() < FLAGS.deployment_batch_size):
# Use policy only after the first deployment.
policy_step = initial_collect_policy.action(timestep)
action = policy_step.action
else:
action = model.actor(timestep.observation, sample=True)
next_timestep = env.step(action)
add_to_replay(timestep.observation, action, next_timestep.reward,
next_timestep.discount, next_timestep.observation)
episode_return += next_timestep.reward[0]
episode_timesteps += 1
timestep = next_timestep
if i + 1 >= FLAGS.start_training_timesteps:
with summary_writer.as_default():
info_dict = model.update_step(replay_buffer_iter)
if (i + 1) % FLAGS.log_interval == 0:
with summary_writer.as_default():
for k, v in info_dict.items():
tf.summary.scalar(f'training/{k}', v, step=(i + 1) * step_mult)
if (i + 1) % FLAGS.eval_interval == 0:
logging.info('Performing policy eval.')
average_returns, evaluation_timesteps = evaluation.evaluate(
eval_env, model)
with results_writer.as_default():
tf.summary.scalar(
'evaluation/returns', average_returns, step=(i + 1) * step_mult)
tf.summary.scalar(
'evaluation/length', evaluation_timesteps, step=(i+1) * step_mult)
logging.info('Eval at %d: ave returns=%f, ave episode length=%f',
(i + 1) * step_mult, average_returns, evaluation_timesteps)
if (i + 1) % FLAGS.eval_interval == 0:
model.save_weights(
os.path.join(FLAGS.save_dir, 'results',
FLAGS.env_name + '__' + str(i + 1)))
if __name__ == '__main__':
app.run(main)
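# ---------------------------------------------------------------------------
# Illustrative invocation sketch (not part of the original script). Flag
# names come from the DEFINE_* calls above; the values are placeholders.
# ---------------------------------------------------------------------------
# python train_eval_online.py --env_name=pixels-dm-cartpole-swingup \
#     --algo_name=sac --max_timesteps=200000 --save_dir=/tmp/save/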
|
google-research/google-research
|
representation_batch_rl/batch_rl/train_eval_online.py
|
Python
|
apache-2.0
| 9,735
|
"""
Test that SBProcess.LoadImageUsingPaths works correctly.
"""
from __future__ import print_function
import os
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
@skipIfWindows # The Windows platform doesn't implement DoLoadImage.
class LoadUsingPathsTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Make the hidden directory in the build hierarchy:
lldbutil.mkdir_p(self.getBuildArtifact("hidden"))
# Invoke the default build rule.
self.build()
ext = 'so'
if self.platformIsDarwin():
ext = 'dylib'
self.lib_name = 'libloadunload.' + ext
self.wd = self.getBuildDir()
self.hidden_dir = os.path.join(self.wd, 'hidden')
self.hidden_lib = os.path.join(self.hidden_dir, self.lib_name)
@skipIfFreeBSD # llvm.org/pr14424 - missing FreeBSD Makefiles/testcase support
@not_remote_testsuite_ready
@skipIfWindows # Windows doesn't have dlopen and friends, dynamic libraries work differently
@expectedFlakeyNetBSD
def test_load_using_paths(self):
"""Test that we can load a module by providing a set of search paths."""
if self.platformIsDarwin():
dylibName = 'libloadunload_d.dylib'
else:
dylibName = 'libloadunload_d.so'
# The directory with the dynamic library we did not link to.
path_dir = os.path.join(self.getBuildDir(), "hidden")
(target, process, thread,
_) = lldbutil.run_to_source_breakpoint(self,
"Break here to do the load using paths",
lldb.SBFileSpec("main.cpp"))
error = lldb.SBError()
lib_spec = lldb.SBFileSpec(self.lib_name)
paths = lldb.SBStringList()
paths.AppendString(self.wd)
paths.AppendString(os.path.join(self.wd, "no_such_dir"))
out_spec = lldb.SBFileSpec()
# First try with no correct directories on the path, and make sure that doesn't blow up:
token = process.LoadImageUsingPaths(lib_spec, paths, out_spec, error)
self.assertEqual(token, lldb.LLDB_INVALID_IMAGE_TOKEN, "Only looked on the provided path.")
# Now add the correct dir to the paths list and try again:
paths.AppendString(self.hidden_dir)
token = process.LoadImageUsingPaths(lib_spec, paths, out_spec, error)
self.assertNotEqual(token, lldb.LLDB_INVALID_IMAGE_TOKEN, "Got a valid token")
self.assertEqual(out_spec, lldb.SBFileSpec(self.hidden_lib), "Found the expected library")
# Make sure this really is in the image list:
loaded_module = target.FindModule(out_spec)
self.assertTrue(loaded_module.IsValid(), "The loaded module is in the image list.")
# Now see that we can call a function in the loaded module.
value = thread.frames[0].EvaluateExpression("d_function()", lldb.SBExpressionOptions())
self.assertTrue(value.GetError().Success(), "Got a value from the expression")
ret_val = value.GetValueAsSigned()
self.assertEqual(ret_val, 12345, "Got the right value")
# Make sure the token works to unload it:
process.UnloadImage(token)
# Make sure this really is no longer in the image list:
loaded_module = target.FindModule(out_spec)
self.assertFalse(loaded_module.IsValid(), "The unloaded module is no longer in the image list.")
# Make sure a relative path also works:
paths.Clear()
paths.AppendString(os.path.join(self.wd, "no_such_dir"))
paths.AppendString(self.wd)
relative_spec = lldb.SBFileSpec(os.path.join("hidden", self.lib_name))
out_spec = lldb.SBFileSpec()
token = process.LoadImageUsingPaths(relative_spec, paths, out_spec, error)
self.assertNotEqual(token, lldb.LLDB_INVALID_IMAGE_TOKEN, "Got a valid token with relative path")
self.assertEqual(out_spec, lldb.SBFileSpec(self.hidden_lib), "Found the expected library with relative path")
process.UnloadImage(token)
# Make sure the presence of an empty path doesn't mess anything up:
paths.Clear()
paths.AppendString("")
paths.AppendString(os.path.join(self.wd, "no_such_dir"))
paths.AppendString(self.wd)
relative_spec = lldb.SBFileSpec(os.path.join("hidden", self.lib_name))
out_spec = lldb.SBFileSpec()
token = process.LoadImageUsingPaths(relative_spec, paths, out_spec, error)
self.assertNotEqual(token, lldb.LLDB_INVALID_IMAGE_TOKEN, "Got a valid token with included empty path")
self.assertEqual(out_spec, lldb.SBFileSpec(self.hidden_lib), "Found the expected library with included empty path")
process.UnloadImage(token)
# Finally, passing in an absolute path should work like the basename:
# This should NOT work because we've taken hidden_dir off the paths:
abs_spec = lldb.SBFileSpec(os.path.join(self.hidden_dir, self.lib_name))
token = process.LoadImageUsingPaths(lib_spec, paths, out_spec, error)
self.assertEqual(token, lldb.LLDB_INVALID_IMAGE_TOKEN, "Only looked on the provided path.")
# But it should work when we add the dir:
# Now add the correct dir to the paths list and try again:
paths.AppendString(self.hidden_dir)
token = process.LoadImageUsingPaths(lib_spec, paths, out_spec, error)
self.assertNotEqual(token, lldb.LLDB_INVALID_IMAGE_TOKEN, "Got a valid token")
self.assertEqual(out_spec, lldb.SBFileSpec(self.hidden_lib), "Found the expected library")
|
apple/swift-lldb
|
packages/Python/lldbsuite/test/functionalities/load_using_paths/TestLoadUsingPaths.py
|
Python
|
apache-2.0
| 5,848
|
#! /usr/bin/python
# -*- coding:utf-8 -*-
"""
Author: AsherYang
Email: ouyangfan1991@gmail.com
Date: 2018/6/27
Desc: Database access class for the admin backend
"""
import sys
sys.path.append('../')
from util import DbUtil
from util.DateUtil import DateUtil
class AdminDao:
def __init__(self):
pass
    # Query admin info by the admin's phone number
def queryByTel(self, admin_tel):
query = 'SELECT * FROM ffstore_admin WHERE admin_tel = "%s" ' % admin_tel
return DbUtil.query(query)
    # Query admin info by phone number and SMS password
def queryByTelAndPwd(self, admin_tel, sms_pwd):
query = 'SELECT * FROM ffstore_admin WHERE admin_tel = "%s" and sms_pwd = "%s" ' % (admin_tel, sms_pwd)
return DbUtil.query(query)
    # Update the SMS verification code
def updateSmsPwd(self, admin_tel, sms_pwd):
current_time = DateUtil().getCurrentTimeStamp()
update = 'update ffstore_admin set sms_pwd = "%s", login_time = "%s" where admin_tel = "%s"' \
% (sms_pwd, current_time, admin_tel)
return DbUtil.update(update)
    # Update the login time (timestamp in seconds)
def updateLoginTime(self, admin_tel, login_time):
current_time = DateUtil().getCurrentTimeStamp()
if not login_time or login_time > current_time:
return False
update = 'update ffstore_admin set login_time = "%s" where admin_tel = "%s"' % (login_time, admin_tel)
return DbUtil.update(update)
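# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); it assumes a
# DbUtil connection is configured and that an admin row exists for the
# placeholder phone number below.
# ---------------------------------------------------------------------------
# dao = AdminDao()
# admin = dao.queryByTel('13800000000')
# if admin and dao.updateSmsPwd('13800000000', '123456'):
#     print 'sms code stored for admin %s' % admin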
|
AsherYang/ThreeLine
|
server/ffstore/db/AdminDao.py
|
Python
|
apache-2.0
| 1,494
|
"""The tests for the emulated Hue component."""
import asyncio
import json
from unittest.mock import patch
import pytest
from homeassistant import bootstrap, const, core
import homeassistant.components as core_components
from homeassistant.components import (
emulated_hue, http, light, script, media_player, fan
)
from homeassistant.const import STATE_ON, STATE_OFF
from homeassistant.components.emulated_hue.hue_api import (
HUE_API_STATE_ON, HUE_API_STATE_BRI, HueUsernameView,
HueAllLightsStateView, HueOneLightStateView, HueOneLightChangeView)
from homeassistant.components.emulated_hue import Config
from tests.common import (
get_test_instance_port, mock_http_component_app)
HTTP_SERVER_PORT = get_test_instance_port()
BRIDGE_SERVER_PORT = get_test_instance_port()
BRIDGE_URL_BASE = 'http://127.0.0.1:{}'.format(BRIDGE_SERVER_PORT) + '{}'
JSON_HEADERS = {const.HTTP_HEADER_CONTENT_TYPE: const.CONTENT_TYPE_JSON}
@pytest.fixture
def hass_hue(loop, hass):
"""Setup a hass instance for these tests."""
# We need to do this to get access to homeassistant/turn_(on,off)
loop.run_until_complete(
core_components.async_setup(hass, {core.DOMAIN: {}}))
loop.run_until_complete(bootstrap.async_setup_component(
hass, http.DOMAIN,
{http.DOMAIN: {http.CONF_SERVER_PORT: HTTP_SERVER_PORT}}))
with patch('homeassistant.components'
'.emulated_hue.UPNPResponderThread'):
loop.run_until_complete(
bootstrap.async_setup_component(hass, emulated_hue.DOMAIN, {
emulated_hue.DOMAIN: {
emulated_hue.CONF_LISTEN_PORT: BRIDGE_SERVER_PORT,
emulated_hue.CONF_EXPOSE_BY_DEFAULT: True
}
}))
loop.run_until_complete(
bootstrap.async_setup_component(hass, light.DOMAIN, {
'light': [
{
'platform': 'demo',
}
]
}))
loop.run_until_complete(
bootstrap.async_setup_component(hass, script.DOMAIN, {
'script': {
'set_kitchen_light': {
'sequence': [
{
'service_template':
"light.turn_{{ requested_state }}",
'data_template': {
'entity_id': 'light.kitchen_lights',
'brightness': "{{ requested_level }}"
}
}
]
}
}
}))
loop.run_until_complete(
bootstrap.async_setup_component(hass, media_player.DOMAIN, {
'media_player': [
{
'platform': 'demo',
}
]
}))
loop.run_until_complete(
bootstrap.async_setup_component(hass, fan.DOMAIN, {
'fan': [
{
'platform': 'demo',
}
]
}))
# Kitchen light is explicitly excluded from being exposed
kitchen_light_entity = hass.states.get('light.kitchen_lights')
attrs = dict(kitchen_light_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE] = False
hass.states.async_set(
kitchen_light_entity.entity_id, kitchen_light_entity.state,
attributes=attrs)
# Expose the script
script_entity = hass.states.get('script.set_kitchen_light')
attrs = dict(script_entity.attributes)
attrs[emulated_hue.ATTR_EMULATED_HUE] = True
hass.states.async_set(
script_entity.entity_id, script_entity.state, attributes=attrs
)
return hass
@pytest.fixture
def hue_client(loop, hass_hue, test_client):
"""Create web client for emulated hue api."""
web_app = mock_http_component_app(hass_hue)
config = Config(None, {'type': 'alexa'})
HueUsernameView().register(web_app.router)
HueAllLightsStateView(config).register(web_app.router)
HueOneLightStateView(config).register(web_app.router)
HueOneLightChangeView(config).register(web_app.router)
return loop.run_until_complete(test_client(web_app))
@asyncio.coroutine
def test_discover_lights(hue_client):
"""Test the discovery of lights."""
result = yield from hue_client.get('/api/username/lights')
assert result.status == 200
assert 'application/json' in result.headers['content-type']
result_json = yield from result.json()
devices = set(val['uniqueid'] for val in result_json.values())
# Make sure the lights we added to the config are there
assert 'light.ceiling_lights' in devices
assert 'light.bed_light' in devices
assert 'script.set_kitchen_light' in devices
assert 'light.kitchen_lights' not in devices
assert 'media_player.living_room' in devices
assert 'media_player.bedroom' in devices
assert 'media_player.walkman' in devices
assert 'media_player.lounge_room' in devices
assert 'fan.living_room_fan' in devices
@asyncio.coroutine
def test_get_light_state(hass_hue, hue_client):
"""Test the getting of light state."""
# Turn office light on and set to 127 brightness
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_ON,
{
const.ATTR_ENTITY_ID: 'light.ceiling_lights',
light.ATTR_BRIGHTNESS: 127
},
blocking=True)
office_json = yield from perform_get_light_state(
hue_client, 'light.ceiling_lights', 200)
assert office_json['state'][HUE_API_STATE_ON] is True
assert office_json['state'][HUE_API_STATE_BRI] == 127
# Check all lights view
result = yield from hue_client.get('/api/username/lights')
assert result.status == 200
assert 'application/json' in result.headers['content-type']
result_json = yield from result.json()
assert 'light.ceiling_lights' in result_json
assert result_json['light.ceiling_lights']['state'][HUE_API_STATE_BRI] == \
127
# Turn bedroom light off
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_OFF,
{
const.ATTR_ENTITY_ID: 'light.bed_light'
},
blocking=True)
bedroom_json = yield from perform_get_light_state(
hue_client, 'light.bed_light', 200)
assert bedroom_json['state'][HUE_API_STATE_ON] is False
assert bedroom_json['state'][HUE_API_STATE_BRI] == 0
# Make sure kitchen light isn't accessible
yield from perform_get_light_state(
hue_client, 'light.kitchen_lights', 404)
@asyncio.coroutine
def test_put_light_state(hass_hue, hue_client):
"""Test the seeting of light states."""
yield from perform_put_test_on_ceiling_lights(hass_hue, hue_client)
# Turn the bedroom light on first
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_ON,
{const.ATTR_ENTITY_ID: 'light.bed_light',
light.ATTR_BRIGHTNESS: 153},
blocking=True)
bed_light = hass_hue.states.get('light.bed_light')
assert bed_light.state == STATE_ON
assert bed_light.attributes[light.ATTR_BRIGHTNESS] == 153
# Go through the API to turn it off
bedroom_result = yield from perform_put_light_state(
hass_hue, hue_client,
'light.bed_light', False)
bedroom_result_json = yield from bedroom_result.json()
assert bedroom_result.status == 200
assert 'application/json' in bedroom_result.headers['content-type']
assert len(bedroom_result_json) == 1
# Check to make sure the state changed
bed_light = hass_hue.states.get('light.bed_light')
assert bed_light.state == STATE_OFF
# Make sure we can't change the kitchen light state
kitchen_result = yield from perform_put_light_state(
hass_hue, hue_client,
'light.kitchen_light', True)
assert kitchen_result.status == 404
@asyncio.coroutine
def test_put_light_state_script(hass_hue, hue_client):
"""Test the setting of script variables."""
# Turn the kitchen light off first
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'light.kitchen_lights'},
blocking=True)
# Emulated hue converts 0-100% to 0-255.
level = 23
brightness = round(level * 255 / 100)
script_result = yield from perform_put_light_state(
hass_hue, hue_client,
'script.set_kitchen_light', True, brightness)
script_result_json = yield from script_result.json()
assert script_result.status == 200
assert len(script_result_json) == 2
kitchen_light = hass_hue.states.get('light.kitchen_lights')
assert kitchen_light.state == 'on'
assert kitchen_light.attributes[light.ATTR_BRIGHTNESS] == level
@asyncio.coroutine
def test_put_light_state_media_player(hass_hue, hue_client):
"""Test turning on media player and setting volume."""
# Turn the music player off first
yield from hass_hue.services.async_call(
media_player.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'media_player.walkman'},
blocking=True)
# Emulated hue converts 0.0-1.0 to 0-255.
level = 0.25
brightness = round(level * 255)
mp_result = yield from perform_put_light_state(
hass_hue, hue_client,
'media_player.walkman', True, brightness)
mp_result_json = yield from mp_result.json()
assert mp_result.status == 200
assert len(mp_result_json) == 2
walkman = hass_hue.states.get('media_player.walkman')
assert walkman.state == 'playing'
assert walkman.attributes[media_player.ATTR_MEDIA_VOLUME_LEVEL] == level
@asyncio.coroutine
def test_put_light_state_fan(hass_hue, hue_client):
"""Test turning on fan and setting speed."""
# Turn the fan off first
yield from hass_hue.services.async_call(
fan.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'fan.living_room_fan'},
blocking=True)
# Emulated hue converts 0-100% to 0-255.
level = 23
brightness = round(level * 255 / 100)
fan_result = yield from perform_put_light_state(
hass_hue, hue_client,
'fan.living_room_fan', True, brightness)
fan_result_json = yield from fan_result.json()
assert fan_result.status == 200
assert len(fan_result_json) == 2
living_room_fan = hass_hue.states.get('fan.living_room_fan')
assert living_room_fan.state == 'on'
assert living_room_fan.attributes[fan.ATTR_SPEED] == fan.SPEED_MEDIUM
# pylint: disable=invalid-name
@asyncio.coroutine
def test_put_with_form_urlencoded_content_type(hass_hue, hue_client):
"""Test the form with urlencoded content."""
# Needed for Alexa
yield from perform_put_test_on_ceiling_lights(
hass_hue, hue_client, 'application/x-www-form-urlencoded')
# Make sure we fail gracefully when we can't parse the data
data = {'key1': 'value1', 'key2': 'value2'}
result = yield from hue_client.put(
'/api/username/lights/light.ceiling_lights/state',
headers={
'content-type': 'application/x-www-form-urlencoded'
},
data=data,
)
assert result.status == 400
@asyncio.coroutine
def test_entity_not_found(hue_client):
"""Test for entity which are not found."""
result = yield from hue_client.get(
'/api/username/lights/not.existant_entity')
assert result.status == 404
result = yield from hue_client.put(
'/api/username/lights/not.existant_entity/state')
assert result.status == 404
@asyncio.coroutine
def test_allowed_methods(hue_client):
"""Test the allowed methods."""
result = yield from hue_client.get(
'/api/username/lights/light.ceiling_lights/state')
assert result.status == 405
result = yield from hue_client.put(
'/api/username/lights/light.ceiling_lights')
assert result.status == 405
result = yield from hue_client.put(
'/api/username/lights')
assert result.status == 405
@asyncio.coroutine
def test_proper_put_state_request(hue_client):
"""Test the request to set the state."""
# Test proper on value parsing
result = yield from hue_client.put(
'/api/username/lights/{}/state'.format(
'light.ceiling_lights'),
data=json.dumps({HUE_API_STATE_ON: 1234}))
assert result.status == 400
# Test proper brightness value parsing
result = yield from hue_client.put(
'/api/username/lights/{}/state'.format(
'light.ceiling_lights'),
data=json.dumps({
HUE_API_STATE_ON: True,
HUE_API_STATE_BRI: 'Hello world!'
}))
assert result.status == 400
# pylint: disable=invalid-name
def perform_put_test_on_ceiling_lights(hass_hue, hue_client,
content_type='application/json'):
"""Test the setting of a light."""
# Turn the office light off first
yield from hass_hue.services.async_call(
light.DOMAIN, const.SERVICE_TURN_OFF,
{const.ATTR_ENTITY_ID: 'light.ceiling_lights'},
blocking=True)
ceiling_lights = hass_hue.states.get('light.ceiling_lights')
assert ceiling_lights.state == STATE_OFF
# Go through the API to turn it on
office_result = yield from perform_put_light_state(
hass_hue, hue_client,
'light.ceiling_lights', True, 56, content_type)
assert office_result.status == 200
assert 'application/json' in office_result.headers['content-type']
office_result_json = yield from office_result.json()
assert len(office_result_json) == 2
# Check to make sure the state changed
ceiling_lights = hass_hue.states.get('light.ceiling_lights')
assert ceiling_lights.state == STATE_ON
assert ceiling_lights.attributes[light.ATTR_BRIGHTNESS] == 56
@asyncio.coroutine
def perform_get_light_state(client, entity_id, expected_status):
"""Test the gettting of a light state."""
result = yield from client.get('/api/username/lights/{}'.format(entity_id))
assert result.status == expected_status
if expected_status == 200:
assert 'application/json' in result.headers['content-type']
return (yield from result.json())
return None
@asyncio.coroutine
def perform_put_light_state(hass_hue, client, entity_id, is_on,
brightness=None, content_type='application/json'):
"""Test the setting of a light state."""
req_headers = {'Content-Type': content_type}
data = {HUE_API_STATE_ON: is_on}
if brightness is not None:
data[HUE_API_STATE_BRI] = brightness
result = yield from client.put(
'/api/username/lights/{}/state'.format(entity_id), headers=req_headers,
data=json.dumps(data).encode())
# Wait until state change is complete before continuing
yield from hass_hue.async_block_till_done()
return result
|
eagleamon/home-assistant
|
tests/components/emulated_hue/test_hue_api.py
|
Python
|
apache-2.0
| 14,968
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('reddit', '0004_auto_20160518_0017'),
]
operations = [
migrations.AlterField(
model_name='redditcredentials',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, unique=True),
),
]
|
kiwiheretic/logos-v2
|
reddit/migrations/0005_auto_20160518_0031.py
|
Python
|
apache-2.0
| 471
|
def run(*args, **kwargs):
_run(*args, **kwargs)
def _run(task=None):
if task is None:
return
task()
|
githubutilities/gas
|
gas/interface.py
|
Python
|
apache-2.0
| 106
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
from oslo_config import cfg
import oslo_messaging as messaging
from osprofiler import profiler
from senlin.common import consts
from senlin.common import context
# An alias for the default serializer
JsonPayloadSerializer = messaging.JsonPayloadSerializer
TRANSPORT = None
NOTIFICATION_TRANSPORT = None
NOTIFIER = None
class RequestContextSerializer(messaging.Serializer):
def __init__(self, base):
self._base = base
def serialize_entity(self, ctxt, entity):
if not self._base:
return entity
return self._base.serialize_entity(ctxt, entity)
def deserialize_entity(self, ctxt, entity):
if not self._base:
return entity
return self._base.deserialize_entity(ctxt, entity)
@staticmethod
def serialize_context(ctxt):
_context = ctxt.to_dict()
prof = profiler.get()
if prof:
trace_info = {
"hmac_key": prof.hmac_key,
"base_id": prof.get_base_id(),
"parent_id": prof.get_id()
}
_context.update({"trace_info": trace_info})
return _context
@staticmethod
def deserialize_context(ctxt):
trace_info = ctxt.pop("trace_info", None)
if trace_info:
profiler.init(**trace_info)
return context.RequestContext.from_dict(ctxt)
def setup(url=None, optional=False):
"""Initialise the oslo_messaging layer."""
    global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER
if url and url.startswith("fake://"):
# NOTE: oslo_messaging fake driver uses time.sleep
# for task switch, so we need to monkey_patch it
eventlet.monkey_patch(time=True)
messaging.set_transport_defaults('senlin')
if not TRANSPORT:
exmods = ['senlin.common.exception']
try:
TRANSPORT = messaging.get_rpc_transport(
cfg.CONF, url, allowed_remote_exmods=exmods)
except messaging.InvalidTransportURL as e:
TRANSPORT = None
if not optional or e.url:
# NOTE: oslo_messaging is configured but unloadable
# so reraise the exception
raise
if not NOTIFIER:
exmods = ['senlin.common.exception']
try:
NOTIFICATION_TRANSPORT = messaging.get_notification_transport(
cfg.CONF, allowed_remote_exmods=exmods)
except Exception as e:
raise
serializer = RequestContextSerializer(JsonPayloadSerializer())
NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
serializer=serializer,
topics=cfg.CONF.notification_topics)
def cleanup():
"""Cleanup the oslo_messaging layer."""
global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER
if TRANSPORT:
TRANSPORT.cleanup()
TRANSPORT = None
NOTIFIER = None
if NOTIFICATION_TRANSPORT:
NOTIFICATION_TRANSPORT.cleanup()
NOTIFICATION_TRANSPORT = None
def get_rpc_server(target, endpoint, serializer=None):
"""Return a configured oslo_messaging rpc server."""
if serializer is None:
serializer = JsonPayloadSerializer()
serializer = RequestContextSerializer(serializer)
return messaging.get_rpc_server(TRANSPORT, target, [endpoint],
executor='eventlet',
serializer=serializer)
def get_rpc_client(topic, server, serializer=None):
"""Return a configured oslo_messaging RPCClient."""
target = messaging.Target(topic=topic, server=server,
version=consts.RPC_API_VERSION_BASE)
if serializer is None:
serializer = JsonPayloadSerializer()
serializer = RequestContextSerializer(serializer)
return messaging.RPCClient(TRANSPORT, target, serializer=serializer)
def get_notifier(publisher_id):
"""Return a configured oslo_messaging notifier."""
global NOTIFIER
return NOTIFIER.prepare(publisher_id=publisher_id)
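# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). Topic, server
# and publisher names are placeholders; setup() must be called before any
# client or notifier is created, and cleanup() on shutdown.
# ---------------------------------------------------------------------------
# setup()
# client = get_rpc_client('senlin-engine', 'host-1')
# notifier = get_notifier(publisher_id='senlin-api')
# notifier.info({}, 'cluster.create.start', {'cluster_id': 'c-123'})
# cleanup()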
|
stackforge/senlin
|
senlin/common/messaging.py
|
Python
|
apache-2.0
| 4,618
|
"""
Class for parallelizing RandomizedSearchCV jobs in scikit-learn
"""
from sklearn.model_selection import ParameterSampler
from spark_sklearn.base_search import SparkBaseSearchCV
class RandomizedSearchCV(SparkBaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all combinations of parameter values are tried
out, but rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used for all parameters.
It is highly recommended to use continuous distributions for continuous
parameters.
Parameters
----------
estimator : estimator object.
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Not used; exists for scikit-learn compatibility.
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int, RandomState instance or None, optional, default=None
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance the below given table
+------------+-----------+------------+-----------------+---+---------+
|param_kernel|param_gamma|param_degree|split0_test_score|...|rank_....|
+============+===========+============+=================+===+=========+
| 'poly' | -- | 2 | 0.8 |...| 2 |
+------------+-----------+------------+-----------------+---+---------+
| 'poly' | -- | 3 | 0.7 |...| 4 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.1 | -- | 0.8 |...| 3 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.2 | -- | 0.9 |...| 1 |
+------------+-----------+------------+-----------------+---+---------+
will be represented by a ``cv_results_`` dict of::
{
'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
mask = [False False False False]...)
'param_gamma': masked_array(data = [-- -- 0.1 0.2],
mask = [ True True False False]...),
'param_degree': masked_array(data = [2.0 3.0 -- --],
mask = [False False True True]...),
'split0_test_score' : [0.8, 0.7, 0.8, 0.9],
'split1_test_score' : [0.82, 0.5, 0.7, 0.78],
'mean_test_score' : [0.81, 0.60, 0.75, 0.82],
'std_test_score' : [0.02, 0.01, 0.03, 0.03],
'rank_test_score' : [2, 4, 3, 1],
'split0_train_score' : [0.8, 0.9, 0.7],
'split1_train_score' : [0.82, 0.5, 0.7],
'mean_train_score' : [0.81, 0.7, 0.7],
'std_train_score' : [0.03, 0.03, 0.04],
'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],
'std_fit_time' : [0.01, 0.02, 0.01, 0.01],
'mean_score_time' : [0.007, 0.06, 0.04, 0.04],
'std_score_time' : [0.001, 0.002, 0.003, 0.005],
'params' : [{'kernel': 'poly', 'degree': 2}, ...],
}
NOTE that the key ``'params'`` is used to store a list of parameter
settings dict for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, sc, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
self.fit_params = fit_params if fit_params is not None else {}
self.sc = sc
self.cv_results_ = None
def fit(self, X, y=None, groups=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, groups, sampled_params)
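# ----------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the upstream file).
# It assumes a local Spark installation plus scikit-learn and scipy are
# available to the driver and executors; the estimator, distributions and
# dataset below are example choices only, not requirements of this class.
if __name__ == '__main__':
    from pyspark import SparkContext
    from scipy.stats import expon
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    sc = SparkContext(appName='random-search-example')
    X, y = load_iris(return_X_y=True)
    # Sample 20 candidates from continuous distributions; the cross-validation
    # work for the candidates is distributed over Spark instead of local joblib.
    param_dist = {'C': expon(scale=10), 'gamma': expon(scale=0.1)}
    search = RandomizedSearchCV(sc, SVC(), param_dist, n_iter=20, cv=3)
    search.fit(X, y)
    print(search.best_params_, search.best_score_)
    sc.stop()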
|
databricks/spark-sklearn
|
python/spark_sklearn/random_search.py
|
Python
|
apache-2.0
| 10,095
|
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
from common import *
import testdata
class oldstyle:
def __init__(self, value): self.value = value
def __repr__(self): return "oldstyle(%s)" % self.value
def __add__(self, other): return self.value + other
def __sub__(self, other): return self.value - other
def __mul__(self, other): return self.value * other
def __div__(self, other): return self.value / other
def __floordiv__(self, other): return self.value // other
def __mod__(self, other): return self.value % other
def __divmod__(self, other): return divmod(self.value, other)
def __pow__(self, other): return self.value ** other
def __lshift__(self, other): return self.value << other
def __rshift__(self, other): return self.value >> other
def __and__(self, other): return self.value & other
def __xor__(self, other): return self.value ^ other
def __or__(self, other): return self.value | other
class oldstyle_reflect:
def __init__(self, value): self.value = value
def __repr__(self): return "oldstyle_reflect(%s)" % self.value
def __radd__(self, other): return other + self.value
def __rsub__(self, other): return other - self.value
def __rmul__(self, other):
print("\toldstyle_reflect.__rmul__")
return other * self.value
def __rdiv__(self, other): return other / self.value
def __rfloordiv__(self, other): return other // self.value
def __rmod__(self, other): return other % self.value
def __rdivmod__(self, other): return divmod(other, self.value)
def __rpow__(self, other): return other ** self.value
def __rlshift__(self, other): return other << self.value
def __rrshift__(self, other): return other >> self.value
def __rand__(self, other): return self.value & other
def __rxor__(self, other): return self.value ^ other
def __ror__(self, other): return self.value | other
class oldstyle_inplace:
def __init__(self, value): self.value = value
def __repr__(self): return "oldstyle_inplace(%s)" % self.value
def __iadd__(self, other): return self.value + other
def __isub__(self, other): return self.value - other
def __imul__(self, other): return self.value * other
def __idiv__(self, other): return self.value / other
def __ifloordiv__(self, other): return self.value // other
def __imod__(self, other): return self.value % other
def __idivmod__(self, other): return divmod(self.value, other)
def __ipow__(self, other): return self.value ** other
def __ilshift__(self, other): return self.value << other
def __irshift__(self, other): return self.value >> other
def __iand__(self, other): return self.value & other
def __ixor__(self, other): return self.value ^ other
def __ior__(self, other): return self.value | other
class oldstyle_notdefined:
def __init__(self, value): self.value = value
def __repr__(self): return "oldstyle_notdefined(%s)" % self.value
class newstyle(object):
def __init__(self, value): self.value = value
def __repr__(self): return "newstyle(%s, %r)" % (self.value, type(self.value))
def __add__(self, other): return self.value + other
def __sub__(self, other): return self.value - other
def __mul__(self, other): return self.value * other
def __div__(self, other): return self.value / other
def __floordiv__(self, other): return self.value // other
def __mod__(self, other): return self.value % other
def __divmod__(self, other): return divmod(self.value, other)
def __pow__(self, other): return self.value ** other
def __lshift__(self, other): return self.value << other
def __rshift__(self, other): return self.value >> other
def __and__(self, other): return self.value & other
def __xor__(self, other): return self.value ^ other
def __or__(self, other): return self.value | other
class newstyle_reflect(object):
def __init__(self, value): self.value = value
def __repr__(self): return "newstyle_reflect(%s, %r)" % (self.value, type(self.value))
def __radd__(self, other): return other + self.value
def __rsub__(self, other): return other - self.value
def __rmul__(self, other):
print("\tnewstyle_reflect.__rmul__")
return other * self.value
def __rdiv__(self, other): return other / self.value
def __rfloordiv__(self, other): return other // self.value
def __rmod__(self, other): return other % self.value
def __rdivmod__(self, other): return divmod(other, self.value)
def __rpow__(self, other): return other ** self.value
def __rlshift__(self, other): return other << self.value
def __rrshift__(self, other): return other >> self.value
def __rand__(self, other): return self.value & other
def __rxor__(self, other): return self.value ^ other
def __ror__(self, other): return self.value | other
class newstyle_inplace(object):
def __init__(self, value): self.value = value
def __repr__(self): return "newstyle_inplace(%s, %r)" % (self.value, type(self.value))
def __iadd__(self, other): return self.value + other
def __isub__(self, other): return self.value - other
def __imul__(self, other): return self.value * other
def __idiv__(self, other): return self.value / other
def __ifloordiv__(self, other): return self.value // other
def __imod__(self, other): return self.value % other
def __idivmod__(self, other): return divmod(self.value, other)
def __ipow__(self, other): return self.value ** other
def __ilshift__(self, other): return self.value << other
def __irshift__(self, other): return self.value >> other
def __iand__(self, other): return self.value & other
def __ixor__(self, other): return self.value ^ other
def __ior__(self, other): return self.value | other
class newstyle_notdefined(object):
def __init__(self, value): self.value = value
def __repr__(self): return "newstyle_notdefined(%s, %r)" % (self.value, type(self.value))
import sys
class common(object):
def normal(self, leftc, rightc):
for a in leftc:
for b in rightc:
try:
printwith("case", a, "+", b, type(a), type(b))
printwithtype(a + b)
except:
printwith("same", sys.exc_info()[0])
try:
printwith("case", a, "-", b, type(a), type(b))
printwithtype(a - b)
except:
printwith("same", sys.exc_info()[0])
try:
printwith("case", a, "*", b, type(a), type(b))
printwithtype(a * b)
except:
printwith("same", sys.exc_info()[0])
try:
printwith("case", a, "/", b, type(a), type(b))
printwithtype(a / b)
except:
printwith("same", sys.exc_info()[0])
try:
printwith("case", a, "//", b, type(a), type(b))
printwithtype(a // b)
except:
printwith("same", sys.exc_info()[0])
try:
printwith("case", a, "%", b, type(a), type(b))
printwithtype(a % b)
except:
printwith("same", sys.exc_info()[0])
try:
printwith("case", a, "**", b, type(a), type(b))
printwithtype(a ** b)
except:
printwith("same", sys.exc_info()[0])
try:
printwith("case", a, "<<", b, type(a), type(b))
printwithtype(a << b)
except:
printwith("same", sys.exc_info()[0])
try:
printwith("case", a, ">>", b, type(a), type(b))
printwithtype(a >> b)
except:
printwith("same", sys.exc_info()[0])
try:
printwith("case", a, "&", b, type(a), type(b))
printwithtype(a & b)
except:
printwith("same", sys.exc_info()[0])
try:
printwith("case", a, "^", b, type(a), type(b))
printwithtype(a ^ b)
except:
printwith("same", sys.exc_info()[0])
try:
printwith("case", a, "|", b, type(a), type(b))
printwithtype(a | b)
except:
printwith("same", sys.exc_info()[0])
def clone_list(self, l):
l2 = []
for x in l:
if x is newstyle_inplace:
l2.append(newstyle_inplace(x.value))
elif x is oldstyle_inplace:
l2.append(oldstyle_inplace(x.value))
else :
l2.append(x)
return l2
def inplace(self, leftc, rightc):
rc = self.clone_list(rightc)
for b in rc:
lc = self.clone_list(leftc)
for a in lc:
try:
op = "+"
printwith("case", "%s %s= %s" % (a, op, b), type(a), type(b))
a += b
printwithtype(a)
except:
printwith("same", sys.exc_info()[0])
lc = self.clone_list(leftc)
for a in lc:
try:
op = "-"
printwith("case", "%s %s= %s" % (a, op, b), type(a), type(b))
a -= b
printwithtype(a)
except:
printwith("same", sys.exc_info()[0])
lc = self.clone_list(leftc)
for a in lc:
try:
op = "*"
printwith("case", "%s %s= %s" % (a, op, b), type(a), type(b))
a *= b
printwithtype(a)
except:
printwith("same", sys.exc_info()[0])
lc = self.clone_list(leftc)
for a in lc:
try:
op = "//"
printwith("case", "%s %s= %s" % (a, op, b), type(a), type(b))
a //= b
printwithtype(a)
except:
printwith("same", sys.exc_info()[0])
lc = self.clone_list(leftc)
for a in lc:
try:
op = "%"
printwith("case", "%s %s= %s" % (a, op, b), type(a), type(b))
a %= b
printwithtype(a)
except:
printwith("same", sys.exc_info()[0])
lc = self.clone_list(leftc)
for a in lc:
try:
op = "**"
printwith("case", "%s %s= %s" % (a, op, b), type(a), type(b))
a **= b
printwithtype(a)
except:
printwith("same", sys.exc_info()[0])
lc = self.clone_list(leftc)
for a in lc:
try:
op = "<<"
printwith("case", "%s %s= %s" % (a, op, b), type(a), type(b))
a <<= b
printwithtype(a)
except:
printwith("same", sys.exc_info()[0])
lc = self.clone_list(leftc)
for a in lc:
try:
op = ">>"
printwith("case", "%s %s= %s" % (a, op, b), type(a), type(b))
a >>= b
printwithtype(a)
except:
printwith("same", sys.exc_info()[0])
lc = self.clone_list(leftc)
for a in lc:
try:
op = "&"
printwith("case", "%s %s= %s" % (a, op, b), type(a), type(b))
a &= b
printwithtype(a)
except:
printwith("same", sys.exc_info()[0])
lc = self.clone_list(leftc)
for a in lc:
try:
op = "^"
printwith("case", "%s %s= %s" % (a, op, b), type(a), type(b))
a ^= b
printwithtype(a)
except:
printwith("same", sys.exc_info()[0])
lc = self.clone_list(leftc)
for a in lc:
try:
op = "|"
printwith("case", "%s %s= %s" % (a, op, b), type(a), type(b))
a |= b
printwithtype(a)
except:
printwith("same", sys.exc_info()[0])
class ops_simple(common):
def __init__(self):
self.collection = testdata.merge_lists(
[None],
testdata.list_bool,
testdata.list_int,
testdata.list_float,
testdata.list_long[:-1], # the last number is very long
testdata.list_complex,
testdata.list_myint,
testdata.list_myfloat,
testdata.list_mylong,
testdata.list_mycomplex,
testdata.get_Int64_Byte(),
)
self.collection_oldstyle = [oldstyle(x) for x in self.collection]
self.collection_oldstyle_reflect = [oldstyle_reflect(x) for x in self.collection]
self.collection_oldstyle_notdefined = [oldstyle_notdefined(x) for x in self.collection]
self.collection_newstyle = [newstyle(x) for x in self.collection]
self.collection_newstyle_reflect = [newstyle_reflect(x) for x in self.collection]
self.collection_newstyle_notdefined = [newstyle_notdefined(x) for x in self.collection]
self.collection_oldstyle_inplace = [oldstyle_inplace(x) for x in self.collection]
self.collection_newstyle_inplace = [newstyle_inplace(x) for x in self.collection]
def test_normal(self): super(ops_simple, self).normal(self.collection, self.collection)
def test_normal_oc_left(self): super(ops_simple, self).normal(self.collection_oldstyle, self.collection)
def test_normal_oc_right(self): super(ops_simple, self).normal(self.collection, self.collection_oldstyle)
def test_normal_nc_left(self): super(ops_simple, self).normal(self.collection_newstyle, self.collection)
def test_normal_nc_right(self): super(ops_simple, self).normal(self.collection, self.collection_newstyle)
def test_reflect_oc_right(self): super(ops_simple, self).normal(self.collection, self.collection_oldstyle_reflect)
def test_reflect_nc_right(self): super(ops_simple, self).normal(self.collection, self.collection_newstyle_reflect)
def test_oc_notdefined(self): super(ops_simple, self).normal(self.collection_oldstyle_notdefined, self.collection)
def test_nc_notdefined(self): super(ops_simple, self).normal(self.collection_newstyle_notdefined, self.collection)
def test_oc_notdefined_oc_reflect(self): super(ops_simple, self).normal(self.collection_oldstyle_notdefined, self.collection_oldstyle_reflect)
def test_nc_notdefined_nc_reflect(self): super(ops_simple, self).normal(self.collection_newstyle_notdefined, self.collection_newstyle_reflect)
def test_inplace(self): super(ops_simple, self).inplace(self.collection, self.collection)
def test_inplace_ol(self): super(ops_simple, self).inplace(self.collection_oldstyle_inplace, self.collection)
def test_inplace_nl(self): super(ops_simple, self).inplace(self.collection_newstyle_inplace, self.collection)
runtests(ops_simple)
|
IronLanguages/ironpython3
|
Tests/compat/sbs_simple_ops.py
|
Python
|
apache-2.0
| 17,059
|
from .fields import BitField, Field
from nettest.exceptions import NettestError
import struct
class PacketMeta(type):
def __new__(cls, name, bases, attrs):
fields = attrs.get('fields')
if fields is None:
raise NettestError(_("packet class must have 'fields' field"))
_fields = []
for fieldname in attrs['fields']:
field = attrs.get(fieldname)
if field is None:
for baseclass in bases:
field = getattr(baseclass, fieldname)
if field is not None:
break
else:
                    raise NettestError(_("field '%s' doesn't exist in class %s")%(fieldname, name))
if not cls.__check_field_type(cls, field):
raise NettestError(_("field '%s' in class %s should be in type (Field, Packet, list)")%(fieldname, name))
_fields.append((fieldname, field))
if isinstance(field, Field):
attrs[fieldname] = field.default_value
if '_fields' in attrs:
raise NettestError(_("the name '_fields' is reserved in class %s")%(name))
attrs['_fields']= _fields
return super(PacketMeta, cls).__new__(cls, name, bases, attrs)
@staticmethod
def __check_field_type(cls, field):
if not isinstance(field, (Field, Packet, list)):
return False
if isinstance(field, (list)):
for subfield in field:
if not cls.__check_field_type(cls, subfield):
return False
return True
class BitDumper(object):
def __init__(self):
self.data= []
self.data_len = []
self.data_len_sum = 0
def clear(self):
self.data = []
self.data_len = []
self.data_len_sum = 0
def push(self, data, length):
data = int(data)
        if data < 0 or data >= 2**length:
raise NettestError(_("bit value out of range"))
self.data.append(data)
self.data_len.append(length)
self.data_len_sum += length
def dump(self):
if self.data_len_sum % 8 != 0:
raise NettestError(_("incorrect bit field length"))
data = 0
left_len = self.data_len_sum
index = 0
for field_data in self.data:
data += field_data<<(left_len - self.data_len[index])
left_len -= self.data_len[index]
index += 1
length = self.data_len_sum / 8
if length == 1:
return struct.pack('!B', int(data))
elif length == 2:
return struct.pack('!H', int(data))
elif length == 4:
return struct.pack('!I', int(data))
elif length == 8:
return struct.pack('!Q', int(data))
else:
raise NettestError(_("too long bit field"))
class BitLoader(object):
def __init__(self, packet):
self.fields = []
self.bit_len_sum = 0
self.packet = packet
def clear(self):
self.fields = []
self.bit_len_sum = 0
def push(self, fieldname, field):
self.fields.append((fieldname,field))
self.bit_len_sum += field.length
def load(self, data):
if self.bit_len_sum % 8 != 0:
raise NettestError(_("incorrect bit field length"))
byte_len = int(self.bit_len_sum / 8)
data = data[:byte_len]
loaded_len = 0
for field_name, field in self.fields:
field_data = field.from_netbytes(data, loaded_len)
loaded_len += field.length
setattr(self.packet, field_name, field_data)
return byte_len
class Packet(object, metaclass=PacketMeta):
'''define field order
'''
fields=[]
def __init__(self):
for field_name, field in self._fields:
if isinstance(field, Packet):
setattr(self, field_name, field.__class__())
def dump(self):
'''Serialize self to bytes
'''
data = b''
bit_dumper = BitDumper()
for field_name, field in self._fields:
field_value = getattr(self, field_name)
if field_value is None:
raise NettestError(_("%s is None and haven't default value")%(field_name))
if isinstance(field, BitField):
bit_dumper.push(field_value, field.length)
continue
else:
if bit_dumper.data_len_sum > 0:
data += bit_dumper.dump()
bit_dumper.clear()
if isinstance(field, Packet):
data += field_value.dump()
continue
data += field.to_netbytes(field_value)
if bit_dumper.data_len_sum > 0:
data += bit_dumper.dump()
return data
# def __dump_list_data(self, fields):
# data = b''
# for field in fields:
# if isinstance(field, Packet):
# data += field.dump()
# continue
# if isinstance(field, list):
# data += self.__dump_list_data()
# continue
# if isinstance(field, Field):
# data += field.to_netbytes(field_value)
# continue
def load(self, data):
'''Deserialize bytes to a self.
if success, return the total data length used
else return None
'''
loaded_len = 0
bit_loader = BitLoader(self)
for field_name, field in self._fields:
if isinstance(field, BitField):
bit_loader.push(field_name, field)
continue
else:
if bit_loader.bit_len_sum > 0:
loaded_len += bit_loader.load(data[loaded_len:])
bit_loader.clear()
if isinstance(field, Packet):
field_value = getattr(self, field_name)
length = field_value.load(data[loaded_len:])
if length is None:
return None
loaded_len += length
continue
field_data = field.from_netbytes(data[loaded_len:])
if field_data is None:
return None
loaded_len += field.length
setattr(self, field_name, field_data)
if bit_loader.bit_len_sum > 0:
loaded_len += bit_loader.load(data[loaded_len:])
return loaded_len
def to_printable(self):
string = ''
string += '-'*20+str(self.__class__.__name__)+'-'*20+'\n'
for field_name, field in self._fields:
field_value = getattr(self, field_name)
if field_value is None:
string += '%s\tNone\n'%(field_name)
elif isinstance(field, Packet):
string += '%s\t%s\n'%(field_name, field_value.to_printable())
else:
string += '%s\t%s\n'%(field_name, field.to_printable(field_value))
string += '-'*(40+len(self.__class__.__name__))+'\n'
return string
def __eq__(self, other):
for field_name in self.fields:
field_value1 = getattr(self, field_name)
field_value2 = getattr(other, field_name)
if field_value1 != field_value2:
return False
return True
@property
def length(self):
total_len = 0
bit_len = 0
for field_name, field in self._fields:
if isinstance(field, BitField):
bit_len += field.length
elif field.length > 0:
total_len += field.length
else:
field_value = getattr(self, field_name)
total_len += len(field_value)
total_len += int(bit_len/8)
return total_len
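# ----------------------------------------------------------------------------
# Illustrative sketch (editor's addition): how a concrete packet built on the
# PacketMeta/Packet machinery above is typically declared and round-tripped.
# The concrete field constructors live in nettest.packets.fields and are not
# visible in this module, so UInt8Field/UInt16Field below are hypothetical
# placeholders; only the pattern (a `fields` order list plus one class
# attribute per field) comes from this file.
#
#   class Header(Packet):
#       fields = ['version', 'length']
#       version = UInt8Field(default=1)      # hypothetical field type
#       length = UInt16Field(default=0)      # hypothetical field type
#
#   pkt = Header()
#   pkt.length = 42
#   raw = pkt.dump()            # serialize to network-order bytes
#   parsed = Header()
#   used = parsed.load(raw)     # returns the number of bytes consumed, or None
#   print(parsed.to_printable())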
|
public0821/nettest
|
nettest/packets/base.py
|
Python
|
apache-2.0
| 7,940
|
from setuptools import setup, find_packages
exec(open('react/version.py').read())
setup(
name='react',
description='Generate fragments of a molecule using smirks',
version=__version__,
packages=find_packages(),
url='https://github.com/3D-e-Chem/python-modified-tanimoto',
author='Stefan Verhoeven',
author_email='s.verhoeven@esciencecenter.nl',
install_requires=['nose', 'coverage', 'mock'],
entry_points={
'console_scripts': [
'react=react.script:main',
],
},
license='Apache',
classifiers=[
'License :: OSI Approved :: Apache Software License'
]
)
|
3D-e-Chem/rdkit-react
|
setup.py
|
Python
|
apache-2.0
| 639
|
import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
schema_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'../schemas/particle.xsd'))
code = pyxb.binding.generate.GeneratePython(schema_location=schema_path)
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
from pyxb.utils import domutils
def ToDOM (instance, tag=None):
return instance.toDOM().documentElement
class TestParticle (unittest.TestCase):
def test_bad_creation (self):
xml = '<h01 xmlns="URN:test"/>'
dom = pyxb.utils.domutils.StringToDOM(xml)
# Creating with wrong element
self.assertRaises(pyxb.StructuralBadDocumentError, h01b.createFromDOM, dom.documentElement)
def test_h01_empty (self):
xml = '<ns1:h01 xmlns:ns1="URN:test"/>'
dom = pyxb.utils.domutils.StringToDOM(xml)
instance = h01.createFromDOM(dom.documentElement)
self.assert_(instance.elt is None)
self.assertEqual(ToDOM(instance).toxml("utf-8"), xml)
def test_h01_elt (self):
xml = '<ns1:h01 xmlns:ns1="URN:test"><elt/></ns1:h01>'
dom = pyxb.utils.domutils.StringToDOM(xml)
instance = h01.createFromDOM(dom.documentElement)
self.assert_(instance.elt is not None)
self.assertEqual(ToDOM(instance).toxml("utf-8"), xml)
def test_h01_elt2 (self):
xml = '<h01 xmlns="URN:test"><elt/><elt/></h01>'
dom = pyxb.utils.domutils.StringToDOM(xml)
self.assertRaises(ExtraContentError, h01.createFromDOM, dom.documentElement)
def test_h01b_empty (self):
xml = '<ns1:h01b xmlns:ns1="URN:test"/>'
dom = pyxb.utils.domutils.StringToDOM(xml)
instance = h01b.createFromDOM(dom.documentElement)
self.assert_(instance.elt is None)
self.assertEqual(ToDOM(instance).toxml("utf-8"), xml)
def test_h01b_elt (self):
xml = '<ns1:h01b xmlns:ns1="URN:test"><elt/></ns1:h01b>'
dom = pyxb.utils.domutils.StringToDOM(xml)
instance = h01b.createFromDOM(dom.documentElement)
self.assert_(instance.elt is not None)
self.assertEqual(ToDOM(instance).toxml("utf-8"), xml)
def test_h01b_elt2 (self):
xml = '<ns1:h01b xmlns:ns1="URN:test"><elt/><elt/></ns1:h01b>'
dom = pyxb.utils.domutils.StringToDOM(xml)
self.assertRaises(ExtraContentError, h01b.createFromDOM, dom.documentElement)
def test_h11_empty (self):
xml = '<ns1:h11 xmlns:ns1="URN:test"/>'
dom = pyxb.utils.domutils.StringToDOM(xml)
self.assertRaises(MissingContentError, h11.createFromDOM, dom.documentElement)
def test_h11_elt (self):
xml = '<ns1:h11 xmlns:ns1="URN:test"><elt/></ns1:h11>'
dom = pyxb.utils.domutils.StringToDOM(xml)
instance = h11.createFromDOM(dom.documentElement)
self.assert_(instance.elt is not None)
self.assertEqual(ToDOM(instance).toxml("utf-8"), xml)
def test_h24 (self):
xml = '<h24 xmlns="URN:test"></h24>'
dom = pyxb.utils.domutils.StringToDOM(xml)
self.assertRaises(MissingContentError, h24.createFromDOM, dom.documentElement)
for num_elt in range(0, 5):
xml = '<ns1:h24 xmlns:ns1="URN:test">%s</ns1:h24>' % (''.join(num_elt * ['<elt/>']),)
dom = pyxb.utils.domutils.StringToDOM(xml)
if 2 > num_elt:
self.assertRaises(MissingContentError, h24.createFromDOM, dom.documentElement)
elif 4 >= num_elt:
instance = h24.createFromDOM(dom.documentElement)
self.assertEqual(num_elt, len(instance.elt))
self.assertEqual(ToDOM(instance).toxml("utf-8"), xml)
else:
self.assertRaises(ExtraContentError, h24.createFromDOM, dom.documentElement)
def test_h24b (self):
xml = '<ns1:h24b xmlns:ns1="URN:test"></ns1:h24b>'
dom = pyxb.utils.domutils.StringToDOM(xml)
self.assertRaises(MissingContentError, h24b.createFromDOM, dom.documentElement)
for num_elt in range(0, 5):
xml = '<ns1:h24b xmlns:ns1="URN:test">%s</ns1:h24b>' % (''.join(num_elt * ['<elt/>']),)
dom = pyxb.utils.domutils.StringToDOM(xml)
if 2 > num_elt:
self.assertRaises(MissingContentError, h24b.createFromDOM, dom.documentElement)
elif 4 >= num_elt:
instance = h24b.createFromDOM(dom.documentElement)
self.assertEqual(num_elt, len(instance.elt))
self.assertEqual(ToDOM(instance).toxml("utf-8"), xml)
else:
self.assertRaises(ExtraContentError, h24b.createFromDOM, dom.documentElement)
if __name__ == '__main__':
unittest.main()
|
jonfoster/pyxb1
|
tests/drivers/test-particle.py
|
Python
|
apache-2.0
| 4,830
|
from flask.ext import restful
from . import api
class Welcome(restful.Resource):
def get(self):
return api.send_static_file('index.html')
|
modulo-/knoydart
|
api/api_0/apiRequest/Welcome.py
|
Python
|
apache-2.0
| 153
|
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
try:
import ordereddict as collections
except ImportError: # pragma: no cover
import collections # pragma: no cover
class DictSerializableModel(object):
    """A dictionary-serializable model mixin.
    A class that can be serialized into a dictionary and deserialized
    from a dictionary.
    """
def to_dict(self):
"""Return dict representation (serialize into dictionary) of the class.
"""
items = [(name, value) for name, value in
inspect.getmembers(self, lambda o: not inspect.ismethod(o))
if not name.startswith("_")]
return collections.OrderedDict(items)
def from_dict(self, attributes):
"""Convert a dictionary into a object/Or copy constructor
Update the current instance based on attribute->value items in
*attributes* dictionary.
"""
for attribute in attributes:
setattr(self, attribute, attributes[attribute])
return self
@classmethod
def init_from_dict(cls, input_dict):
"""Construct a model instance from a dictionary.
This is only meant to be used for converting a
response model into a model.
When converting a request-model into a model,
use to_dict.
"""
raise NotImplementedError
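# Illustrative usage (editor's addition): a minimal model exercising the
# mixin's to_dict()/from_dict() round trip. The Flavor class and its
# attributes are invented for this sketch and are not part of the Poppy
# model package.
if __name__ == '__main__':
    class Flavor(DictSerializableModel):
        def __init__(self):
            self.name = 'cdn'
            self.limit = 10
    flavor = Flavor()
    print(flavor.to_dict())          # OrderedDict([('limit', 10), ('name', 'cdn')])
    flavor.from_dict({'limit': 25})  # update attributes in place
    print(flavor.limit)              # 25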
|
obulpathi/poppy
|
poppy/model/common.py
|
Python
|
apache-2.0
| 1,962
|
from gpu import *
LAMP_TYPES = [
GPU_DYNAMIC_LAMP_DYNVEC,
GPU_DYNAMIC_LAMP_DYNCO,
GPU_DYNAMIC_LAMP_DYNIMAT,
GPU_DYNAMIC_LAMP_DYNPERSMAT,
    GPU_DYNAMIC_LAMP_DYNENERGY,
GPU_DYNAMIC_LAMP_DYNCOL,
GPU_DYNAMIC_LAMP_DISTANCE,
GPU_DYNAMIC_LAMP_ATT1,
GPU_DYNAMIC_LAMP_ATT2,
GPU_DYNAMIC_LAMP_SPOTSIZE,
GPU_DYNAMIC_LAMP_SPOTBLEND,
]
MIST_TYPES = [
GPU_DYNAMIC_MIST_ENABLE,
GPU_DYNAMIC_MIST_START,
GPU_DYNAMIC_MIST_DISTANCE,
GPU_DYNAMIC_MIST_INTENSITY,
GPU_DYNAMIC_MIST_TYPE,
GPU_DYNAMIC_MIST_COLOR,
]
WORLD_TYPES = [
GPU_DYNAMIC_HORIZON_COLOR,
GPU_DYNAMIC_AMBIENT_COLOR,
]
MATERIAL_TYPES = [
GPU_DYNAMIC_MAT_DIFFRGB,
GPU_DYNAMIC_MAT_REF,
GPU_DYNAMIC_MAT_SPECRGB,
GPU_DYNAMIC_MAT_SPEC,
GPU_DYNAMIC_MAT_HARD,
GPU_DYNAMIC_MAT_EMIT,
GPU_DYNAMIC_MAT_AMB,
GPU_DYNAMIC_MAT_ALPHA,
]
TYPE_TO_NAME = {
GPU_DYNAMIC_OBJECT_VIEWMAT : 'view_mat',
GPU_DYNAMIC_OBJECT_MAT : 'model_mat',
GPU_DYNAMIC_OBJECT_VIEWIMAT : 'inv_view_mat',
GPU_DYNAMIC_OBJECT_IMAT : 'inv_model_mat',
GPU_DYNAMIC_OBJECT_COLOR : 'color',
GPU_DYNAMIC_OBJECT_AUTOBUMPSCALE : 'auto_bump_scale',
GPU_DYNAMIC_MIST_ENABLE : 'use_mist',
GPU_DYNAMIC_MIST_START : 'start',
GPU_DYNAMIC_MIST_DISTANCE : 'depth',
GPU_DYNAMIC_MIST_INTENSITY : 'intensity',
GPU_DYNAMIC_MIST_TYPE : 'falloff',
GPU_DYNAMIC_MIST_COLOR : 'color',
GPU_DYNAMIC_HORIZON_COLOR : 'horizon_color',
GPU_DYNAMIC_AMBIENT_COLOR : 'ambient_color',
GPU_DYNAMIC_LAMP_DYNVEC : 'dynvec',
GPU_DYNAMIC_LAMP_DYNCO : 'dynco',
GPU_DYNAMIC_LAMP_DYNIMAT : 'dynimat',
GPU_DYNAMIC_LAMP_DYNPERSMAT : 'dynpersmat',
GPU_DYNAMIC_LAMP_DYNENERGY : 'energy',
GPU_DYNAMIC_LAMP_DYNCOL : 'color',
GPU_DYNAMIC_LAMP_DISTANCE : 'distance',
GPU_DYNAMIC_LAMP_ATT1 : 'linear_attenuation',
GPU_DYNAMIC_LAMP_ATT2 : 'quadratic_attenuation',
GPU_DYNAMIC_LAMP_SPOTSIZE : 'spot_size',
GPU_DYNAMIC_LAMP_SPOTBLEND : 'spot_blend',
GPU_DYNAMIC_MAT_DIFFRGB : 'diffuse_color',
GPU_DYNAMIC_MAT_REF : 'diffuse_intensity',
GPU_DYNAMIC_MAT_SPECRGB : 'specular_color',
GPU_DYNAMIC_MAT_SPEC : 'specular_intensity',
GPU_DYNAMIC_MAT_HARD : 'specular_hardness',
GPU_DYNAMIC_MAT_EMIT : 'emit',
GPU_DYNAMIC_MAT_AMB : 'ambient',
GPU_DYNAMIC_MAT_ALPHA : 'alpha',
}
TYPE_TO_SEMANTIC = {
GPU_DYNAMIC_LAMP_DYNVEC : 'BL_DYNVEC',
GPU_DYNAMIC_LAMP_DYNCO : 'BL_DYNCO',
GPU_DYNAMIC_LAMP_DYNIMAT : 'BL_DYNIMAT',
GPU_DYNAMIC_LAMP_DYNPERSMAT : 'BL_DYNPERSMAT',
CD_ORCO: 'POSITION',
-1: 'NORMAL' # Hack until the gpu module has something for normals
}
DATATYPE_TO_CONVERTER = {
GPU_DATA_1I : lambda x : x,
GPU_DATA_1F : lambda x : x,
GPU_DATA_2F : lambda x : list(x),
GPU_DATA_3F : lambda x : list(x),
GPU_DATA_4F : lambda x : list(x),
}
DATATYPE_TO_GLTF_TYPE = {
GPU_DATA_1I : 5124, # INT
GPU_DATA_1F : 5126, # FLOAT
GPU_DATA_2F : 35664, # FLOAT_VEC2
GPU_DATA_3F : 35665, # FLOAT_VEC3
GPU_DATA_4F : 35666, # FLOAT_VEC4
}
|
lukesanantonio/blendergltf
|
gpu_luts.py
|
Python
|
apache-2.0
| 3,110
|
from django.http import HttpResponse
# this redirect key is (hopefully) unique but generic so it doesn't signpost the use of DMP/Django.
# not prefixing with X- because that's now deprecated.
REDIRECT_HEADER_KEY = 'Redirect-Location'
###############################################################################
### Redirect with Javascript instead of 301/302
### See also exceptions.py for two additional redirect methods
class HttpResponseJavascriptRedirect(HttpResponse):
'''
Sends a regular HTTP 200 OK response that contains Javascript to
redirect the browser:
<script>window.location.assign("...");</script>.
If redirect_to is empty, it redirects to the current location (essentially refreshing
the current page):
<script>window.location.assign(window.location.href);</script>.
Normally, redirecting should be done via HTTP 302 rather than Javascript.
Use this class when your only choice is through Javascript.
For example, suppose you need to redirect the top-level page from an Ajax response.
Ajax redirects normally only redirects the Ajax itself (not the page that initiated the call),
and this default behavior is usually what is needed. However, there are instances when the
entire page must be redirected, even if the call is Ajax-based.
After the redirect_to parameter, you can use any of the normal HttpResponse constructor arguments.
If you need to omit the surrounding <script> tags, send "include_script_tag=False" to
the constructor. One use case for omitting the tags is when the caller is a
JQuery $.script() ajax call.
A custom header is set in the response. This allows middleware, your web server, or
calling JS code to adjust the redirect if needed.
Note that this method doesn't use the <meta> tag or Refresh header method because
they aren't predictable within Ajax (for example, JQuery seems to ignore them).
'''
def __init__(self, redirect_to=None, *args, **kwargs):
# set up the code
if redirect_to:
script = 'window.location.assign("{}");'.format(redirect_to.split('#')[0])
else:
script = 'window.location.assign(window.location.href.split("#")[0])'
# do we need to add the <script> tag? (that's the default)
if kwargs.pop('include_script_tag', True):
script = '<script>{}</script>'.format(script)
# call the super
super().__init__(script, *args, **kwargs)
# add the custom header
self[REDIRECT_HEADER_KEY] = redirect_to or 'window.location.href'
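# Illustrative usage (editor's addition): returning the Javascript redirect
# from an Ajax endpoint so the top-level page navigates, not just the Ajax
# call. The view name and target URL below are examples only.
#
#   def save_and_redirect(request):
#       # ... handle the Ajax POST here ...
#       return HttpResponseJavascriptRedirect('/homepage/')
#
# Calling JS or middleware can also inspect the custom header if it needs to
# adjust the redirect:
#
#   response[REDIRECT_HEADER_KEY]   # -> '/homepage/'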
|
doconix/django-mako-plus
|
django_mako_plus/http.py
|
Python
|
apache-2.0
| 2,608
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/graceful-restart/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State information for BGP graceful-restart
"""
__slots__ = ("_path_helper", "_extmethods", "__enabled")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"afi-safis",
"afi-safi",
"graceful-restart",
"state",
]
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/graceful_restart/state/enabled (boolean)
YANG Description: This leaf indicates whether graceful-restart is enabled for
this AFI-SAFI
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/graceful_restart/state/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: This leaf indicates whether graceful-restart is enabled for
this AFI-SAFI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
enabled = __builtin__.property(_get_enabled)
_pyangbind_elements = OrderedDict([("enabled", enabled)])
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/graceful-restart/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State information for BGP graceful-restart
"""
__slots__ = ("_path_helper", "_extmethods", "__enabled")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"afi-safis",
"afi-safi",
"graceful-restart",
"state",
]
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/graceful_restart/state/enabled (boolean)
YANG Description: This leaf indicates whether graceful-restart is enabled for
this AFI-SAFI
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/graceful_restart/state/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: This leaf indicates whether graceful-restart is enabled for
this AFI-SAFI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
enabled = __builtin__.property(_get_enabled)
_pyangbind_elements = OrderedDict([("enabled", enabled)])
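# Illustrative usage (editor's addition): reading the generated container.
# Because the leaf is config: false, it is exposed read-only through the
# `enabled` property; backends populate it via the private _set_enabled(),
# as the docstrings above describe. The printed values are expectations for
# this sketch, not guaranteed representations.
#
#   gr_state = state()
#   print(gr_state.enabled)       # default, a YANGBool equal to False
#   gr_state._set_enabled(True)   # backend-style population
#   print(gr_state.enabled)       # now a YANGBool equal to True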
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/global_/afi_safis/afi_safi/graceful_restart/state/__init__.py
|
Python
|
apache-2.0
| 11,675
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013, First Party Software
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import gsxws
from django.db.models import Q
from django.contrib import messages
from django.core.cache import cache
from django.shortcuts import render, redirect, get_object_or_404
from django.utils.translation import ugettext as _
from django.template.defaultfilters import slugify
from django.views.decorators.cache import cache_page
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from servo.models import Device, Order, Product, GsxAccount, ServiceOrderItem
from servo.forms.devices import DeviceForm, DeviceUploadForm, DeviceSearchForm
class RepairDiagnosticResults:
pass
class DiagnosticResults(object):
def __init__(self, diags):
if not diags.diagnosticTestData:
raise gsxws.GsxError('Missing diagnostic data')
self.diags = dict(result={}, profile={}, report={})
for r in diags.diagnosticTestData.testResult.result:
self.diags['result'][r.name] = r.value
for r in diags.diagnosticProfileData.profile.unit.key:
self.diags['profile'][r.name] = r.value
for r in diags.diagnosticProfileData.report.reportData.key:
self.diags['report'][r.name] = r.value
def __iter__(self):
return iter(self.diags)
def model_from_slug(product_line, model=None):
"""
Returns product description for model slug or models dict for
the specified product line
"""
if not cache.get("slugmap"):
slugmap = {} # Map model slug to corresponding product description
product_lines = gsxws.products.models()
for k, v in product_lines.items():
d = {}
for p in v['models']:
slug = slugify(p)
d[slug] = p
slugmap[k] = d
cache.set("slugmap", slugmap)
models = cache.get("slugmap").get(product_line)
if model is not None:
return models.get(model)
return models
def prep_list_view(request, product_line=None, model=None):
title = _('Devices')
all_devices = Device.objects.all()
product_lines = gsxws.products.models()
if product_line is None:
product_line = product_lines.keys()[0]
models = model_from_slug(product_line)
if model is None:
model = models.keys()[0]
title = product_lines[product_line]['name']
else:
title = models.get(model)
if product_line == "OTHER":
all_devices = all_devices.filter(product_line=product_line)
else:
all_devices = all_devices.filter(slug=model)
page = request.GET.get('page')
paginator = Paginator(all_devices, 50)
try:
devices = paginator.page(page)
except PageNotAnInteger:
devices = paginator.page(1)
except EmptyPage:
devices = paginator.page(paginator.num_pages)
return locals()
def prep_detail_view(request, pk, product_line=None, model=None):
if pk is None:
device = Device()
else:
device = Device.objects.get(pk=pk)
data = prep_list_view(request, product_line, model)
data['device'] = device
data['title'] = device.description
return data
def index(request, product_line=None, model=None):
if request.session.get('return_to'):
del(request.session['return_to'])
data = prep_list_view(request, product_line, model)
if data['all_devices'].count() > 0:
return redirect(data['all_devices'].latest())
return render(request, "devices/index.html", data)
def delete_device(request, product_line, model, pk):
dev = Device.objects.get(pk=pk)
if request.method == 'POST':
from django.db.models import ProtectedError
try:
dev.delete()
messages.success(request, _("Device deleted"))
except ProtectedError:
messages.error(request, _("Cannot delete device with GSX repairs"))
return redirect(dev)
return redirect(index)
data = {'action': request.path}
data['device'] = dev
return render(request, "devices/remove.html", data)
def edit_device(request, pk=None, product_line=None, model=None):
"""
Edits an existing device or adds a new one
"""
device = Device()
device.sn = request.GET.get('sn', '')
if product_line is not None:
device.product_line = product_line
if model is not None:
device.product_line = product_line
device.description = model_from_slug(product_line, model)
if pk is not None:
device = Device.objects.get(pk=pk)
form = DeviceForm(instance=device)
if request.method == "POST":
form = DeviceForm(request.POST, request.FILES, instance=device)
if form.is_valid():
device = form.save()
messages.success(request, _(u"%s saved") % device.description)
device.add_tags(request.POST.getlist('tag'))
return redirect(view_device,
pk=device.pk,
product_line=device.product_line,
model=device.slug)
data = prep_detail_view(request, pk, product_line, model)
data['form'] = form
return render(request, 'devices/form.html', data)
def view_device(request, pk, product_line=None, model=None):
data = prep_detail_view(request, pk, product_line, model)
return render(request, "devices/view.html", data)
def diagnostics(request, pk):
"""
Fetches MRI diagnostics or initiates iOS diags from GSX
"""
device = get_object_or_404(Device, pk=pk)
if request.GET.get('a') == 'init':
if request.method == 'POST':
from gsxws import diagnostics
order = request.POST.get('order')
order = device.order_set.get(pk=order)
email = request.POST.get('email')
diag = diagnostics.Diagnostics(serialNumber=device.sn)
diag.emailAddress = email
diag.shipTo = order.location.gsx_shipto
try:
GsxAccount.default(request.user)
res = diag.initiate()
msg = _('Diagnostics initiated - diags://%s') % res
order.notify("init_diags", msg, request.user)
messages.success(request, msg)
except gsxws.GsxError, e:
messages.error(request, e)
return redirect(order)
order = request.GET.get('order')
order = device.order_set.get(pk=order)
customer = order.customer
url = request.path
return render(request, "devices/diagnostic_init.html", locals())
if request.GET.get('a') == 'get':
try:
diagnostics = device.get_diagnostics(request.user)
if device.is_ios():
diagnostics = DiagnosticResults(diagnostics)
return render(request, "devices/diagnostic_ios.html", locals())
return render(request, "devices/diagnostic_results.html", locals())
except gsxws.GsxError, e:
return render(request, "devices/diagnostic_error.html", {'error': e})
return render(request, "devices/diagnostics.html", locals())
def get_gsx_search_results(request, what, param, query):
"""
The second phase of a GSX search.
There should be an active GSX session open at this stage.
"""
data = {}
results = []
query = query.upper()
device = Device(sn=query)
error_template = "search/results/gsx_error.html"
# @TODO: this isn't a GSX search. Move it somewhere else.
if what == "orders":
try:
if param == 'serialNumber':
device = Device.objects.get(sn__exact=query)
if param == 'alternateDeviceId':
device = Device.objects.get(imei__exact=query)
except (Device.DoesNotExist, ValueError,):
return render(request, "search/results/gsx_notfound.html")
orders = device.order_set.all()
return render(request, "orders/list.html", locals())
if what == "warranty":
        # Update warranty info if we've been here before
try:
device = Device.objects.get(sn__exact=query)
device.update_gsx_details()
except Exception:
try:
device = Device.from_gsx(query)
except Exception as e:
return render(request, error_template, {'message': e})
results.append(device)
# maybe it's a device we've already replaced...
try:
soi = ServiceOrderItem.objects.get(sn__iexact=query)
results[0].repeat_service = soi.order
except ServiceOrderItem.DoesNotExist:
pass
if what == "parts":
# looking for parts
if param == "partNumber":
# ... with a part number
part = gsxws.Part(partNumber=query)
try:
partinfo = part.lookup()
except gsxws.GsxError, e:
return render(request, error_template, {'message': e})
product = Product.from_gsx(partinfo)
cache.set(query, product)
results.append(product)
if param == "serialNumber":
# ... with a serial number
try:
results = device.get_parts()
data['device'] = device
except Exception, e:
return render(request, error_template, {'message': e})
if param == "productName":
product = gsxws.Product(productName=query)
parts = product.parts()
for p in parts:
results.append(Product.from_gsx(p))
if what == "repairs":
# Looking for GSX repairs
if param == "serialNumber":
# ... with a serial number
try:
device = gsxws.Product(query)
#results = device.repairs()
# @TODO: move the encoding hack to py-gsxws
for i, p in enumerate(device.repairs()):
d = {'purchaseOrderNumber': p.purchaseOrderNumber}
d['repairConfirmationNumber'] = p.repairConfirmationNumber
d['createdOn'] = p.createdOn
d['customerName'] = p.customerName.encode('utf-8')
d['repairStatus'] = p.repairStatus
results.append(d)
except gsxws.GsxError, e:
return render(request, "search/results/gsx_notfound.html")
elif param == "dispatchId":
# ... with a repair confirmation number
repair = gsxws.Repair(number=query)
try:
results = repair.lookup()
except gsxws.GsxError as message:
return render(request, error_template, locals())
return render(request, "devices/search_gsx_%s.html" % what, locals())
def search_gsx(request, what, param, query):
"""
The first phase of a GSX search
"""
title = _(u'Search results for "%s"') % query
try:
act = request.session.get("gsx_account")
act = None
if act is None:
GsxAccount.default(user=request.user)
else:
act.connect(request.user)
except gsxws.GsxError as message:
return render(request, "devices/search_gsx_error.html", locals())
if request.is_ajax():
if what == "parts":
try:
dev = Device.from_gsx(query)
products = dev.get_parts()
return render(request, "devices/parts.html", locals())
except gsxws.GsxError as message:
return render(request, "search/results/gsx_error.html", locals())
return get_gsx_search_results(request, what, param, query)
return render(request, "devices/search_gsx.html", locals())
def search(request):
"""
Searching for devices from the main navbar
"""
query = request.GET.get("q", '').strip()
request.session['search_query'] = query
query = query.upper()
valid_arg = gsxws.validate(query)
if valid_arg in ('serialNumber', 'alternateDeviceId',):
return redirect(search_gsx, "warranty", valid_arg, query)
devices = Device.objects.filter(
Q(sn__icontains=query) | Q(description__icontains=query)
)
title = _(u'Devices matching "%s"') % query
return render(request, "devices/search.html", locals())
def find(request):
"""
Searching for device from devices/find
"""
title = _("Device search")
form = DeviceSearchForm()
results = Device.objects.none()
if request.method == 'POST':
form = DeviceSearchForm(request.POST)
if form.is_valid():
fdata = form.cleaned_data
results = Device.objects.all()
if fdata.get("product_line"):
results = results.filter(product_line__in=fdata['product_line'])
if fdata.get("warranty_status"):
results = results.filter(warranty_status__in=fdata['warranty_status'])
if fdata.get("description"):
results = results.filter(description__icontains=fdata['description'])
if fdata.get("sn"):
results = results.filter(sn__icontains=fdata['sn'])
if fdata.get("date_start"):
results = results.filter(created_at__range=[fdata['date_start'],
fdata['date_end']])
paginator = Paginator(results, 100)
page = request.GET.get("page")
try:
devices = paginator.page(page)
except PageNotAnInteger:
devices = paginator.page(1)
except EmptyPage:
devices = paginator.page(paginator.num_pages)
return render(request, "devices/find.html", locals())
#@cache_page(60*5)
def parts(request, pk, order_id, queue_id):
"""
Lists available parts for this device/order
taking into account the order's queues GSX Sold-To
and the Location's corresponding GSX account
"""
from decimal import InvalidOperation
device = Device.objects.get(pk=pk)
order = device.order_set.get(pk=order_id)
try:
# remember the right GSX account
act = GsxAccount.default(request.user, order.queue)
request.session['gsx_account'] = act.pk
products = device.get_parts()
except gsxws.GsxError as message:
return render(request, "search/results/gsx_error.html", locals())
except AttributeError:
message = _('Invalid serial number for parts lookup')
return render(request, "search/results/gsx_error.html", locals())
except InvalidOperation:
message = _('Error calculating prices. Please check your system settings.')
return render(request, "search/results/gsx_error.html", locals())
return render(request, "devices/parts.html", locals())
def model_parts(request, product_line=None, model=None):
"""
Shows parts for this device model
"""
data = prep_list_view(request, product_line, model)
if cache.get("slugmap") and model:
models = cache.get("slugmap")[product_line]
data['what'] = "parts"
data['param'] = "productName"
data['query'] = models[model]
data['products'] = Product.objects.filter(tags__tag=data['query'])
return render(request, "devices/index.html", data)
def choose(request, order_id):
"""
Choosing a device from within an SRO
Does GSX lookup in case device is not found locally
"""
context = {'order': order_id}
if request.method == "POST":
query = request.POST.get('q').upper()
results = Device.objects.filter(Q(sn__iexact=query) | Q(imei=query))
if len(results) < 1:
try:
current_order = request.session.get("current_order_id")
current_order = Order.objects.get(pk=current_order)
if current_order and current_order.queue:
GsxAccount.default(request.user, current_order.queue)
else:
GsxAccount.default(request.user)
results = [Device.from_gsx(query)]
except Exception as e:
context['error'] = e
return render(request, "devices/choose-error.html", context)
context['results'] = results
return render(request, "devices/choose-list.html", context)
return render(request, "devices/choose.html", context)
def upload_devices(request):
"""
User uploads device DB as tab-delimited CSV file
SN USERNAME PASSWORD NOTES
"""
gsx_account = None
form = DeviceUploadForm()
if request.method == "POST":
form = DeviceUploadForm(request.POST, request.FILES)
if form.is_valid():
i = 0
df = form.cleaned_data['datafile'].read()
if form.cleaned_data.get('do_warranty_check'):
gsx_account = GsxAccount.default(request.user)
for l in df.split("\r"):
l = l.decode("latin-1").encode("utf-8")
row = l.strip().split("\t")
if gsx_account:
try:
device = Device.from_gsx(row[0])
except Exception, e:
messages.error(request, e)
break
else:
device = Device.objects.get_or_create(sn=row[0])[0]
try:
device.username = row[1]
device.password = row[2]
device.notes = row[3]
except IndexError:
pass
device.save()
i += 1
if form.cleaned_data.get("customer"):
customer = form.cleaned_data['customer']
customer.devices.add(device)
messages.success(request, _("%d devices imported") % i)
return redirect(index)
data = {'form': form, 'action': request.path}
return render(request, "devices/upload_devices.html", data)
def update_gsx_details(request, pk):
"""
Updates devices GSX warranty details
"""
device = get_object_or_404(Device, pk=pk)
try:
GsxAccount.default(request.user)
device.update_gsx_details()
messages.success(request, _("Warranty status updated successfully"))
except Exception, e:
messages.error(request, e)
if request.session.get('return_to'):
return redirect(request.session['return_to'])
return redirect(device)
def get_info(request, pk):
device = get_object_or_404(Device, pk=pk)
return render(request, "devices/get_info.html", locals())
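# Editor's note (illustrative, not part of the original views): upload_devices()
# above expects a tab-delimited file with one device per row and the columns
# SN, USERNAME, PASSWORD, NOTES (trailing columns may be omitted). A
# hypothetical row, with <TAB> standing in for the tab character:
#
#   C02ABC123XYZ<TAB>fieldtech<TAB>s3cret<TAB>Loaner unit
#
# Likewise, model_from_slug() resolves a slugified GSX model name back to its
# human-readable description; the product line key and slug below are examples:
#
#   model_from_slug('IPOD', 'ipod-touch')   # -> e.g. 'iPod touch'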
|
filipp/Servo
|
servo/views/device.py
|
Python
|
bsd-2-clause
| 20,045
|
"""
Compute the force of gravity between the Earth and Sun.
Copyright 2012, Casey W. Stark. See LICENSE.txt for more information.
"""
# Import the gravitational constant and the Quantity class
from dimensionful import G, Quantity
# Supply the mass of Earth, mass of Sun, and the distance between.
mass_earth = Quantity(5.9742e27, "g")
mass_sun = Quantity(1.0, "Msun")
distance = Quantity(1.0, "AU")
# Calculate it
force_gravity = G * mass_earth * mass_sun / distance**2
force_gravity.convert_to_cgs()
# Report
print ""
print "The force of gravity between the Earth and Sun is %s" % force_gravity
print ""
# prints "The force of gravity between the Earth and Sun is 3.54296304519e+27 cm*g/s**2"
|
caseywstark/dimensionful
|
example/gravity.py
|
Python
|
bsd-2-clause
| 702
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from django.db import migrations
idDistributionMap = {
80: {'normalMean': 3450, 'normalStDev': 125},
133: {'normalMean': 3350, 'normalStDev': 75},
134: {'normalMean': 2350, 'normalStDev': 50},
135: {'normalMean': 3500, 'normalStDev': 100},
81: {'normalMean': 1375, 'normalStDev': 75},
82: {'normalMean': 1350, 'normalStDev': 75},
173: {'normalMean': 3350, 'normalStDev': 55},
110: {'normalMean': 2400, 'normalStDev': 32},
177: {'normalMean': 1900, 'normalStDev': 20},
188: {'normalMean': 40, 'normalStDev': 10},
129: {'normalMean': 1550, 'normalStDev': 25},
105: {'logNormalOffset': 3000, 'logNormalMean': 700,
'logNormalStDev': 0.8},
228: {'normalMean': 2750, 'normalStDev': 100},
227: {'normalMean': 2700, 'normalStDev': 200},
162: {'normalMean': 2450, 'normalStDev': 75},
128: {'normalMean': 3200, 'normalStDev': 300},
174: {'normalMean': 1500, 'normalStDev': 125},
136: {'normalMean': 1350, 'normalStDev': 150},
237: {'normalMean': 2450, 'normalStDev': 75},
131: {'normalMean': 2450, 'normalStDev': 75},
178: {'normalMean': 1600, 'normalStDev': 150},
179: {'normalMean': 2100, 'normalStDev': 150},
108: {'normalMean': 500, 'normalStDev': 50},
100: {'normalMean': 1000, 'normalStDev': 50},
252: {'normalMean': 351, 'normalStDev': 1},
251: {'normalMean': 405, 'normalStDev': 1},
250: {'normalMean': 330, 'normalStDev': 1},
245: {'normalMean': 795, 'normalStDev': 107},
238: {'normalMean': 600, 'normalStDev': 50},
176: {'normalMean': 320, 'normalStDev': 20},
243: {'normalMean': 656, 'normalStDev': 31},
107: {'normalMean': 1650, 'normalStDev': 25},
109: {'normalMean': 775, 'normalStDev': 75},
157: {'normalMean': 650, 'normalStDev': 75},
160: {'normalMean': 9999, 'normalStDev': 999},
126: {'normalMean': 1050, 'normalStDev': 50},
99: {'normalMean': 1050, 'normalStDev': 50},
112: {'normalMean': 2050, 'normalStDev': 75},
207: {'normalMean': 130, 'normalStDev': 15},
209: {'normalMean': 900, 'normalStDev': 150},
203: {'normalMean': 700, 'normalStDev': 50},
204: {'normalMean': 750, 'normalStDev': 75},
130: {'normalMean': 2200, 'normalStDev': 75},
132: {'normalMean': 2150, 'normalStDev': 75},
140: {'normalMean': 2050, 'normalStDev': 125},
148: {'normalMean': 1050, 'normalStDev': 75},
172: {'normalMean': 450, 'normalStDev': 50},
149: {'normalMean': 1050, 'normalStDev': 75},
101: {'normalMean': 700, 'normalStDev': 100},
231: {'normalMean': 700, 'normalStDev': 100},
230: {'normalMean': 300, 'normalStDev': 50},
150: {'normalMean': 1050, 'normalStDev': 75},
127: {'normalMean': 1250, 'normalStDev': 50},
147: {'normalMean': 200, 'normalStDev': 30}}
def forwards_func(apps, schema_editor):
Language = apps.get_model("lexicon", "Language")
languages = Language.objects.filter(
id__in=set(idDistributionMap.keys())).all()
for language in languages:
for k, v in idDistributionMap[language.id].items():
setattr(language, k, v)
language.save()
def reverse_func(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [('lexicon', '0144_clean_lexeme_romanised_2')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
|
lingdb/CoBL-public
|
ielex/lexicon/migrations/0145_fix_language_distributions.py
|
Python
|
bsd-2-clause
| 3,447
|
r"""
This module contains linear algebra solvers for SparseMatrices,
TPMatrices and BlockMatrices.
"""
import numpy as np
from numbers import Number, Integral
from scipy.sparse import spmatrix, kron
from scipy.sparse.linalg import spsolve, splu
from scipy.linalg import solve_banded
from shenfun.config import config
from shenfun.optimization import optimizer, get_optimized
from shenfun.matrixbase import SparseMatrix, extract_bc_matrices, \
SpectralMatrix, BlockMatrix, TPMatrix, get_simplified_tpmatrices
from shenfun.forms.arguments import Function
from mpi4py import MPI
comm = MPI.COMM_WORLD
def Solver(mats):
"""Return appropriate solver for `mats`
Parameters
----------
mats : SparseMatrix or list of SparseMatrices
Returns
-------
Matrix solver (:class:`.SparseMatrixSolver`)
Note
----
The list of matrices may include boundary matrices. The returned solver
will incorporate these boundary matrices automatically on the right hand
side of the equation system.
"""
assert isinstance(mats, (SparseMatrix, list))
bc_mats = []
mat = mats
if isinstance(mats, list):
bc_mats = extract_bc_matrices([mats])
mat = sum(mats[1:], mats[0])
return mat.get_solver()([mat]+bc_mats)
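# Minimal usage sketch (editorial addition, not library code). It assumes the
# SparseMatrix({offset: value, ...}, (N, N)) constructor from shenfun.matrixbase
# and a plain NumPy right-hand side; the diagonal values below are made up.
#
#   import numpy as np
#   N = 8
#   A = SparseMatrix({-2: 1.0, 0: 2.0, 2: 1.0}, (N, N))
#   b = np.ones(N)
#   sol = Solver(A)      # dispatches to a suitable solver via A.get_solver()
#   u = sol(b.copy())    # solves A u = b; b is overwritten when u is omitted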
class SparseMatrixSolver:
"""SparseMatrix solver
Parameters
----------
mat : SparseMatrix or list of SparseMatrices
Note
----
The list of matrices may include boundary matrices. The returned solver
will incorporate these boundary matrices automatically on the right hand
side of the equation system.
"""
def __init__(self, mat):
assert isinstance(mat, (SparseMatrix, list))
self.bc_mats = []
if isinstance(mat, list):
bc_mats = extract_bc_matrices([mat])
mat = sum(mat[1:], mat[0])
self.bc_mats = bc_mats
self.mat = mat
self._lu = None
self._inner_arg = None # argument to inner_solve
assert self.mat.shape[0] == self.mat.shape[1]
def apply_bcs(self, b, u, axis=0):
if len(self.bc_mats) > 0:
u.set_boundary_dofs()
w0 = np.zeros_like(b)
for bc_mat in self.bc_mats:
b -= bc_mat.matvec(u, w0, axis=axis)
return b
def apply_constraints(self, b, constraints, axis=0):
"""Apply constraints to matrix `self.mat` and rhs vector `b`
Parameters
----------
b : array
constraints : tuple of 2-tuples
The 2-tuples represent (row, val)
            The constraint zeroes the matrix row, sets its diagonal entry to 1 and sets b[row] = val
axis : int
The axis we are solving over
"""
# Only apply constraint to matrix first time around
if len(constraints) > 0:
if b.ndim > 1:
T = b.function_space().bases[axis]
A = self.mat
if isinstance(A, spmatrix):
for (row, val) in constraints:
if self._lu is None:
A = A.tolil()
_, zerorow = A[row].nonzero()
A[(row, zerorow)] = 0
A[row, row] = 1
self.mat = A.tocsc()
if b.ndim > 1:
b[T.si[row]] = val
else:
b[row] = val
elif isinstance(A, SparseMatrix):
for (row, val) in constraints:
if self._lu is None:
for key, vals in A.items():
if key >= 0:
M = A.shape[0]-key
v = np.broadcast_to(np.atleast_1d(vals), M).copy()
if row < M:
v[row] = int(key == 0)/A.scale
elif key < 0:
M = A.shape[0]+key
v = np.broadcast_to(np.atleast_1d(vals), M).copy()
if row+key < M and row+key > 0:
v[row+key] = 0
A[key] = v
if b.ndim > 1:
b[T.si[row]] = val
else:
b[row] = val
return b
def perform_lu(self):
"""Perform LU-decomposition"""
if self._lu is None:
if isinstance(self.mat, SparseMatrix):
self.mat = self.mat.diags('csc')
self._lu = splu(self.mat, permc_spec=config['matrix']['sparse']['permc_spec'])
self.dtype = self.mat.dtype.char
self._inner_arg = (self._lu, self.dtype)
return self._lu
def solve(self, b, u, axis, lu):
"""Solve Au=b
Solve along axis if b and u are multidimensional arrays.
Parameters
----------
b, u : arrays of rhs and output
Both can be multidimensional
axis : int
The axis we are solving over
lu : LU-decomposition
Can be either the output from splu, or a dia-matrix containing
the L and U matrices. The latter is used in subclasses.
"""
if axis > 0:
u = np.moveaxis(u, axis, 0)
if u is not b:
b = np.moveaxis(b, axis, 0)
s = slice(0, self.mat.shape[0])
if b.ndim == 1:
if b.dtype.char in 'fdg' or self.dtype in 'FDG':
u[s] = lu.solve(b[s])
else:
u.real[s] = lu.solve(b[s].real)
u.imag[s] = lu.solve(b[s].imag)
else:
N = b[s].shape[0]
P = np.prod(b[s].shape[1:])
br = b[s].reshape((N, P))
if b.dtype.char in 'fdg' or self.dtype in 'FDG':
u[s] = lu.solve(br).reshape(u[s].shape)
else:
u.real[s] = lu.solve(br.real).reshape(u[s].shape)
u.imag[s] = lu.solve(br.imag).reshape(u[s].shape)
if axis > 0:
u = np.moveaxis(u, 0, axis)
if u is not b:
b = np.moveaxis(b, 0, axis)
return u
@staticmethod
def inner_solve(u, lu):
"""Solve Au=b for one-dimensional u
On entry u is the rhs b, on exit it contains the solution.
Parameters
----------
u : array 1D
rhs on entry and solution on exit
lu : LU-decomposition
Can be either a 2-tuple with (output from splu, dtype), or a scipy
dia-matrix containing the L and U matrices. The latter is used in
subclasses.
"""
lu, dtype = lu
s = slice(0, lu.shape[0])
if u.dtype.char in 'fdg' or dtype in 'FDG':
u[s] = lu.solve(u[s])
else:
u.real[s] = lu.solve(u.real[s])
u.imag[s] = lu.solve(u.imag[s])
def __call__(self, b, u=None, axis=0, constraints=()):
"""Solve matrix problem Au = b along axis
This routine also applies boundary conditions and constraints,
        and performs LU-decomposition on the fully assembled matrix.
Parameters
----------
b : array
Array of right hand side on entry and solution on exit unless
u is provided.
u : array, optional
Output array
axis : int, optional
The axis over which to solve for if b and u are multidimensional
constraints : tuple of 2-tuples
The 2-tuples represent (row, val)
            The constraint zeroes the matrix row, sets its diagonal entry to 1 and sets b[row] = val
Note
----
If u is not provided, then b is overwritten with the solution and returned
"""
if u is None:
u = b
b = self.apply_bcs(b, u, axis=axis)
b = self.apply_constraints(b, constraints, axis=axis)
lu = self.perform_lu() # LU must be performed after constraints, because constraints modify the matrix
u = self.solve(b, u, axis=axis, lu=lu)
if hasattr(u, 'set_boundary_dofs'):
u.set_boundary_dofs()
return u
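# For reference, the class above wraps the standard SciPy factorise-once,
# solve-many pattern. A standalone sketch with made-up banded data:
#
#   import numpy as np
#   from scipy.sparse import diags
#   from scipy.sparse.linalg import splu
#
#   N = 16
#   A = diags([1.0, 2.0, 1.0], [-2, 0, 2], shape=(N, N)).tocsc()
#   lu = splu(A)                  # corresponds to perform_lu()
#   u = lu.solve(np.ones(N))      # corresponds to solve()/inner_solve()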
class BandedMatrixSolver(SparseMatrixSolver):
def __init__(self, mat):
SparseMatrixSolver.__init__(self, mat)
self._lu = self.mat.diags('dia')
def solve(self, b, u, axis, lu):
if u is not b:
sl = u.function_space().slice() if hasattr(u, 'function_space') else slice(None)
u[sl] = b[sl]
self.Solve(u, lu.data, axis=axis)
return u
@staticmethod
def LU(data):
"""LU-decomposition using either Cython or Numba
Parameters
----------
data : 2D-array
Storage for dia-matrix on entry and L and U matrices
on exit.
"""
raise NotImplementedError
@staticmethod
def Solve(u, data, axis=0):
"""Fast solve using either Cython or Numba
Parameters
----------
u : array
rhs on entry, solution on exit
data : 2D-array
Storage for dia-matrix containing L and U matrices
axis : int, optional
The axis we are solving over
"""
raise NotImplementedError
class DiagMA(BandedMatrixSolver):
"""Diagonal matrix solver
Parameters
----------
mat : Diagonal SparseMatrix or list of SparseMatrices
"""
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
self.issymmetric = True
self._inner_arg = self._lu.data
def perform_lu(self):
return self._lu
def apply_constraints(self, b, constraints, axis=0):
if len(constraints) > 0:
assert len(constraints) == 1
assert constraints[0][0] == 0, 'Can only fix first row'
self._lu.diagonal(0)[0] = 1
s = [slice(None)]*len(b.shape)
s[axis] = 0
b[tuple(s)] = constraints[0][1]
return b
@staticmethod
@optimizer
def inner_solve(u, lu):
d = lu[0]
u[:d.shape[0]] /= d
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
class TDMA(BandedMatrixSolver):
"""Tridiagonal matrix solver
Parameters
----------
mat : SparseMatrix or list of SparseMatrices
Tridiagonal matrix with diagonals in offsets -2, 0, 2
"""
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
self.issymmetric = self.mat.issymmetric
@staticmethod
@optimizer
def LU(data):
ld = data[0, :-2]
d = data[1, :]
ud = data[2, 2:]
n = d.shape[0]
for i in range(2, n):
ld[i-2] = ld[i-2]/d[i-2]
d[i] = d[i] - ld[i-2]*ud[i-2]
def apply_constraints(self, b, constraints, axis=0):
if len(constraints) > 0:
assert len(constraints) == 1
assert constraints[0][0] == 0, 'Can only fix first row of TDMA'
self._lu.diagonal(0)[0] = 1
self._lu.diagonal(2)[0] = 0
s = [slice(None)]*len(b.shape)
s[axis] = 0
b[tuple(s)] = constraints[0][1]
return b
def perform_lu(self):
if self._inner_arg is None:
self.LU(self._lu.data)
self._inner_arg = self._lu.data
return self._lu
@staticmethod
@optimizer
def inner_solve(u, data):
ld = data[0, :-2]
d = data[1, :]
ud = data[2, 2:]
n = d.shape[0]
for i in range(2, n):
u[i] -= ld[i-2]*u[i-2]
u[n-1] = u[n-1]/d[n-1]
u[n-2] = u[n-2]/d[n-2]
for i in range(n - 3, -1, -1):
u[i] = (u[i] - ud[i]*u[i+2])/d[i]
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
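# Cross-check sketch (editorial): the TDMA class above targets matrices with
# diagonals at offsets -2, 0, 2. On small problems its result can be verified
# against scipy.linalg.solve_banded (imported at the top of this module), here
# with made-up constant diagonals:
#
#   import numpy as np
#   N, ld, d, ud = 8, 1.0, 4.0, 1.0
#   ab = np.zeros((5, N))        # bands +2, +1, 0, -1, -2 from top to bottom
#   ab[0, 2:] = ud               # offset +2
#   ab[2, :] = d                 # main diagonal
#   ab[4, :-2] = ld              # offset -2
#   u = solve_banded((2, 2), ab, np.ones(N))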
class TDMA_O(BandedMatrixSolver):
"""Tridiagonal matrix solver
Parameters
----------
mat : SparseMatrix
Symmetric tridiagonal matrix with diagonals in offsets -1, 0, 1
"""
# pylint: disable=too-few-public-methods
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
def perform_lu(self):
if self._inner_arg is None:
self.LU(self._lu.data)
self._inner_arg = self._lu.data
return self._lu
@staticmethod
@optimizer
def LU(data):
ld = data[0, :-1]
d = data[1, :]
ud = data[2, 1:]
n = d.shape[0]
for i in range(1, n):
ld[i-1] = ld[i-1]/d[i-1]
d[i] -= ld[i-1]*ud[i-1]
@staticmethod
@optimizer
def inner_solve(u, data):
ld = data[0, :-1]
d = data[1, :]
ud = data[2, 1:]
n = d.shape[0]
for i in range(1, n):
u[i] -= ld[i-1]*u[i-1]
u[n-1] = u[n-1]/d[n-1]
for i in range(n-2, -1, -1):
u[i] = (u[i] - ud[i]*u[i+1])/d[i]
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
class PDMA(BandedMatrixSolver):
"""Pentadiagonal matrix solver
Parameters
----------
mat : SparseMatrix or list of SparseMatrices
Pentadiagonal matrix with diagonals in offsets
-4, -2, 0, 2, 4
"""
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
assert len(self.mat) == 5
def apply_constraints(self, b, constraints, axis=0):
if len(constraints) > 0:
assert len(constraints) == 1
assert constraints[0][0] == 0, 'Can only fix first row of PDMA'
self._lu.diagonal(0)[0] = 1
self._lu.diagonal(2)[0] = 0
self._lu.diagonal(4)[0] = 0
if b.ndim > 1:
s = [slice(None)]*len(b.shape)
s[axis] = 0
b[tuple(s)] = constraints[0][1]
else:
b[0] = constraints[0][1]
self._inner_arg = self._lu.data
return b
@staticmethod
@optimizer
def LU(data): # pragma: no cover
"""LU decomposition"""
a = data[0, :-4]
b = data[1, :-2]
d = data[2, :]
e = data[3, 2:]
f = data[4, 4:]
n = d.shape[0]
m = e.shape[0]
k = n - m
for i in range(n-2*k):
lam = b[i]/d[i]
d[i+k] -= lam*e[i]
e[i+k] -= lam*f[i]
b[i] = lam
lam = a[i]/d[i]
b[i+k] -= lam*e[i]
d[i+2*k] -= lam*f[i]
a[i] = lam
i = n-4
lam = b[i]/d[i]
d[i+k] -= lam*e[i]
b[i] = lam
i = n-3
lam = b[i]/d[i]
d[i+k] -= lam*e[i]
b[i] = lam
def perform_lu(self):
if self._inner_arg is None:
self.LU(self._lu.data)
self._inner_arg = self._lu.data
return self._lu
@staticmethod
@optimizer
def inner_solve(u, data):
a = data[0, :-4]
b = data[1, :-2]
d = data[2, :]
e = data[3, 2:]
f = data[4, 4:]
n = d.shape[0]
u[2] -= b[0]*u[0]
u[3] -= b[1]*u[1]
for k in range(4, n):
u[k] -= (b[k-2]*u[k-2] + a[k-4]*u[k-4])
u[n-1] /= d[n-1]
u[n-2] /= d[n-2]
u[n-3] = (u[n-3]-e[n-3]*u[n-1])/d[n-3]
u[n-4] = (u[n-4]-e[n-4]*u[n-2])/d[n-4]
for k in range(n-5, -1, -1):
u[k] = (u[k]-e[k]*u[k+2]-f[k]*u[k+4])/d[k]
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
class FDMA(BandedMatrixSolver):
"""4-diagonal matrix solver
Parameters
----------
mat : SparseMatrix or list of SparseMatrices
4-diagonal matrix with diagonals in offsets -2, 0, 2, 4
"""
# pylint: disable=too-few-public-methods
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
def perform_lu(self):
if self._inner_arg is None:
self.LU(self._lu.data)
self._inner_arg = self._lu.data
return self._lu
@staticmethod
@optimizer
def LU(data):
ld = data[0, :-2]
d = data[1, :]
u1 = data[2, 2:]
u2 = data[3, 4:]
n = d.shape[0]
for i in range(2, n):
ld[i-2] = ld[i-2]/d[i-2]
d[i] = d[i] - ld[i-2]*u1[i-2]
if i < n-2:
u1[i] = u1[i] - ld[i-2]*u2[i-2]
def apply_constraints(self, b, constraints, axis=0):
if len(constraints) > 0:
assert len(constraints) == 1
            assert constraints[0][0] == 0, 'Can only fix first row of FDMA'
self._lu.diagonal(0)[0] = 1
self._lu.diagonal(2)[0] = 0
self._lu.diagonal(4)[0] = 0
s = [slice(None)]*len(b.shape)
s[axis] = 0
b[tuple(s)] = constraints[0][1]
return b
@staticmethod
@optimizer
def inner_solve(u, data):
ld = data[0, :-2]
d = data[1, :]
u1 = data[2, 2:]
u2 = data[3, 4:]
n = d.shape[0]
for i in range(2, n):
u[i] -= ld[i-2]*u[i-2]
u[n-1] = u[n-1]/d[n-1]
u[n-2] = u[n-2]/d[n-2]
u[n-3] = (u[n-3] - u1[n-3]*u[n-1])/d[n-3]
u[n-4] = (u[n-4] - u1[n-4]*u[n-2])/d[n-4]
for i in range(n - 5, -1, -1):
u[i] = (u[i] - u1[i]*u[i+2] - u2[i]*u[i+4])/d[i]
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
class TwoDMA(BandedMatrixSolver):
"""2-diagonal matrix solver
Parameters
----------
mat : SparseMatrix
2-diagonal matrix with diagonals in offsets 0, 2
"""
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
self._inner_arg = self._lu.data
def apply_constraints(self, b, constraints, axis=0):
if len(constraints) > 0:
assert len(constraints) == 1
assert constraints[0][0] == 0, 'Can only fix first row of TwoDMA'
self._lu.diagonal(0)[0] = 1
self._lu.diagonal(2)[0] = 0
s = [slice(None)]*len(b.shape)
s[axis] = 0
b[tuple(s)] = constraints[0][1]
return b
def perform_lu(self):
return self._lu
@staticmethod
@optimizer
def inner_solve(u, data):
d = data[0, :]
u1 = data[1, 2:]
n = d.shape[0]
u[n-1] = u[n-1]/d[n-1]
u[n-2] = u[n-2]/d[n-2]
for i in range(n - 3, -1, -1):
u[i] = (u[i] - u1[i]*u[i+2])/d[i]
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
class ThreeDMA(BandedMatrixSolver):
"""3-diagonal matrix solver - all diagonals upper
Parameters
----------
mat : SparseMatrix
3-diagonal matrix with diagonals in offsets 0, 2, 4
"""
def __init__(self, mat):
BandedMatrixSolver.__init__(self, mat)
self._inner_arg = self._lu.data
def apply_constraints(self, b, constraints, axis=0):
if len(constraints) > 0:
assert len(constraints) == 1
            assert constraints[0][0] == 0, 'Can only fix first row of ThreeDMA'
self._lu.diagonal(0)[0] = 1
self._lu.diagonal(2)[0] = 0
self._lu.diagonal(4)[0] = 0
s = [slice(None)]*len(b.shape)
s[axis] = 0
b[tuple(s)] = constraints[0][1]
return b
def perform_lu(self):
return self._lu
@staticmethod
@optimizer
def inner_solve(u, data):
d = data[0, :]
u1 = data[1, 2:]
u2 = data[1, 4:]
n = d.shape[0]
u[n-1] = u[n-1]/d[n-1]
u[n-2] = u[n-2]/d[n-2]
u[n-3] = (u[n-3]-u1[n-3]*u[n-1])/d[n-3]
u[n-4] = (u[n-4]-u1[n-4]*u[n-2])/d[n-4]
for i in range(n - 5, -1, -1):
u[i] = (u[i] - u1[i]*u[i+2] - u2[i]*u[i+4])/d[i]
@staticmethod
@optimizer
def Solve(u, data, axis=0):
raise NotImplementedError('Only optimized version')
class Solve(SparseMatrixSolver):
"""Generic solver class for SparseMatrix
Possibly with inhomogeneous boundary values
Parameters
----------
mat : SparseMatrix or list of SparseMatrices
format : str, optional
The format of the scipy.sparse.spmatrix to convert into
before solving. Default is Compressed Sparse Column `csc`.
Note
----
This solver converts the matrix to a Scipy sparse matrix of choice and
uses `scipy.sparse` methods `splu` and `spsolve`.
"""
def __init__(self, mat, format=None):
format = config['matrix']['sparse']['solve'] if format is None else format
SparseMatrixSolver.__init__(self, mat)
self.mat = self.mat.diags(format)
class SolverGeneric2ND:
"""Generic solver for problems consisting of tensorproduct matrices
containing two non-diagonal submatrices.
Parameters
----------
mats : sequence
sequence of instances of :class:`.TPMatrix`
Note
----
In addition to two non-diagonal matrices, the solver can also handle one
additional diagonal matrix (one Fourier matrix).
"""
def __init__(self, tpmats):
tpmats = get_simplified_tpmatrices(tpmats)
bc_mats = extract_bc_matrices([tpmats])
self.tpmats = tpmats
self.bc_mats = bc_mats
self.T = tpmats[0].space
self.mats2D = {}
self._lu = None
def matvec(self, u, c):
c.fill(0)
if u.ndim == 2:
s0 = tuple(base.slice() for base in self.T)
            c[s0] = self.diags(0).dot(u[s0].flatten()).reshape(self.T.dims())
else:
raise NotImplementedError
return c
def get_diagonal_axis(self):
naxes = self.T.get_nondiagonal_axes()
diagonal_axis = np.setxor1d([0, 1, 2], naxes)
assert len(diagonal_axis) == 1
return diagonal_axis[0]
def diags(self, i):
"""Return matrix for given index `i` in diagonal direction"""
if i in self.mats2D:
return self.mats2D[i]
if self.T.dimensions == 2:
# In 2D there's just 1 matrix, store and reuse
m = self.tpmats[0]
M0 = m.diags('csc')
for m in self.tpmats[1:]:
M0 = M0 + m.diags('csc')
else:
# 1 matrix per Fourier coefficient
naxes = self.T.get_nondiagonal_axes()
m = self.tpmats[0]
diagonal_axis = self.get_diagonal_axis()
sc = [0, 0, 0]
sc[diagonal_axis] = i if m.scale.shape[diagonal_axis] > 1 else 0
A0 = m.mats[naxes[0]].diags('csc')
A1 = m.mats[naxes[1]].diags('csc')
M0 = kron(A0, A1, 'csc')
M0 *= m.scale[tuple(sc)]
for m in self.tpmats[1:]:
A0 = m.mats[naxes[0]].diags('csc')
A1 = m.mats[naxes[1]].diags('csc')
M1 = kron(A0, A1, 'csc')
sc[diagonal_axis] = i if m.scale.shape[diagonal_axis] > 1 else 0
M1 *= m.scale[tuple(sc)]
M0 = M0 + M1
self.mats2D[i] = M0
return M0
def apply_constraints(self, b, constraints):
"""Apply constraints to matrix and rhs vector `b`
Parameters
----------
b : array
constraints : tuple of 2-tuples
The 2-tuples represent (row, val)
            The constraint zeroes the matrix row, sets its diagonal entry to 1 and sets b[row] = val
"""
if len(constraints) > 0:
if self._lu is None:
A = self.mats2D[0]
A = A.tolil()
for (row, val) in constraints:
_, zerorow = A[row].nonzero()
A[(row, zerorow)] = 0
A[row, row] = 1
b[row] = val
self.mats2D[0] = A.tocsc()
else:
for (row, val) in constraints:
b[row] = val
return b
def assemble(self):
if len(self.mats2D) == 0:
ndim = self.tpmats[0].dimensions
if ndim == 2:
mat = self.diags(0)
self.mats2D[0] = mat
elif ndim == 3:
diagonal_axis = self.get_diagonal_axis()
for i in range(self.T.shape(True)[diagonal_axis]):
M0 = self.diags(i)
self.mats2D[i] = M0
return self.mats2D
def perform_lu(self):
if self._lu is not None:
return self._lu
ndim = self.tpmats[0].dimensions
self._lu = {}
if ndim == 2:
self._lu[0] = splu(self.mats2D[0], permc_spec=config['matrix']['sparse']['permc_spec'])
else:
diagonal_axis = self.get_diagonal_axis()
for i in range(self.T.shape(True)[diagonal_axis]):
self._lu[i] = splu(self.mats2D[i], permc_spec=config['matrix']['sparse']['permc_spec'])
return self._lu
def __call__(self, b, u=None, constraints=()):
if u is None:
u = b
else:
assert u.shape == b.shape
if len(self.bc_mats) > 0:
u.set_boundary_dofs()
w0 = np.zeros_like(u)
for bc_mat in self.bc_mats:
b -= bc_mat.matvec(u, w0)
mats = self.assemble()
b = self.apply_constraints(b, constraints)
lu = self.perform_lu()
if u.ndim == 2:
s0 = self.T.slice()
bs = b[s0].flatten()
if b.dtype.char in 'fdg' or self.mats2D[0].dtype.char in 'FDG':
u[s0] = lu[0].solve(bs).reshape(self.T.dims())
else:
u.real[s0] = lu[0].solve(bs.real).reshape(self.T.dims())
u.imag[s0] = lu[0].solve(bs.imag).reshape(self.T.dims())
elif u.ndim == 3:
naxes = self.T.get_nondiagonal_axes()
diagonal_axis = self.get_diagonal_axis()
s0 = list(self.T.slice())
for i in range(self.T.shape(True)[diagonal_axis]):
s0[diagonal_axis] = i
bs = b[tuple(s0)].flatten()
shape = np.take(self.T.dims(), naxes)
if b.dtype.char in 'fdg' or self.mats2D[0].dtype.char in 'FDG':
u[tuple(s0)] = lu[i].solve(bs).reshape(shape)
else:
u.real[tuple(s0)] = lu[i].solve(bs.real).reshape(shape)
u.imag[tuple(s0)] = lu[i].solve(bs.imag).reshape(shape)
if hasattr(u, 'set_boundary_dofs'):
u.set_boundary_dofs()
return u
class SolverDiagonal:
"""Solver for purely diagonal matrices, like Fourier in Cartesian coordinates.
Parameters
----------
tpmats : sequence
sequence of instances of :class:`.TPMatrix`
"""
def __init__(self, tpmats):
tpmats = get_simplified_tpmatrices(tpmats)
assert len(tpmats) == 1
self.mat = tpmats[0]
def __call__(self, b, u=None, constraints=()):
return self.mat.solve(b, u=u, constraints=constraints)
class Solver2D:
"""Generic solver for tensorproductspaces in 2D
Parameters
----------
mats : sequence
sequence of instances of :class:`.TPMatrix`
Note
----
If there are boundary matrices in the list of mats, then
these matrices are used to modify the right hand side before
solving. If this is not the desired behaviour, then use
:func:`.extract_bc_matrices` on tpmats before using this class.
"""
def __init__(self, tpmats):
bc_mats = extract_bc_matrices([tpmats])
self.tpmats = tpmats
self.bc_mats = bc_mats
self._lu = None
m = tpmats[0]
self.T = T = m.space
assert m._issimplified is False, "Cannot use simplified matrices with this solver"
mat = m.diags(format='csc')
for m in tpmats[1:]:
mat = mat + m.diags('csc')
self.mat = mat
def matvec(self, u, c):
c.fill(0)
s0 = tuple(base.slice() for base in self.T)
c[s0] = self.mat.dot(u[s0].flatten()).reshape(self.T.dims())
return c
@staticmethod
def apply_constraints(A, b, constraints):
"""Apply constraints to matrix `A` and rhs vector `b`
Parameters
----------
A : Sparse matrix
b : array
constraints : tuple of 2-tuples
The 2-tuples represent (row, val)
            The constraint zeroes the matrix row, sets its diagonal entry to 1 and sets b[row] = val
"""
if len(constraints) > 0:
A = A.tolil()
for (row, val) in constraints:
_, zerorow = A[row].nonzero()
A[(row, zerorow)] = 0
A[row, row] = 1
b[row] = val
A = A.tocsc()
return A, b
def __call__(self, b, u=None, constraints=()):
if u is None:
u = b
else:
pass
#assert u.shape == b.shape
if len(self.bc_mats) > 0:
u.set_boundary_dofs()
w0 = np.zeros_like(u)
for bc_mat in self.bc_mats:
b -= bc_mat.matvec(u, w0)
s0 = tuple(base.slice() for base in self.T)
assert b.dtype.char == u.dtype.char
bs = b[s0].flatten()
self.mat, bs = self.apply_constraints(self.mat, bs, constraints)
if self._lu is None:
self._lu = splu(self.mat, permc_spec=config['matrix']['sparse']['permc_spec'])
if b.dtype.char in 'fdg' or self.mat.dtype.char in 'FDG':
u[s0] = self._lu.solve(bs).reshape(self.T.dims())
else:
u.imag[s0] = self._lu.solve(bs.imag).reshape(self.T.dims())
u.real[s0] = self._lu.solve(bs.real).reshape(self.T.dims())
if hasattr(u, 'set_boundary_dofs'):
u.set_boundary_dofs()
return u
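# Conceptual sketch (editorial, made-up 1D operators): Solver2D sums the sparse
# matrices returned by TPMatrix.diags (Kronecker products of the 1D operators)
# and factorises the result once. The same structure in plain SciPy:
#
#   import numpy as np
#   from scipy.sparse import diags, identity, kron
#   from scipy.sparse.linalg import splu
#
#   Nx, Ny = 6, 5
#   Ax = diags([1.0, -2.0, 1.0], [-1, 0, 1], shape=(Nx, Nx))
#   Ay = diags([1.0, -2.0, 1.0], [-1, 0, 1], shape=(Ny, Ny))
#   A = (kron(Ax, identity(Ny)) + kron(identity(Nx), Ay)).tocsc()
#   u = splu(A).solve(np.ones(Nx * Ny)).reshape(Nx, Ny)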
class Solver3D(Solver2D):
"""Generic solver for tensorproductspaces in 3D
Parameters
----------
mats : sequence
sequence of instances of :class:`.TPMatrix`
Note
----
If there are boundary matrices in the list of mats, then
these matrices are used to modify the right hand side before
solving. If this is not the desired behaviour, then use
:func:`.extract_bc_matrices` on mats before using this class.
"""
def __init__(self, tpmats):
Solver2D.__init__(self, tpmats)
class SolverND(Solver2D):
"""Generic solver for tensorproductspaces in N dimensions
Parameters
----------
mats : sequence
sequence of instances of :class:`.TPMatrix`
Note
----
If there are boundary matrices in the list of mats, then
these matrices are used to modify the right hand side before
solving. If this is not the desired behaviour, then use
:func:`.extract_bc_matrices` on mats before using this class.
"""
def __init__(self, tpmats):
Solver2D.__init__(self, tpmats)
class SolverGeneric1ND:
"""Generic solver for tensorproduct matrices consisting of
non-diagonal matrices along only one axis and Fourier along
the others.
Parameters
----------
mats : sequence
sequence of instances of :class:`.TPMatrix`
Note
----
In addition to the one non-diagonal direction, the solver can also handle
up to two diagonal (Fourier) directions.
Also note that if there are boundary matrices in the list of mats, then
these matrices are used to modify the right hand side before
solving. If this is not the desired behaviour, then use
:func:`.extract_bc_matrices` on mats before using this class.
"""
def __init__(self, mats):
assert isinstance(mats, list)
mats = get_simplified_tpmatrices(mats)
assert len(mats[0].naxes) == 1
self.naxes = mats[0].naxes[0]
bc_mats = extract_bc_matrices([mats])
self.mats = mats
self.bc_mats = bc_mats
self.solvers1D = None
self.assemble()
self._lu = False
self._data = None
def matvec(self, u, c):
c.fill(0)
w0 = np.zeros_like(u)
for mat in self.mats:
c += mat.matvec(u, w0)
if len(self.bc_mats) > 0:
u.set_boundary_dofs()
for bc_mat in self.bc_mats:
c += bc_mat.matvec(u, w0)
return c
def assemble(self):
ndim = self.mats[0].dimensions
shape = self.mats[0].space.shape(True)
self.solvers1D = []
if ndim == 2:
zi = np.ndindex((1, shape[1])) if self.naxes == 0 else np.ndindex((shape[0], 1))
other_axis = (self.naxes+1) % 2
for i in zi:
sol = None
for mat in self.mats:
sc = mat.scale[i] if mat.scale.shape[other_axis] > 1 else mat.scale[0, 0]
if sol:
sol += mat.mats[self.naxes]*sc
else:
sol = mat.mats[self.naxes]*sc
self.solvers1D.append(Solver(sol))
elif ndim == 3:
s = [0, 0, 0]
n0, n1 = np.setxor1d((0, 1, 2), self.naxes)
for i in range(shape[n0]):
self.solvers1D.append([])
s[n0] = i
for j in range(shape[n1]):
sol = None
s[n1] = j
for mat in self.mats:
sc = np.broadcast_to(mat.scale, shape)[tuple(s)]
if sol:
sol += mat.mats[self.naxes]*sc
else:
sol = mat.mats[self.naxes]*sc
self.solvers1D[-1].append(Solver(sol))
def apply_constraints(self, b, constraints=()):
"""Apply constraints to solver
Note
----
The SolverGeneric1ND solver can only constrain the first dofs of
the diagonal axes. For Fourier this is the zero dof with the
constant basis function exp(0).
"""
if constraints == ():
return b
ndim = self.mats[0].dimensions
space = self.mats[0].space
z0 = space.local_slice()
paxes = np.setxor1d(range(ndim), self.naxes)
s = [0]*ndim
s[self.naxes] = slice(None)
s = tuple(s)
is_rank_zero = np.array([z0[i].start for i in paxes]).prod()
sol = self.solvers1D[0] if ndim == 2 else self.solvers1D[0][0]
if is_rank_zero != 0:
return b
sol.apply_constraints(b[s], constraints)
return b
def perform_lu(self):
if self._lu is True:
return
if isinstance(self.solvers1D[0], SparseMatrixSolver):
for m in self.solvers1D:
lu = m.perform_lu()
else:
for mi in self.solvers1D:
for mij in mi:
lu = mij.perform_lu()
self._lu = True
def get_data(self, is_rank_zero):
        if self._data is not None:
return self._data
if self.mats[0].dimensions == 2:
data = np.zeros((len(self.solvers1D),)+self.solvers1D[-1]._inner_arg.shape)
for i, sol in enumerate(self.solvers1D):
if i == 0 and is_rank_zero:
continue
else:
data[i] = sol._inner_arg
elif self.mats[0].dimensions == 3:
data = np.zeros((len(self.solvers1D), len(self.solvers1D[0]))+self.solvers1D[-1][-1]._inner_arg.shape)
for i, m in enumerate(self.solvers1D):
for j, sol in enumerate(m):
if i == 0 and j == 0 and is_rank_zero:
continue
else:
data[i, j] = sol._inner_arg
self._data = data
return data
@staticmethod
@optimizer
def solve_data(u, data, sol, naxes, is_rank_zero):
s = [0]*u.ndim
s[naxes] = slice(None)
paxes = np.setxor1d(range(u.ndim), naxes)
if u.ndim == 2:
for i in range(u.shape[paxes[0]]):
if i == 0 and is_rank_zero:
continue
s[paxes[0]] = i
s0 = tuple(s)
sol(u[s0], data[i])
elif u.ndim == 3:
for i in range(u.shape[paxes[0]]):
s[paxes[0]] = i
for j in range(u.shape[paxes[1]]):
if i == 0 and j == 0 and is_rank_zero:
continue
s[paxes[1]] = j
s0 = tuple(s)
sol(u[s0], data[i, j])
return u
def fast_solve(self, u, b, solvers1D, naxes):
if u is not b:
u[:] = b
# Solve first for the possibly different Fourier wavenumber 0, or (0, 0) in 3D
# All other wavenumbers we assume have the same solver
sol0 = solvers1D[0] if u.ndim == 2 else solvers1D[0][0]
sol1 = solvers1D[-1] if u.ndim == 2 else solvers1D[-1][-1]
is_rank_zero = comm.Get_rank() == 0
if is_rank_zero:
s = [0]*u.ndim
s[naxes] = slice(None)
s = tuple(s)
sol0.inner_solve(u[s], sol0._inner_arg)
data = self.get_data(is_rank_zero)
sol = get_optimized(sol1.inner_solve, mode=config['optimization']['mode'])
u = self.solve_data(u, data, sol, naxes, is_rank_zero)
def solve(self, u, b, solvers1D, naxes):
if u is not b:
u[:] = b
s = [0]*u.ndim
s[naxes] = slice(None)
paxes = np.setxor1d(range(u.ndim), naxes)
if u.ndim == 2:
for i, sol in enumerate(solvers1D):
s[paxes[0]] = i
s0 = tuple(s)
sol.inner_solve(u[s0], sol._inner_arg)
elif u.ndim == 3:
for i, m in enumerate(solvers1D):
s[paxes[0]] = i
for j, sol in enumerate(m):
s[paxes[1]] = j
s0 = tuple(s)
sol.inner_solve(u[s0], sol._inner_arg)
def __call__(self, b, u=None, constraints=(), fast=True):
"""Solve problem with one non-diagonal direction
Parameters
----------
b : array, right hand side
u : array, solution
constraints : tuple of 2-tuples
Each 2-tuple (row, value) is a constraint set for the non-periodic
direction, for Fourier index 0 in 2D and (0, 0) in 3D
fast : bool
Use fast routine if possible. A fast routine is possible
for any system of matrices with a tailored solver, like the
TDMA, PDMA, FDMA and TwoDMA.
"""
if u is None:
u = b
else:
assert u.shape == b.shape
if len(self.bc_mats) > 0:
u.set_boundary_dofs()
w0 = np.zeros_like(u)
for bc_mat in self.bc_mats:
b -= bc_mat.matvec(u, w0)
b = self.apply_constraints(b, constraints)
if not self._lu:
self.perform_lu()
sol1 = self.solvers1D[-1] if u.ndim == 2 else self.solvers1D[-1][-1]
if isinstance(sol1._inner_arg, tuple):
fast = False
if not fast:
self.solve(u, b, self.solvers1D, self.naxes)
else:
self.fast_solve(u, b, self.solvers1D, self.naxes)
if hasattr(u, 'set_boundary_dofs'):
u.set_boundary_dofs()
return u
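# Usage note (editorial sketch): for a singular system the constant mode is
# typically pinned by constraining dof 0 of the zero Fourier mode. With a
# hypothetical list of assembled TPMatrices `mats` and right-hand side `b_hat`:
#
#   sol = SolverGeneric1ND(mats)
#   u_hat = sol(b_hat, constraints=((0, 0),))  # fix row 0 of Fourier mode 0 (or (0, 0) in 3D)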
class BlockMatrixSolver:
def __init__(self, mats):
assert isinstance(mats, (BlockMatrix, list))
self.bc_mat = None
self._lu = None
if isinstance(mats, BlockMatrix):
mats = mats.get_mats()
bc_mats = extract_bc_matrices([mats])
assert len(mats) > 0
self.mat = BlockMatrix(mats)
if len(bc_mats) > 0:
self.bc_mat = BlockMatrix(bc_mats)
@staticmethod
def apply_constraint(A, b, offset, i, constraint):
if constraint is None or comm.Get_rank() > 0:
return A, b
if isinstance(i, int):
if i > 0:
return A, b
if isinstance(i, tuple):
if np.sum(np.array(i)) > 0:
return A, b
row = offset + constraint[1]
assert isinstance(constraint, tuple)
assert len(constraint) == 3
val = constraint[2]
b[row] = val
if A is not None:
A = A.tolil()
r = A.getrow(row).nonzero()
A[(row, r[1])] = 0
A[row, row] = 1
A = A.tocsc()
return A, b
def __call__(self, b, u=None, constraints=()):
from .forms.arguments import Function
import scipy.sparse as sp
space = b.function_space()
if u is None:
u = Function(space)
else:
assert u.shape == b.shape
if self.bc_mat: # Add contribution to right hand side due to inhomogeneous boundary conditions
u.set_boundary_dofs()
w0 = np.zeros_like(u)
b -= self.bc_mat.matvec(u, w0)
nvars = b.shape[0] if len(b.shape) > space.dimensions else 1
u = np.expand_dims(u, 0) if nvars == 1 else u
b = np.expand_dims(b, 0) if nvars == 1 else b
for con in constraints:
assert len(con) == 3
assert isinstance(con[0], Integral)
assert isinstance(con[1], Integral)
assert isinstance(con[2], Number)
self.mat.assemble()
if self._lu is None:
self._lu = {}
daxes = space.get_diagonal_axes()
sl, dims = space.get_ndiag_slices_and_dims()
gi = np.zeros(dims[-1], dtype=b.dtype)
for key, Ai in self.mat._Ai.items():
if len(daxes) > 0:
sl.T[daxes+1] = key if isinstance(key, int) else np.array(key)[:, None]
gi = b.copy_to_flattened(gi, key, dims, sl)
if key in self._lu:
lu = self._lu[key]
for con in constraints:
_, gi = self.apply_constraint(None, gi, dims[con[0]], key, con)
else:
for con in constraints:
Ai, gi = self.apply_constraint(Ai, gi, dims[con[0]], key, con)
lu = sp.linalg.splu(Ai, permc_spec=config['matrix']['block']['permc_spec'])
self._lu[key] = lu
if b.dtype.char in 'fdg' or lu.U.dtype.char in 'FDG':
u = u.copy_from_flattened(lu.solve(gi), key, dims, sl)
else:
u.real = u.real.copy_from_flattened(lu.solve(gi.real), key, dims, sl)
u.imag = u.imag.copy_from_flattened(lu.solve(gi.imag), key, dims, sl)
u = u.reshape(u.shape[1:]) if nvars == 1 else u
b = b.reshape(b.shape[1:]) if nvars == 1 else b
return u
|
spectralDNS/shenfun
|
shenfun/la.py
|
Python
|
bsd-2-clause
| 43,062
|
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
# Berlin, 14195 Berlin, Germany.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import math
import numpy as np
from matplotlib import pylab as plt
from matplotlib import rcParams
from six.moves import range
__author__ = 'noe, marscher'
# taken from networkx.drawing.layout and added hold_dim
def _fruchterman_reingold(A, dim=2, k=None, pos=None, fixed=None,
iterations=50, hold_dim=None):
# Position nodes in adjacency matrix A using Fruchterman-Reingold
# Entry point for NetworkX graph is fruchterman_reingold_layout()
try:
nnodes, _ = A.shape
except AttributeError:
raise RuntimeError(
"fruchterman_reingold() takes an adjacency matrix as input")
A = np.asarray(A) # make sure we have an array instead of a matrix
if pos is None:
# random initial positions
pos = np.asarray(np.random.random((nnodes, dim)), dtype=A.dtype)
else:
# make sure positions are of same type as matrix
pos = pos.astype(A.dtype)
# optimal distance between nodes
if k is None:
k = np.sqrt(1.0 / nnodes)
# the initial "temperature" is about .1 of domain area (=1x1)
# this is the largest step allowed in the dynamics.
t = 0.1
# simple cooling scheme.
# linearly step down by dt on each iteration so last iteration is size dt.
dt = t / float(iterations + 1)
delta = np.zeros((pos.shape[0], pos.shape[0], pos.shape[1]), dtype=A.dtype)
# the inscrutable (but fast) version
# this is still O(V^2)
# could use multilevel methods to speed this up significantly
for _ in range(iterations):
# matrix of difference between points
for i in range(pos.shape[1]):
delta[:, :, i] = pos[:, i, None] - pos[:, i]
# distance between points
distance = np.sqrt((delta**2).sum(axis=-1))
# enforce minimum distance of 0.01
distance = np.where(distance < 0.01, 0.01, distance)
# displacement "force"
displacement = np.transpose(np.transpose(delta) *
(k * k / distance**2 - A * distance / k))\
.sum(axis=1)
# update positions
length = np.sqrt((displacement**2).sum(axis=1))
length = np.where(length < 0.01, 0.1, length)
delta_pos = np.transpose(np.transpose(displacement) * t / length)
if fixed is not None:
# don't change positions of fixed nodes
delta_pos[fixed] = 0.0
# only update y component
if hold_dim == 0:
pos[:, 1] += delta_pos[:, 1]
# only update x component
elif hold_dim == 1:
pos[:, 0] += delta_pos[:, 0]
else:
pos += delta_pos
# cool temperature
t -= dt
pos = _rescale_layout(pos)
return pos
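# Quick sketch (editorial, made-up adjacency matrix) of how the helper above is
# driven internally via NetworkPlot._find_best_positions():
#
#   import numpy as np
#   A = np.array([[0.0, 1.0, 0.0],
#                 [1.0, 0.0, 1.0],
#                 [0.0, 1.0, 0.0]])
#   pos = _fruchterman_reingold(A, dim=2, iterations=50)  # (3, 2) positions rescaled to [0, 1]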
def _rescale_layout(pos, scale=1):
    # rescale to (0, scale) in all axes
# shift origin to (0,0)
lim = 0 # max coordinate for all axes
for i in range(pos.shape[1]):
pos[:, i] -= pos[:, i].min()
lim = max(pos[:, i].max(), lim)
# rescale to (0,scale) in all directions, preserves aspect
for i in range(pos.shape[1]):
pos[:, i] *= scale / lim
return pos
class NetworkPlot(object):
def __init__(self, A, pos=None, xpos=None, ypos=None):
r"""
Parameters
----------
A : ndarray(n,n)
weight matrix or adjacency matrix of the network to visualize
pos : ndarray(n,2)
user-defined positions
xpos : ndarray(n,)
user-defined x-positions
ypos : ndarray(n,)
user-defined y-positions
Examples
--------
        We first define a reactive flux by taking the following transition
        matrix and computing TPT from state 2 to 3.
>>> import numpy as np
>>> P = np.array([[0.8, 0.15, 0.05, 0.0, 0.0],
... [0.1, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.1, 0.8, 0.0, 0.05],
... [0.0, 0.2, 0.0, 0.8, 0.0],
... [0.0, 0.02, 0.02, 0.0, 0.96]])
>>> from pyemma import msm
>>> F = msm.tpt(msm.markov_model(P), [2], [3])
now plot the gross flux
>>> NetworkPlot(F.gross_flux).plot_network() # doctest:+ELLIPSIS
<matplotlib.figure.Figure...
"""
if A.shape[0] >= 50:
import warnings
warnings.warn("The layout optimization method will take a long"
" time for large networks! It is recommended to"
" coarse grain your model first!")
self.A = A
self.pos = pos
self.xpos = xpos
self.ypos = ypos
def _draw_arrow(self, x1, y1, x2, y2, Dx, Dy, label="", width=1.0,
arrow_curvature=1.0, color="grey",
patchA=None, patchB=None, shrinkA=0, shrinkB=0):
"""
Draws a slightly curved arrow from (x1,y1) to (x2,y2).
        Will allow the given patches at start and end.
"""
# set arrow properties
dist = math.sqrt(
((x2 - x1) / float(Dx))**2 + ((y2 - y1) / float(Dy))**2)
arrow_curvature *= 0.075 # standard scale
rad = arrow_curvature / (dist)
tail_width = width
head_width = max(0.5, 2 * width)
head_length = head_width
plt.annotate("",
xy=(x2, y2),
xycoords='data',
xytext=(x1, y1),
textcoords='data',
arrowprops=dict(arrowstyle='simple,head_length=%f,head_width=%f,tail_width=%f'
% (head_length, head_width, tail_width),
color=color, shrinkA=shrinkA, shrinkB=shrinkB,
patchA=patchA, patchB=patchB,
connectionstyle="arc3,rad=%f" % -rad),
zorder=0)
# weighted center position
center = np.array([0.55 * x1 + 0.45 * x2, 0.55 * y1 + 0.45 * y2])
v = np.array([x2 - x1, y2 - y1]) # 1->2 vector
vabs = np.abs(v)
vnorm = np.array([v[1], -v[0]]) # orthogonal vector
vnorm /= math.sqrt(np.dot(vnorm, vnorm)) # normalize
# cross product to determine the direction into which vnorm points
z = np.cross(v, vnorm)
if z < 0:
vnorm *= -1
offset = 0.5 * arrow_curvature * \
((vabs[0] / (vabs[0] + vabs[1]))
* Dx + (vabs[1] / (vabs[0] + vabs[1])) * Dy)
ptext = center + offset * vnorm
plt.text(ptext[0], ptext[1], label, size=14,
horizontalalignment='center', verticalalignment='center', zorder=1)
def plot_network(self,
state_sizes=None, state_scale=1.0, state_colors='#ff5500',
arrow_scale=1.0, arrow_curvature=1.0, arrow_labels='weights',
arrow_label_format='%10.2f', max_width=12, max_height=12,
figpadding=0.2, xticks=False, yticks=False):
"""
Draws a network using discs and curved arrows.
The thicknesses and labels of the arrows are taken from the off-diagonal matrix elements in A.
"""
if self.pos is None:
self.layout_automatic()
# number of nodes
n = len(self.pos)
# get bounds and pad figure
xmin = np.min(self.pos[:, 0])
xmax = np.max(self.pos[:, 0])
Dx = xmax - xmin
xmin -= Dx * figpadding
xmax += Dx * figpadding
Dx *= 1 + figpadding
ymin = np.min(self.pos[:, 1])
ymax = np.max(self.pos[:, 1])
Dy = ymax - ymin
ymin -= Dy * figpadding
ymax += Dy * figpadding
Dy *= 1 + figpadding
# sizes of nodes
if state_sizes is None:
state_sizes = 0.5 * state_scale * \
min(Dx, Dy)**2 * np.ones(n) / float(n)
else:
state_sizes = 0.5 * state_scale * \
min(Dx, Dy)**2 * state_sizes / (np.max(state_sizes) * float(n))
# automatic arrow rescaling
arrow_scale *= 1.0 / \
(np.max(self.A - np.diag(np.diag(self.A))) * math.sqrt(n))
# size figure
if (Dx / max_width > Dy / max_height):
figsize = (max_width, Dy * (max_width / Dx))
else:
figsize = (Dx / Dy * max_height, max_height)
fig = plt.gcf()
fig.set_size_inches(figsize, forward=True)
# font sizes
old_fontsize = rcParams['font.size']
rcParams['font.size'] = 20
# remove axis labels
frame = plt.gca()
if not xticks:
frame.axes.get_xaxis().set_ticks([])
if not yticks:
frame.axes.get_yaxis().set_ticks([])
# set node colors
if state_colors is None:
state_colors = '#ff5500' # None is not acceptable
if isinstance(state_colors, str):
state_colors = [state_colors] * n
else:
            # transform from [0,1] to 255-scale
state_colors = [
plt.cm.binary(int(256.0 * state_colors[i])) for i in range(n)]
# set arrow labels
if isinstance(arrow_labels, np.ndarray):
L = arrow_labels
else:
L = np.empty(np.shape(self.A), dtype=object)
if arrow_labels is None:
L[:, :] = ''
elif arrow_labels.lower() == 'weights':
for i in range(n):
for j in range(n):
L[i, j] = arrow_label_format % self.A[i, j]
else:
rcParams['font.size'] = old_fontsize
raise ValueError('invalid arrow label format')
# draw circles
circles = []
for i in range(n):
fig = plt.gcf()
# choose color
c = plt.Circle(self.pos[i], radius=math.sqrt(
0.5 * state_sizes[i]) / 2.0, color=state_colors[i], zorder=2)
circles.append(c)
fig.gca().add_artist(c)
# add annotation
plt.text(self.pos[i][0], self.pos[i][1], str(i), size=14,
horizontalalignment='center', verticalalignment='center',
color='black', zorder=3)
assert len(circles) == n, "%i != %i" % (len(circles), n)
# draw arrows
for i in range(n):
for j in range(i + 1, n):
if (abs(self.A[i, j]) > 0):
self._draw_arrow(self.pos[i, 0], self.pos[i, 1],
self.pos[j, 0], self.pos[j, 1], Dx, Dy,
label=str(L[i, j]),
width=arrow_scale * self.A[i, j],
arrow_curvature=arrow_curvature,
patchA=circles[i], patchB=circles[j],
shrinkA=3, shrinkB=0)
if (abs(self.A[j, i]) > 0):
self._draw_arrow(self.pos[j, 0], self.pos[j, 1],
self.pos[i, 0], self.pos[i, 1], Dx, Dy,
label=str(L[j, i]),
width=arrow_scale * self.A[j, i],
arrow_curvature=arrow_curvature,
patchA=circles[j], patchB=circles[i],
shrinkA=3, shrinkB=0)
# plot
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
rcParams['font.size'] = old_fontsize
return fig
def _find_best_positions(self, G):
"""Finds best positions for the given graph (given as adjacency matrix)
nodes by minimizing a network potential.
"""
        initpos = None
        holddim = None
        if self.xpos is not None and self.ypos is not None:
            # nothing to do; both coordinates are fixed by the user
            return np.vstack((self.xpos, self.ypos)).T
        elif self.xpos is not None:
            y = np.random.random(len(self.xpos))
            initpos = np.vstack((self.xpos, y)).T
            holddim = 0
        elif self.ypos is not None:
            x = np.zeros(len(self.ypos))
            initpos = np.vstack((x, self.ypos)).T
            holddim = 1
best_pos = _fruchterman_reingold(G, pos=initpos, dim=2, hold_dim=holddim)
# rescale fixed to user settings and balance the other coordinate
if self.xpos is not None:
# rescale x to fixed value
best_pos[:, 0] *= (np.max(self.xpos) - np.min(self.xpos)
) / (np.max(best_pos[:, 0]) - np.min(best_pos[:, 0]))
best_pos[:, 0] += np.min(self.xpos) - np.min(best_pos[:, 0])
# rescale y to balance
if np.max(best_pos[:, 1]) - np.min(best_pos[:, 1]) > 0.01:
best_pos[:, 1] *= (np.max(self.xpos) - np.min(self.xpos)
) / (np.max(best_pos[:, 1]) - np.min(best_pos[:, 1]))
if self.ypos is not None:
best_pos[:, 1] *= (np.max(self.ypos) - np.min(self.ypos)
) / (np.max(best_pos[:, 1]) - np.min(best_pos[:, 1]))
best_pos[:, 1] += np.min(self.ypos) - np.min(best_pos[:, 1])
# rescale x to balance
if np.max(best_pos[:, 0]) - np.min(best_pos[:, 0]) > 0.01:
best_pos[:, 0] *= (np.max(self.ypos) - np.min(self.ypos)
) / (np.max(best_pos[:, 0]) - np.min(best_pos[:, 0]))
return best_pos
def layout_automatic(self):
n = len(self.A)
I, J = np.where(self.A > 0.0)
        # note: against intuition this has to be of type float
A = np.zeros((n, n))
A[I, J] = 1
self.pos = self._find_best_positions(A)
def plot_markov_model(P, pos=None, state_sizes=None, state_scale=1.0,
state_colors='#ff5500', minflux=1e-6,
arrow_scale=1.0, arrow_curvature=1.0,
arrow_labels='weights', arrow_label_format='%2.e',
max_width=12, max_height=12, figpadding=0.2):
r"""Plots a network representation of a Markov model transition matrix
This visualization is not optimized for large matrices. It is meant to be
used for the visualization of small models with up to 10-20 states, e.g.
    obtained by an HMM coarse-graining. If used with a large network, the
    automatic node positioning will be very slow and may still look ugly.
Parameters
----------
P : ndarray(n,n) or MSM object with attribute 'transition matrix'
Transition matrix or MSM object
pos : ndarray(n,2), optional, default=None
User-defined positions to draw the states on. If not given, will try
to place them automatically.
state_sizes : ndarray(n), optional, default=None
User-defined areas of the discs drawn for each state. If not given,
the stationary probability of P will be used.
state_colors : string or ndarray(n), optional, default='#ff5500' (orange)
Either a string with a Hex code for a single color used for all states,
or an array of values in [0,1] which will result in a grayscale plot
minflux : float, optional, default=1e-6
The minimal flux (p_i * p_ij) for a transition to be drawn
arrow_scale : float, optional, default=1.0
Relative arrow scale. Set to a value different from 1 to increase
or decrease the arrow width.
arrow_curvature : float, optional, default=1.0
Relative arrow curvature. Set to a value different from 1 to make
arrows more or less curved.
arrow_labels : 'weights', None or a ndarray(n,n) with label strings. Optional, default='weights'
Strings to be placed upon arrows. If None, no labels will be used.
If 'weights', the elements of P will be used. If a matrix of strings is
given by the user these will be used.
    arrow_label_format : str, optional, default='%2.e'
The numeric format to print the arrow labels
    max_width : int, optional, default=12
        The maximum figure width
    max_height : int, optional, default=12
        The maximum figure height
    figpadding : float, optional, default=0.2
        The relative figure size used for the padding
Returns
-------
fig, pos : matplotlib.Figure, ndarray(n,2)
a Figure object containing the plot and the positions of states.
Can be used later to plot a different network representation (e.g. the flux)
Examples
--------
>>> P = np.array([[0.8, 0.15, 0.05, 0.0, 0.0],
... [0.1, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.1, 0.8, 0.0, 0.05],
... [0.0, 0.2, 0.0, 0.8, 0.0],
... [0.0, 0.02, 0.02, 0.0, 0.96]])
>>> plot_markov_model(P) # doctest:+ELLIPSIS
(<matplotlib.figure.Figure..., array...)
"""
from pyemma.msm import analysis as msmana
if isinstance(P, np.ndarray):
P = P.copy()
else:
# MSM object? then get transition matrix first
P = P.transition_matrix.copy()
if state_sizes is None:
state_sizes = msmana.stationary_distribution(P)
if minflux > 0:
F = np.dot(np.diag(msmana.stationary_distribution(P)), P)
I, J = np.where(F < minflux)
P[I, J] = 0.0
plot = NetworkPlot(P, pos=pos)
ax = plot.plot_network(state_sizes=state_sizes, state_scale=state_scale,
state_colors=state_colors,
arrow_scale=arrow_scale, arrow_curvature=arrow_curvature,
arrow_labels=arrow_labels,
arrow_label_format=arrow_label_format,
max_width=max_width, max_height=max_height,
figpadding=figpadding, xticks=False, yticks=False)
return ax, plot.pos
def plot_flux(flux, pos=None, state_sizes=None, state_scale=1.0,
state_colors='#ff5500', minflux=1e-9,
arrow_scale=1.0, arrow_curvature=1.0, arrow_labels='weights',
arrow_label_format='%2.e', max_width=12, max_height=12,
figpadding=0.2, attribute_to_plot='net_flux'):
r"""Plots a network representation of the reactive flux
This visualization is not optimized for large fluxes. It is meant to be used
for the visualization of small models with up to 10-20 states, e.g. obtained
    by a PCCA-based coarse-graining of the full flux. If used with a large
    network, the automatic node positioning will be very slow and may still look
ugly.
Parameters
----------
flux : :class:`ReactiveFlux <pyemma.msm.flux.ReactiveFlux>`
reactive flux object
pos : ndarray(n,2), optional, default=None
User-defined positions to draw the states on. If not given, will set the
x coordinates equal to the committor probability and try to place the y
coordinates automatically
state_sizes : ndarray(n), optional, default=None
User-defined areas of the discs drawn for each state. If not given, the
stationary probability of P will be used
state_colors : string or ndarray(n), optional, default='#ff5500' (orange)
Either a string with a Hex code for a single color used for all states,
or an array of values in [0,1] which will result in a grayscale plot
minflux : float, optional, default=1e-9
The minimal flux for a transition to be drawn
arrow_scale : float, optional, default=1.0
Relative arrow scale. Set to a value different from 1 to increase or
decrease the arrow width.
arrow_curvature : float, optional, default=1.0
Relative arrow curvature. Set to a value different from 1 to make arrows
more or less curved.
arrow_labels : 'weights', None or a ndarray(n,n) with label strings. Optional, default='weights'
Strings to be placed upon arrows. If None, no labels will be used. If
'weights', the elements of P will be used. If a matrix of strings is
given by the user these will be used.
    arrow_label_format : str, optional, default='%2.e'
The numeric format to print the arrow labels
max_width : int (default = 12)
The maximum figure width
    max_height : int (default = 12)
        The maximum figure height
    figpadding : float (default = 0.2)
The relative figure size used for the padding
Returns
-------
    (fig, pos) : matplotlib.Figure instance, ndarray
Axes instances containing the plot. Use pyplot.show() to display it.
The positions of states. Can be used later to plot a different network
representation (e.g. the flux).
Examples
--------
    We first define a reactive flux by taking the following transition
matrix and computing TPT from state 2 to 3
>>> import numpy as np
>>> P = np.array([[0.8, 0.15, 0.05, 0.0, 0.0],
... [0.1, 0.75, 0.05, 0.05, 0.05],
... [0.05, 0.1, 0.8, 0.0, 0.05],
... [0.0, 0.2, 0.0, 0.8, 0.0],
... [0.0, 0.02, 0.02, 0.0, 0.96]])
>>> from pyemma import msm
>>> F = msm.tpt(msm.markov_model(P), [2], [3])
>>> F.flux[:] *= 100
    Scaling the flux by 100 is basically a change of units to get numbers close
    to 1 (and avoids printing many zeros). Now we visualize the flux:
>>> plot_flux(F) # doctest:+ELLIPSIS
(<matplotlib.figure.Figure..., array...)
"""
F = getattr(flux, attribute_to_plot)
if minflux > 0:
I, J = np.where(F < minflux)
F[I, J] = 0.0
c = flux.committor
if state_sizes is None:
state_sizes = flux.stationary_distribution
plot = NetworkPlot(F, pos=pos, xpos=c)
ax = plot.plot_network(state_sizes=state_sizes, state_scale=state_scale,
state_colors=state_colors,
arrow_scale=arrow_scale, arrow_curvature=arrow_curvature,
arrow_labels=arrow_labels,
arrow_label_format=arrow_label_format,
max_width=max_width, max_height=max_height,
figpadding=figpadding, xticks=True, yticks=False)
plt.xlabel('Committor probability')
return ax, plot.pos
|
trendelkampschroer/PyEMMA
|
pyemma/plots/networks.py
|
Python
|
bsd-2-clause
| 23,764
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_chartarea01.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_2_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test XlsxWriter chartarea properties."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [82933248, 82952960]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_chartarea({
'border': {'none': 1},
'fill': {'color': 'red'}
})
chart.set_plotarea({
'border': {'color': 'yellow', 'width': 1, 'dash_type': 'dash'},
'fill': {'color': '#92D050'}
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
jkyeung/XlsxWriter
|
xlsxwriter/test/comparison/test_chart_chartarea02.py
|
Python
|
bsd-2-clause
| 1,825
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
*****************************
NOTE that this is a modified version from web2py 2.8.2. For full details on what has changed, see
https://github.com/OpenTreeOfLife/opentree/commits/master/custom_import.py
This file was patched (by jimallman, on 10/10/2017) to restore working python
imports. See the problems and solution reported here:
https://groups.google.com/forum/#!topic/web2py/k5193zQX6kM
*****************************
"""
import __builtin__
import os
import sys
import threading
import traceback
from gluon import current
NATIVE_IMPORTER = __builtin__.__import__
INVALID_MODULES = set(('', 'gluon', 'applications', 'custom_import'))
# backward compatibility API
def custom_import_install():
if __builtin__.__import__ == NATIVE_IMPORTER:
INVALID_MODULES.update(sys.modules.keys())
__builtin__.__import__ = custom_importer
def track_changes(track=True):
assert track in (True, False), "must be True or False"
current.request._custom_import_track_changes = track
def is_tracking_changes():
return current.request._custom_import_track_changes
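# Usage sketch (editorial): in stock web2py, change tracking is typically turned
# on from a model or controller with:
#
#   from gluon.custom_import import track_changes
#   track_changes(True)
#
# This patched copy lives inside the application instead of gluon/, so the
# import path may need to be adjusted accordingly.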
class CustomImportException(ImportError):
pass
def custom_importer(name, globals=None, locals=None, fromlist=None, level=-1):
"""
The web2py custom importer. Like the standard Python importer but it
tries to transform import statements as something like
"import applications.app_name.modules.x".
    If the import fails, fall back on the native importer.
"""
globals = globals or {}
locals = locals or {}
fromlist = fromlist or []
try:
if current.request._custom_import_track_changes:
base_importer = TRACK_IMPORTER
else:
base_importer = NATIVE_IMPORTER
except: # there is no current.request (should never happen)
base_importer = NATIVE_IMPORTER
# if not relative and not from applications:
if hasattr(current, 'request') \
and level <= 0 \
and not name.partition('.')[0] in INVALID_MODULES \
and isinstance(globals, dict):
import_tb = None
try:
try:
oname = name if not name.startswith('.') else '.'+name
return NATIVE_IMPORTER(oname, globals, locals, fromlist, level)
except ImportError:
items = current.request.folder.split(os.path.sep)
if not items[-1]:
items = items[:-1]
modules_prefix = '.'.join(items[-2:]) + '.modules'
if not fromlist:
# import like "import x" or "import x.y"
result = None
for itemname in name.split("."):
itemname = itemname.encode('utf-8')
new_mod = base_importer(
modules_prefix, globals, locals, [itemname], level)
try:
result = result or new_mod.__dict__[itemname]
except KeyError, e:
raise ImportError, 'Cannot import module %s' % str(e)
modules_prefix += "." + itemname
return result
else:
# import like "from x import a, b, ..."
pname = modules_prefix + "." + name
return base_importer(pname, globals, locals, fromlist, level)
except ImportError, e1:
import_tb = sys.exc_info()[2]
try:
return NATIVE_IMPORTER(name, globals, locals, fromlist, level)
except ImportError, e3:
                raise ImportError, e1, import_tb # there is an import error in the module
except Exception, e2:
raise e2 # there is an error in the module
finally:
if import_tb:
import_tb = None
return NATIVE_IMPORTER(name, globals, locals, fromlist, level)
class TrackImporter(object):
"""
An importer tracking the date of the module files and reloading them when
they have changed.
"""
THREAD_LOCAL = threading.local()
PACKAGE_PATH_SUFFIX = os.path.sep + "__init__.py"
def __init__(self):
self._import_dates = {} # Import dates of the files of the modules
def __call__(self, name, globals=None, locals=None, fromlist=None, level=-1):
"""
The import method itself.
"""
globals = globals or {}
locals = locals or {}
fromlist = fromlist or []
try:
# Check the date and reload if needed:
self._update_dates(name, globals, locals, fromlist, level)
# Try to load the module and update the dates if it works:
result = NATIVE_IMPORTER(name, globals, locals, fromlist, level)
# Module maybe loaded for the 1st time so we need to set the date
self._update_dates(name, globals, locals, fromlist, level)
return result
except Exception, e:
raise # Don't hide something that went wrong
def _update_dates(self, name, globals, locals, fromlist, level):
"""
Update all the dates associated to the statement import. A single
import statement may import many modules.
"""
self._reload_check(name, globals, locals, level)
for fromlist_name in fromlist or []:
pname = "%s.%s" % (name, fromlist_name)
self._reload_check(pname, globals, locals, level)
def _reload_check(self, name, globals, locals, level):
"""
Update the date associated to the module and reload the module if
the file has changed.
"""
module = sys.modules.get(name)
file = self._get_module_file(module)
if file:
date = self._import_dates.get(file)
new_date = None
reload_mod = False
mod_to_pack = False # Module turning into a package? (special case)
try:
new_date = os.path.getmtime(file)
except:
self._import_dates.pop(file, None) # Clean up
# Handle module changing in package and
#package changing in module:
if file.endswith(".py"):
# Get path without file ext:
file = os.path.splitext(file)[0]
reload_mod = os.path.isdir(file) \
and os.path.isfile(file + self.PACKAGE_PATH_SUFFIX)
mod_to_pack = reload_mod
else: # Package turning into module?
file += ".py"
reload_mod = os.path.isfile(file)
if reload_mod:
new_date = os.path.getmtime(file) # Refresh file date
if reload_mod or not date or new_date > date:
self._import_dates[file] = new_date
if reload_mod or (date and new_date > date):
if mod_to_pack:
# Module turning into a package:
mod_name = module.__name__
del sys.modules[mod_name] # Delete the module
# Reload the module:
NATIVE_IMPORTER(mod_name, globals, locals, [], level)
else:
reload(module)
def _get_module_file(self, module):
"""
Get the absolute path file associated to the module or None.
"""
file = getattr(module, "__file__", None)
if file:
# Make path absolute if not:
file = os.path.splitext(file)[0] + ".py" # Change .pyc for .py
if file.endswith(self.PACKAGE_PATH_SUFFIX):
file = os.path.dirname(file) # Track dir for packages
return file
TRACK_IMPORTER = TrackImporter()
| OpenTreeOfLife/opentree | custom_import.py | Python | bsd-2-clause | 7,868 |
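For orientation, web2py normally installs this importer itself at startup; the sketch below only illustrates how the hooks above fit together. It is not runnable outside a web2py process: track_changes() touches current.request, which only exists while a request is being served.

import custom_import

# Replace the builtin __import__ with the web2py-aware importer ...
custom_import.custom_import_install()
# ... and ask it to reload application modules whose files change on disk.
custom_import.track_changes(True)

# From inside applications/<app>/, a plain "import mymodule" that fails is then
# retried as "import applications.<app>.modules.mymodule".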
"""income_tract.py
Extract the household income per tract for each cbsa, using the
crosswalk between CBSA and Tracts.
"""
import csv
import os
# Income file comprises estimates and margin of error
income_rows = [5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35]
#
# Import data
#
## CBSA to tract
tr_to_cbsa = {}
with open('data/crosswalks/cbsa_tract.txt', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
tr_to_cbsa[rows[1]] = rows[0]
## Income data at blockgroup level
incomes = {}
with open('data/income/us/ACS_14_5YR_B19001.csv', 'r') as source:
reader = csv.reader(source, delimiter=',')
reader.next()
reader.next()
for rows in reader:
incomes[rows[1]] = [int(rows[i]) for i in income_rows]
#
# Group by CBSA
#
incomes_cbsa = {}
in_cbsa = 0
out_cbsa = 0
for bg in incomes:
tr = bg[:11]
if tr in tr_to_cbsa:
in_cbsa += 1
cbsa = tr_to_cbsa[tr]
if cbsa not in incomes_cbsa:
incomes_cbsa[cbsa] = {}
if tr not in incomes_cbsa[cbsa]:
incomes_cbsa[cbsa][tr] = [0 for i in income_rows]
## Add value from blockgroup
for i,val in enumerate(incomes[bg]):
incomes_cbsa[cbsa][tr][i] += val
else:
out_cbsa += 1
print '%s tracts are inside CBSAs'%in_cbsa
print '%s tracts are outside CBSAs'%out_cbsa
#
# Save the data
#
for cbsa in incomes_cbsa:
## Create dir if needed
if not os.path.isdir('data/income/cbsa/%s'%cbsa):
os.mkdir('data/income/cbsa/%s'%cbsa)
## Save
with open('data/income/cbsa/%s/%s_income_tract.txt'%(cbsa, cbsa), 'w') as output:
output.write("TRACT FIP\tLess than $10000\t$10000-$14999\t$15000-$19999\t$20000-$24999\t$25000-$29999\t$30000-$34999\t$35000-$39999\t$40000-$44999\t$45000-$49999\t$50000-$59999\t$60000-$74999\t$75000-$99999\t$100000-$124999\t$125000-$149999\t$150000-$199999\t$200000 or more\n")
for tr in incomes_cbsa[cbsa]:
output.write(str(tr)+'\t')
output.write('\t'.join(map(str, incomes_cbsa[cbsa][tr])))
output.write('\n')
| scities-data/metro-atlas_2014 | bin/income/income_tract.py | Python | bsd-2-clause | 2,140 |
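The heart of the script above is collapsing 12-digit block-group FIPS codes to their 11-digit tract prefix and summing the income bins per tract. A toy version of that aggregation, with made-up codes and only three bins:

# Made-up block-group data: FIPS code -> counts in three income bins.
incomes = {
    '360470001001': [10, 5, 2],
    '360470001002': [3, 7, 1],
    '360610002001': [4, 4, 4],
}

by_tract = {}
for bg, bins in incomes.items():
    tract = bg[:11]  # the first 11 digits of a block-group FIPS identify its tract
    totals = by_tract.setdefault(tract, [0] * len(bins))
    for i, val in enumerate(bins):
        totals[i] += val

print(by_tract)
# {'36047000100': [13, 12, 3], '36061000200': [4, 4, 4]}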
class TschunkMap1(): #(TschunkMap):
def __init__(self):
self.img = 'img/map1.png'
self.figure = 'todo'
self.rows = 15
self.cols = 7
self.origin_x = 1
self.origin_y = 13
self.initial_direction = (0, -1)
| TeamTschunk/TschunkView | map1.py | Python | bsd-2-clause | 267 |
from collections import OrderedDict
import os.path
import shutil
import pytest
from edalize import get_edatool
tests_dir = os.path.dirname(__file__)
class TestFixture:
"""A fixture that makes an edalize backend with work_root directory
Create this object using the make_edalize_test factory fixture. This passes
through its `tool_name` and sets up a temporary directory for `work_root`,
then passes its keyword arguments through to the TestFixture initializer.
Args:
tool_name: The name of the tool
work_root: The directory to treat as a work root
test_name: The name to call the backend. Defaults to
`'test_<tool_name>_0'`
param_types: A list of parameter types. Defaults to `['plusarg',
'vlogdefine', 'vlogparam']` (the parameter types supported
by most simulators).
files: A list of files to use. Defaults to `None`, which means to use
:py:data:`FILES`.
tool_options: Dictionary passed to _setup_backend. Defaults to `{}`.
ref_dir: A reference directory relative to `test_<tool_name>`. Defaults
to `'.'`
use_vpi: If true, set up backend with definitions from :attr:`VPI`.
Defaults to `False`.
"""
def __init__(
self,
tool_name,
work_root,
test_name=None,
param_types=["plusarg", "vlogdefine", "vlogparam"],
files=None,
tool_options={},
ref_dir=".",
use_vpi=False,
toplevel="top_module",
):
raw_ref_dir = os.path.join(tests_dir, "test_" + tool_name, ref_dir)
self.test_name = (
"test_{}_0".format(tool_name) if test_name is None else test_name
)
self.ref_dir = os.path.normpath(raw_ref_dir)
self.work_root = work_root
self.backend = _setup_backend(
self.test_name,
tool_name,
param_types,
files,
tool_options,
work_root,
use_vpi,
toplevel,
)
def compare_files(self, files, ref_subdir="."):
"""Check some files in the work root match those in the ref directory
The files argument gives the list of files to check. These are
interpreted as paths relative to the work directory and relative to
self.ref_dir / ref_subdir.
This is a wrapper around edalize_common.compare_files: see its
documentation for how to use the :envvar:`GOLDEN_RUN` environment
variable to copy across a golden reference.
"""
ref_dir = os.path.normpath(os.path.join(self.ref_dir, ref_subdir))
return compare_files(ref_dir, self.work_root, files)
def copy_to_work_root(self, path):
shutil.copy(
os.path.join(self.ref_dir, path), os.path.join(self.work_root, path)
)
@pytest.fixture
def make_edalize_test(monkeypatch, tmpdir):
"""A factory fixture to make an edalize backend with work_root directory
The returned factory method takes a `tool_name` (the name of the tool) and
the keyword arguments supported by :class:`TestFixture`. It returns a
:class:`TestFixture` object, whose `work_root` is a temporary directory.
"""
# Prepend directory `mock_commands` to PATH environment variable
monkeypatch.setenv("PATH", os.path.join(tests_dir, "mock_commands"), ":")
created = []
def _fun(tool_name, **kwargs):
work_root = tmpdir / str(len(created))
work_root.mkdir()
fixture = TestFixture(tool_name, str(work_root), **kwargs)
created.append(fixture)
return fixture
return _fun
def compare_files(ref_dir, work_root, files):
"""Check that all *files* in *work_root* match those in *ref_dir*.
If the environment variable :envvar:`GOLDEN_RUN` is set, the *files* in
*work_root* are copied to *ref_dir* to become the new reference.
"""
for f in files:
reference_file = os.path.join(ref_dir, f)
generated_file = os.path.join(work_root, f)
assert os.path.exists(generated_file)
if "GOLDEN_RUN" in os.environ:
shutil.copy(generated_file, reference_file)
with open(reference_file) as fref, open(generated_file) as fgen:
assert fref.read() == fgen.read(), f
def param_gen(paramtypes):
"""Generate dictionary of definitions in *paramtypes* list."""
defs = OrderedDict()
for paramtype in paramtypes:
for datatype in ["bool", "int", "str"]:
if datatype == "int":
default = 42
elif datatype == "str":
default = "hello"
else:
default = True
defs[paramtype + "_" + datatype] = {
"datatype": datatype,
"default": default,
"description": "",
"paramtype": paramtype,
}
return defs
def _setup_backend(
name, tool, paramtypes, files, tool_options, work_root, use_vpi, toplevel
):
"""Set up a backend.
The backend is called *name*, is set up for *tool* with *tool_options*,
*paramtypes*, and, if *use_vpi* is ``True``, definitions from :attr:`VPI`.
If *files* is None, files are taken from :attr:`FILES`.
"""
parameters = param_gen(paramtypes)
_vpi = []
if use_vpi:
_vpi = VPI
for v in VPI:
for f in v["src_files"]:
_f = os.path.join(work_root, f)
if not os.path.exists(os.path.dirname(_f)):
os.makedirs(os.path.dirname(_f))
with open(_f, "a"):
os.utime(_f, None)
edam = {
"name": name,
"files": FILES if files is None else files,
"parameters": parameters,
"tool_options": {tool: tool_options},
"toplevel": toplevel,
"vpi": _vpi,
}
return get_edatool(tool)(edam=edam, work_root=work_root)
FILES = [
{"name": "qip_file.qip", "file_type": "QIP"},
{"name": "qsys_file", "file_type": "QSYS"},
{"name": "sdc_file", "file_type": "SDC"},
{"name": "bmm_file", "file_type": "BMM"},
{"name": "sv_file.sv", "file_type": "systemVerilogSource"},
{"name": "pcf_file.pcf", "file_type": "PCF"},
{"name": "ucf_file.ucf", "file_type": "UCF"},
{"name": "user_file", "file_type": "user"},
{"name": "tcl_file.tcl", "file_type": "tclSource"},
{"name": "waiver_file.waiver", "file_type": "waiver"},
{"name": "vlog_file.v", "file_type": "verilogSource"},
{"name": "vlog05_file.v", "file_type": "verilogSource-2005"},
{"name": "vlog_incfile", "file_type": "verilogSource", "is_include_file": True},
{"name": "vhdl_file.vhd", "file_type": "vhdlSource"},
{"name": "vhdl_lfile", "file_type": "vhdlSource", "logical_name": "libx"},
{"name": "vhdl2008_file", "file_type": "vhdlSource-2008"},
{"name": "xci_file.xci", "file_type": "xci"},
{"name": "xdc_file.xdc", "file_type": "xdc"},
{"name": "bootrom.mem", "file_type": "mem"},
{"name": "c_file.c", "file_type": "cSource"},
{"name": "cpp_file.cpp", "file_type": "cppSource"},
{"name": "c_header.h", "file_type": "cSource", "is_include_file": True},
{"name": "c_header.h", "file_type": "cppSource", "is_include_file": True},
{"name": "config.vbl", "file_type": "veribleLintRules"},
{"name": "verible_waiver.vbw", "file_type": "veribleLintWaiver"},
{"name": "verible_waiver2.vbw", "file_type": "veribleLintWaiver"},
{"name": "config.sby.j2", "file_type": "sbyConfigTemplate"},
{"name": "another_sv_file.sv", "file_type": "systemVerilogSource"},
{"name": "pdc_constraint_file.pdc", "file_type": "PDC"},
{"name": "pdc_floorplan_constraint_file.pdc", "file_type": "FPPDC"},
{"name": "lpf_file.lpf", "file_type": "LPF"},
]
"""Files of all supported file types."""
VPI = [
{
"src_files": ["src/vpi_1/f1", "src/vpi_1/f3"],
"include_dirs": ["src/vpi_1/"],
"libs": ["some_lib"],
"name": "vpi1",
},
{"src_files": ["src/vpi_2/f4"], "include_dirs": [], "libs": [], "name": "vpi2"},
]
"""Predefined VPI modules to build."""
| SymbiFlow/edalize | tests/edalize_common.py | Python | bsd-2-clause | 8,229 |
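The per-backend tests are built on the make_edalize_test factory above; a minimal sketch of such a test is shown below. The tool name 'icarus' and the compared file names are assumptions made for illustration, not something defined in this module.

def test_some_backend(make_edalize_test):
    tf = make_edalize_test('icarus', param_types=['vlogdefine', 'vlogparam'])

    # configure() is expected to write the build files into tf.work_root ...
    tf.backend.configure()
    # ... which are then checked against the golden references under tf.ref_dir.
    tf.compare_files(['Makefile', tf.test_name + '.scr'])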
# Inviwo Python script
import inviwo
import math
import time
start = time.clock()
scale = 1
d = 15
steps = 120
for i in range(0, steps):
r = (2 * 3.14 * i) / steps
x = d*math.sin(r)
z = -d*math.cos(r)
inviwo.setPropertyValue("EntryExitPoints.camera",((x*scale,3*scale,z*scale),(0,0,0),(0,1,0)))
for i in range(0, steps):
r = (2 * 3.14 * i) / (steps)
x = 1.0*math.sin(r)
z = 1.0*math.cos(r)
inviwo.setCameraUp("EntryExitPoints.camera",(x*scale,z*scale,0))
end = time.clock()
fps = 2*steps / (end - start)
fps = round(fps,3)
print("Frames per second: " + str(fps))
print("Time per frame: " + str(round(1000/fps,1)) + " ms")
| sarbi127/inviwo | data/scripts/camerarotation.py | Python | bsd-2-clause | 687 |
from twython import Twython
from django.conf import settings
from .base import BaseSource
class TwitterSource(BaseSource):
def __init__(self, uid=None, screen_name=None):
if uid is None and screen_name is None:
raise ValueError
self.uid = uid
self.screen_name = screen_name
def fetch(self):
APP_KEY = settings.SOCIAL_AUTH_TWITTER_KEY
APP_SECRET = settings.SOCIAL_AUTH_TWITTER_SECRET
twitter = Twython(
APP_KEY,
APP_SECRET,
settings.TWITTER_ACCESS_TOKEN,
settings.TWITTER_ACCESS_TOKEN_SECRET)
if self.uid:
tweets = twitter.get_user_timeline(user_id=self.uid)
else:
tweets = twitter.get_user_timeline(screen_name=self.screen_name)
for tweet in tweets:
yield {
'id': tweet['id'],
'content': tweet['text'],
'created_at': tweet['created_at'],
'entities': tweet['entities']
}
| dudarev/reddoid | reddoid/sources/backends/twitter.py | Python | bsd-2-clause | 1,028 |
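A hypothetical use of this backend inside a Django project (with the Twitter credentials present in settings and network access available):

source = TwitterSource(screen_name='example')   # or TwitterSource(uid=12345)
for item in source.fetch():                     # fetch() is a generator of dicts
    print(item['id'], item['content'])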
import logging
from ..calling_conventions import SYSCALL_CC
from ..errors import AngrUnsupportedSyscallError
from ..procedures import SIM_PROCEDURES as P
from .simos import SimOS
_l = logging.getLogger('angr.simos.userland')
class SimUserland(SimOS):
"""
This is a base class for any SimOS that wants to support syscalls.
It uses the CLE kernel object to provide addresses for syscalls. Syscalls will be emulated as a jump to one of these
addresses, where a SimProcedure from the syscall library provided at construction time will be executed.
"""
def __init__(self, project, syscall_library=None, **kwargs):
super(SimUserland, self).__init__(project, **kwargs)
self.syscall_library = syscall_library.copy()
self.kernel_base = None
def configure_project(self):
super(SimUserland, self).configure_project()
self.kernel_base = self.project.loader.kernel_object.mapped_base
def syscall(self, state, allow_unsupported=True):
"""
Given a state, return the procedure corresponding to the current syscall.
This procedure will have .syscall_number, .display_name, and .addr set.
:param state: The state to get the syscall number from
        :param allow_unsupported: Whether to return a "dummy" syscall instead of raising an unsupported exception
"""
if state.os_name in SYSCALL_CC[state.arch.name]:
cc = SYSCALL_CC[state.arch.name][state.os_name](state.arch)
else:
# Use the default syscall calling convention - it may bring problems
_l.warning("No syscall calling convention available for %s/%s", state.arch.name, state.os_name)
cc = SYSCALL_CC[state.arch.name]['default'](state.arch)
sym_num = cc.syscall_num(state)
possible = state.solver.eval_upto(sym_num, 2)
if len(possible) == 0:
raise AngrUnsupportedSyscallError("The program state is not satisfiable")
elif len(possible) == 1:
num = possible[0]
elif allow_unsupported:
num = self.syscall_library.maximum_syscall_number(self.arch.name) + 1 if self.syscall_library else 0
else:
raise AngrUnsupportedSyscallError("Got a symbolic syscall number")
proc = self.syscall_from_number(num, allow_unsupported=allow_unsupported)
proc.cc = cc
return proc
def is_syscall_addr(self, addr):
"""
Return whether or not the given address corresponds to a syscall.
"""
if self.kernel_base is None:
return False
addr -= self.kernel_base
return 0 <= addr < 0x4000 # TODO: make this number come from somewhere
def syscall_from_addr(self, addr, allow_unsupported=True):
"""
Get a syscall SimProcedure from an address.
:param addr: The address to convert to a syscall SimProcedure
:param allow_unsupported: Whether to return a dummy procedure for an unsupported syscall instead of raising an
exception.
:return: The SimProcedure for the syscall, or None if the address is not a syscall address.
"""
if not self.is_syscall_addr(addr):
return None
number = addr - self.kernel_base
return self.syscall_from_number(number, allow_unsupported=allow_unsupported)
def syscall_from_number(self, number, allow_unsupported=True):
if not allow_unsupported and not self.syscall_library:
raise AngrUnsupportedSyscallError("%s does not have a library of syscalls implemented" % self.name)
addr = number + self.kernel_base
if self.syscall_library is None:
proc = P['stubs']['syscall']()
elif not allow_unsupported and not self.syscall_library.has_implementation(number, self.arch):
raise AngrUnsupportedSyscallError("No implementation for syscall %d" % number)
else:
proc = self.syscall_library.get(number, self.arch)
proc.addr = addr
return proc
| f-prettyland/angr | angr/simos/userland.py | Python | bsd-2-clause | 4,085 |
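The docstrings above describe syscalls as jumps into a small window above the loader's kernel object, so the number/address mapping is plain offset arithmetic. A self-contained illustration (the base address is an arbitrary stand-in, and the window size mirrors the hard-coded 0x4000 above):

KERNEL_BASE = 0x4000000  # stand-in for project.loader.kernel_object.mapped_base
WINDOW = 0x4000

def syscall_addr_from_number(number):
    return KERNEL_BASE + number

def syscall_number_from_addr(addr):
    return addr - KERNEL_BASE

def is_syscall_addr(addr):
    return 0 <= addr - KERNEL_BASE < WINDOW

addr = syscall_addr_from_number(60)
assert is_syscall_addr(addr)
assert syscall_number_from_addr(addr) == 60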
#!/usr/env/bin/ python3
from setuptools import setup, Extension
#
#CXX_FLAGS = "-O3 -std=gnu++11 -Wall -Wno-comment"
#
## List of C/C++ sources that will conform the library
#sources = [
#
# "andrnx/clib/android.c",
#
#]
setup(name="andrnx",
version="0.1",
description="Package to convert from GNSS logger to Rinex files",
author='Miquel Garcia',
author_email='info@rokubun.cat',
url='https://www.rokubun.cat',
packages=['andrnx'],
test_suite="andrnx.test",
scripts=['bin/gnsslogger_to_rnx'])
| rokubun/android_rinex | setup.py | Python | bsd-2-clause | 547 |
#
# Copyright (C) 2014 Mathias Weber <mathew.weber@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
from user import *
from event import *
| mweb/flask-starter | app/models/__init__.py | Python | bsd-2-clause | 1,410 |
#! /usr/bin/python3
import sys
sys.path.append(".")
from linear.common.coordinator import Coordinator
import linear.twopc.config as config
if len(sys.argv) != 3:
raise RuntimeError("Invalid arguments. Call like this <name> <num_partitions>")
coordinator = Coordinator(sys.argv[1], int(sys.argv[2]), config.COORDINATOR_PORT, config.COORDINATOR_PORT_INTERNAL)
while coordinator.running:
coordinator.update()
coordinator.close()
| kaimast/inanutshell | linear/twopc/coordinator.py | Python | bsd-2-clause | 442 |
#!/usr/bin/env python
import messagebird
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--accessKey', help='access key for MessageBird API', type=str, required=True)
parser.add_argument('--webhookId', help='webhook that you want to read', type=str, required=True)
args = vars(parser.parse_args())
try:
client = messagebird.Client(args['accessKey'])
webhook = client.conversation_read_webhook(args['webhookId'])
# Print the object information.
print('The following information was returned as a Webhook object:')
print(webhook)
except messagebird.client.ErrorException as e:
    print('An error occurred while requesting a Webhook object:')
for error in e.errors:
print(' code : %d' % error.code)
print(' description : %s' % error.description)
print(' parameter : %s\n' % error.parameter)
| messagebird/python-rest-api | examples/conversation_read_webhook.py | Python | bsd-2-clause | 878 |
import psidialogs
s = psidialogs.choice(["1", "2", "3"], "Choose a number!")
if s is not None:
print(s)
| ponty/psidialogs | psidialogs/examples/choice.py | Python | bsd-2-clause | 109 |
##
##
# File auto-generated by PythonFileGenerator
__all__ = [
'ClusterMembersResponse',
'ContextsResponse',
'StatusResponse'
]
from .ClusterMembersResponse import ClusterMembersResponse
from .ContextsResponse import ContextsResponse
from .StatusResponse import StatusResponse
| mjames-upc/python-awips | dynamicserialize/dstypes/com/raytheon/uf/common/management/response/diagnostic/__init__.py | Python | bsd-3-clause | 327 |
from __future__ import absolute_import, print_function
import numpy as np
import warnings
def _bit_length_26(x):
if x == 0:
return 0
elif x == 1:
return 1
else:
return len(bin(x)) - 2
try:
from scipy.lib._version import NumpyVersion
except ImportError:
import re
string_types = basestring
class NumpyVersion():
"""Parse and compare numpy version strings.
Numpy has the following versioning scheme (numbers given are examples; they
        can be >9 in principle):
- Released version: '1.8.0', '1.8.1', etc.
- Alpha: '1.8.0a1', '1.8.0a2', etc.
- Beta: '1.8.0b1', '1.8.0b2', etc.
- Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
- Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
- Development versions after a1: '1.8.0a1.dev-f1234afa',
'1.8.0b2.dev-f1234afa',
'1.8.1rc1.dev-f1234afa', etc.
- Development versions (no git hash available): '1.8.0.dev-Unknown'
Comparing needs to be done against a valid version string or other
`NumpyVersion` instance.
Parameters
----------
vstring : str
Numpy version string (``np.__version__``).
Notes
-----
All dev versions of the same (pre-)release compare equal.
Examples
--------
>>> from scipy.lib._version import NumpyVersion
>>> if NumpyVersion(np.__version__) < '1.7.0':
... print('skip')
skip
>>> NumpyVersion('1.7') # raises ValueError, add ".0"
"""
def __init__(self, vstring):
self.vstring = vstring
ver_main = re.match(r'\d[.]\d+[.]\d+', vstring)
if not ver_main:
raise ValueError("Not a valid numpy version string")
self.version = ver_main.group()
self.major, self.minor, self.bugfix = [int(x) for x in
self.version.split('.')]
if len(vstring) == ver_main.end():
self.pre_release = 'final'
else:
alpha = re.match(r'a\d', vstring[ver_main.end():])
beta = re.match(r'b\d', vstring[ver_main.end():])
rc = re.match(r'rc\d', vstring[ver_main.end():])
pre_rel = [m for m in [alpha, beta, rc] if m is not None]
if pre_rel:
self.pre_release = pre_rel[0].group()
else:
self.pre_release = ''
self.is_devversion = bool(re.search(r'.dev-', vstring))
def _compare_version(self, other):
"""Compare major.minor.bugfix"""
if self.major == other.major:
if self.minor == other.minor:
if self.bugfix == other.bugfix:
vercmp = 0
elif self.bugfix > other.bugfix:
vercmp = 1
else:
vercmp = -1
elif self.minor > other.minor:
vercmp = 1
else:
vercmp = -1
elif self.major > other.major:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare_pre_release(self, other):
"""Compare alpha/beta/rc/final."""
if self.pre_release == other.pre_release:
vercmp = 0
elif self.pre_release == 'final':
vercmp = 1
elif other.pre_release == 'final':
vercmp = -1
elif self.pre_release > other.pre_release:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare(self, other):
if not isinstance(other, (string_types, NumpyVersion)):
raise ValueError("Invalid object to compare with NumpyVersion.")
if isinstance(other, string_types):
other = NumpyVersion(other)
vercmp = self._compare_version(other)
if vercmp == 0:
# Same x.y.z version, check for alpha/beta/rc
vercmp = self._compare_pre_release(other)
if vercmp == 0:
# Same version and same pre-release, check if dev version
if self.is_devversion is other.is_devversion:
vercmp = 0
elif self.is_devversion:
vercmp = -1
else:
vercmp = 1
return vercmp
def __lt__(self, other):
return self._compare(other) < 0
def __le__(self, other):
return self._compare(other) <= 0
def __eq__(self, other):
return self._compare(other) == 0
def __ne__(self, other):
return self._compare(other) != 0
def __gt__(self, other):
return self._compare(other) > 0
def __ge__(self, other):
return self._compare(other) >= 0
        def __repr__(self):
return "NumpyVersion(%s)" % self.vstring
class ResettableCache(dict):
"""
    Dictionary whose elements may depend on one another.
If entry `B` depends on entry `A`, changing the values of entry `A` will
    reset the value of entry `B` to a default (None); deleting entry `A` will
delete entry `B`. The connections between entries are stored in a
`_resetdict` private attribute.
Parameters
----------
reset : dictionary, optional
An optional dictionary, associated a sequence of entries to any key
of the object.
items : var, optional
An optional dictionary used to initialize the dictionary
Examples
--------
>>> reset = dict(a=('b',), b=('c',))
>>> cache = resettable_cache(a=0, b=1, c=2, reset=reset)
>>> assert_equal(cache, dict(a=0, b=1, c=2))
>>> print("Try resetting a")
>>> cache['a'] = 1
>>> assert_equal(cache, dict(a=1, b=None, c=None))
>>> cache['c'] = 2
>>> assert_equal(cache, dict(a=1, b=None, c=2))
>>> cache['b'] = 0
>>> assert_equal(cache, dict(a=1, b=0, c=None))
>>> print("Try deleting b")
>>> del(cache['a'])
>>> assert_equal(cache, {})
"""
def __init__(self, reset=None, **items):
self._resetdict = reset or {}
dict.__init__(self, **items)
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
# if hasattr needed for unpickling with protocol=2
if hasattr(self, '_resetdict'):
for mustreset in self._resetdict.get(key, []):
self[mustreset] = None
def __delitem__(self, key):
dict.__delitem__(self, key)
for mustreset in self._resetdict.get(key, []):
del(self[mustreset])
# def __getstate__(self):
# print('pickling wrapper', self.__dict__)
# return self.__dict__
#
# def __setstate__(self, dict_):
# print('unpickling wrapper', dict_)
# self.__dict__.update(dict_)
resettable_cache = ResettableCache
def _next_regular(target):
"""
Find the next regular number greater than or equal to target.
Regular numbers are composites of the prime factors 2, 3, and 5.
Also known as 5-smooth numbers or Hamming numbers, these are the optimal
size for inputs to FFTPACK.
Target must be a positive integer.
"""
if target <= 6:
return target
# Quickly check if it's already a power of 2
if not (target & (target - 1)):
return target
match = float('inf') # Anything found will be smaller
p5 = 1
while p5 < target:
p35 = p5
while p35 < target:
# Ceiling integer division, avoiding conversion to float
# (quotient = ceil(target / p35))
quotient = -(-target // p35)
# Quickly find next power of 2 >= quotient
try:
p2 = 2 ** ((quotient - 1).bit_length())
except AttributeError:
# Fallback for Python <2.7
p2 = 2 ** _bit_length_26(quotient - 1)
N = p2 * p35
if N == target:
return N
elif N < match:
match = N
p35 *= 3
if p35 == target:
return p35
if p35 < match:
match = p35
p5 *= 5
if p5 == target:
return p5
if p5 < match:
match = p5
return match
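# Editor's note (illustration, not part of the original module): a few values of
# the helper above, i.e. the smallest 5-smooth number >= target:
#     _next_regular(7)   -> 8    (2**3)
#     _next_regular(121) -> 125  (5**3)
#     _next_regular(509) -> 512  (2**9)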
if NumpyVersion(np.__version__) >= '1.7.1':
np_matrix_rank = np.linalg.matrix_rank
else:
def np_matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
        .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = np.asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M == 0))
S = np.linalg.svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * np.finfo(S.dtype).eps
return np.sum(S > tol)
class CacheWriteWarning(UserWarning):
pass
class CachedAttribute(object):
def __init__(self, func, cachename=None, resetlist=None):
self.fget = func
self.name = func.__name__
self.cachename = cachename or '_cache'
self.resetlist = resetlist or ()
def __get__(self, obj, type=None):
if obj is None:
return self.fget
# Get the cache or set a default one if needed
_cachename = self.cachename
_cache = getattr(obj, _cachename, None)
if _cache is None:
setattr(obj, _cachename, resettable_cache())
_cache = getattr(obj, _cachename)
# Get the name of the attribute to set and cache
name = self.name
_cachedval = _cache.get(name, None)
# print("[_cachedval=%s]" % _cachedval)
if _cachedval is None:
# Call the "fget" function
_cachedval = self.fget(obj)
# Set the attribute in obj
# print("Setting %s in cache to %s" % (name, _cachedval))
try:
_cache[name] = _cachedval
except KeyError:
setattr(_cache, name, _cachedval)
# Update the reset list if needed (and possible)
resetlist = self.resetlist
        if resetlist:
try:
_cache._resetdict[name] = self.resetlist
except AttributeError:
pass
# else:
# print("Reading %s from cache (%s)" % (name, _cachedval))
return _cachedval
def __set__(self, obj, value):
errmsg = "The attribute '%s' cannot be overwritten" % self.name
warnings.warn(errmsg, CacheWriteWarning)
class _cache_readonly(object):
"""
Decorator for CachedAttribute
"""
def __init__(self, cachename=None, resetlist=None):
self.func = None
self.cachename = cachename
self.resetlist = resetlist or None
def __call__(self, func):
return CachedAttribute(func,
cachename=self.cachename,
resetlist=self.resetlist)
cache_readonly = _cache_readonly()
| ljwolf/pysal | pysal/contrib/glm/utils.py | Python | bsd-3-clause | 15,120 |
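The cache_readonly decorator defined at the end of the pysal utils module above caches a property's value in the instance's _cache and only warns (via CacheWriteWarning) on attempts to overwrite it. A small usage sketch; the class and attribute names are invented for illustration:

class Model(object):
    @cache_readonly
    def expensive(self):
        print('computing once')
        return 42

m = Model()
print(m.expensive)   # prints 'computing once' then 42; the result lands in m._cache
print(m.expensive)   # second access comes straight from the cache
m.expensive = 99     # emits a CacheWriteWarning; the cached value is kept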
from .decorators import render_to_json
from .helper import HeadFileUploader, ImageFactory, BaseModelManager, get_first_letter, convertjson
| xlk521/cloudguantou | utils/__init__.py | Python | bsd-3-clause | 138 |
# -*- coding: utf-8 -*-
import logging
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext as _
from vkontakte_api.models import VkontakteManager, VkontaktePKModel
from .mixins import ParseGroupsMixin, PhotableModelMixin, UserableModelMixin, VideoableModelMixin
log = logging.getLogger('vkontakte_groups')
GROUP_TYPE_CHOICES = (
('group', u'Группа'),
('page', u'Страница'),
('event', u'Событие'),
)
class CheckMembersCountFailed(Exception):
pass
class GroupRemoteManager(VkontakteManager):
def api_call(self, *args, **kwargs):
if 'ids' in kwargs:
kwargs['group_ids'] = ','.join(map(lambda i: str(i), kwargs.pop('ids')))
return super(GroupRemoteManager, self).api_call(*args, **kwargs)
def search(self, q, offset=None, count=None):
kwargs = {'q': q}
if offset:
kwargs.update(offset=offset)
if count:
kwargs.update(count=count)
return self.get(method='search', **kwargs)
def fetch(self, *args, **kwargs):
"""
Add additional fields to parent fetch request
"""
if 'fields' not in kwargs:
kwargs['fields'] = 'members_count'
return super(GroupRemoteManager, self).fetch(*args, **kwargs)
def get_members_ids(self, group, check_count=True, **kwargs):
ids = set()
attempts = 0
kwargs['offset'] = 0
kwargs['group_id'] = group.remote_id
while True:
response = self.api_call('get_members', **kwargs)
ids_iteration = response.get('items', [])
for user_id in ids_iteration:
ids.add(int(user_id))
ids_iteration_count = len(ids_iteration)
ids_count = len(ids)
            log.debug('Get members of group %s. Got %s, total %s, actual amount %s, offset %s' % (
group, ids_iteration_count, ids_count, group.members_count, kwargs['offset']))
if ids_iteration_count != 0:
attempts = 0
kwargs['offset'] += ids_iteration_count
else:
try:
if check_count:
self.check_members_count(group, ids_count)
break
except CheckMembersCountFailed as e:
attempts += 1
if attempts <= 5:
log.warning('%s, offset %s, attempts %s' % (e, kwargs['offset'], attempts))
continue
else:
log.error(e)
raise
return list(ids)
def check_members_count(self, group, count):
if group.members_count and count > 0:
division = float(group.members_count) / count
if 0.99 > division or 1.01 < division:
                raise CheckMembersCountFailed("Suspicious amount of members fetched for group %s. "
                                              "Actual amount is %d, fetched %d, division is %s" % (
group, group.members_count, count, division))
@python_2_unicode_compatible
class Group(PhotableModelMixin, VideoableModelMixin, UserableModelMixin, VkontaktePKModel):
resolve_screen_name_types = ['group', 'page', 'event']
slug_prefix = 'club'
name = models.CharField(max_length=800)
screen_name = models.CharField(u'Короткое имя группы', max_length=50, db_index=True)
is_closed = models.NullBooleanField(u'Флаг закрытой группы')
is_admin = models.NullBooleanField(u'Пользователь является администратором')
members_count = models.IntegerField(u'Всего участников', null=True)
verified = models.NullBooleanField(u'Флаг официальной группы')
type = models.CharField(u'Тип объекта', max_length=10, choices=GROUP_TYPE_CHOICES)
photo = models.URLField()
photo_big = models.URLField()
photo_medium = models.URLField()
remote = GroupRemoteManager(remote_pk=('remote_id',), methods_namespace='groups', version=5.28, methods={
'get': 'getById',
'search': 'search',
'get_members': 'getMembers',
})
class Meta:
verbose_name = _('Vkontakte group')
verbose_name_plural = _('Vkontakte groups')
def __str__(self):
return self.name
@property
def refresh_kwargs(self):
return {'ids': [self.remote_id]}
@property
def wall_comments(self):
if 'vkontakte_wall' not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("Application 'vkontakte_wall' not in INSTALLED_APPS")
from vkontakte_wall.models import Comment
# TODO: improve schema and queries with using owner_id field
return Comment.objects.filter(remote_id__startswith='-%s_' % self.remote_id)
@property
def topics_comments(self):
if 'vkontakte_board' not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("Application 'vkontakte_board' not in INSTALLED_APPS")
from vkontakte_board.models import Comment
# TODO: improve schema and queries with using owner_id field
return Comment.objects.filter(remote_id__startswith='-%s_' % self.remote_id)
def fetch_posts(self, *args, **kwargs):
if 'vkontakte_wall' not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("Application 'vkontakte_wall' not in INSTALLED_APPS")
from vkontakte_wall.models import Post
return Post.remote.fetch_wall(owner=self, *args, **kwargs)
def fetch_topics(self, *args, **kwargs):
if 'vkontakte_board' not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("Application 'vkontakte_board' not in INSTALLED_APPS")
from vkontakte_board.models import Topic
return Topic.remote.fetch(group=self, *args, **kwargs)
def fetch_statistic(self, *args, **kwargs):
if 'vkontakte_groups_statistic' not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("Application 'vkontakte_groups_statistic' not in INSTALLED_APPS")
from vkontakte_groups_statistic.models import fetch_statistic_for_group
return fetch_statistic_for_group(group=self, *args, **kwargs)
from . import signals
| ramusus/django-vkontakte-groups | vkontakte_groups/models.py | Python | bsd-3-clause | 6,504 |
#__docformat__ = "restructuredtext en"
# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************
# A collection of optimization algorithms. Version 0.5
# CHANGES
# Added fminbound (July 2001)
# Added brute (Aug. 2002)
# Finished line search satisfying strong Wolfe conditions (Mar. 2004)
# Updated strong Wolfe conditions line search to use
# cubic-interpolation (Mar. 2004)
from __future__ import division, print_function, absolute_import
# Minimization routines
__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg',
'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der',
'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
'line_search', 'check_grad', 'OptimizeResult', 'show_options',
'OptimizeWarning']
__docformat__ = "restructuredtext en"
import warnings
import sys
import numpy
from scipy._lib.six import callable, xrange
from numpy import (atleast_1d, eye, mgrid, argmin, zeros, shape, squeeze,
vectorize, asarray, sqrt, Inf, asfarray, isinf)
import numpy as np
from .linesearch import (line_search_wolfe1, line_search_wolfe2,
line_search_wolfe2 as line_search,
LineSearchWarning)
from scipy._lib._util import getargspec_no_self as _getargspec
from scipy.linalg import get_blas_funcs
# standard status messages of optimizers
_status_message = {'success': 'Optimization terminated successfully.',
'maxfev': 'Maximum number of function evaluations has '
'been exceeded.',
'maxiter': 'Maximum number of iterations has been '
'exceeded.',
'pr_loss': 'Desired error not necessarily achieved due '
'to precision loss.'}
class MemoizeJac(object):
    """ Decorator that caches the value and gradient of the function each time
    it is called. """
def __init__(self, fun):
self.fun = fun
self.jac = None
self.x = None
def __call__(self, x, *args):
self.x = numpy.asarray(x).copy()
fg = self.fun(x, *args)
self.jac = fg[1]
return fg[0]
def derivative(self, x, *args):
if self.jac is not None and numpy.alltrue(x == self.x):
return self.jac
else:
self(x, *args)
return self.jac
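# Editor's note (illustration, not part of the original module): MemoizeJac wraps a
# callable that returns (value, gradient) so that solvers expecting separate f and
# fprime evaluate the pair only once per point, e.g.:
#
#     def f_and_grad(x):
#         return x[0]**2 + x[1]**2, 2 * numpy.asarray(x)
#     fun = MemoizeJac(f_and_grad)
#     xopt = fmin_bfgs(fun, [1.0, 1.0], fprime=fun.derivative)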
class OptimizeResult(dict):
""" Represents the optimization result.
Attributes
----------
x : ndarray
The solution of the optimization.
success : bool
Whether or not the optimizer exited successfully.
status : int
Termination status of the optimizer. Its value depends on the
underlying solver. Refer to `message` for details.
message : str
Description of the cause of the termination.
fun, jac, hess: ndarray
Values of objective function, its Jacobian and its Hessian (if
available). The Hessians may be approximations, see the documentation
of the function in question.
hess_inv : object
Inverse of the objective function's Hessian; may be an approximation.
Not available for all solvers. The type of this attribute may be
either np.ndarray or scipy.sparse.linalg.LinearOperator.
nfev, njev, nhev : int
Number of evaluations of the objective functions and of its
Jacobian and Hessian.
nit : int
Number of iterations performed by the optimizer.
maxcv : float
The maximum constraint violation.
Notes
-----
    There may be additional attributes not listed above depending on the
specific solver. Since this class is essentially a subclass of dict
with attribute accessors, one can see which attributes are available
using the `keys()` method.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in sorted(self.items())])
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
class OptimizeWarning(UserWarning):
pass
def _check_unknown_options(unknown_options):
if unknown_options:
msg = ", ".join(map(str, unknown_options.keys()))
# Stack level 4: this is called from _minimize_*, which is
# called from another function in Scipy. Level 4 is the first
# level in user code.
warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4)
def is_array_scalar(x):
"""Test whether `x` is either a scalar or an array scalar.
"""
return np.size(x) == 1
_epsilon = sqrt(numpy.finfo(float).eps)
def vecnorm(x, ord=2):
if ord == Inf:
return numpy.amax(numpy.abs(x))
elif ord == -Inf:
return numpy.amin(numpy.abs(x))
else:
return numpy.sum(numpy.abs(x)**ord, axis=0)**(1.0 / ord)
def rosen(x):
"""
The Rosenbrock function.
The function computed is::
sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
Parameters
----------
x : array_like
1-D array of points at which the Rosenbrock function is to be computed.
Returns
-------
f : float
The value of the Rosenbrock function.
See Also
--------
rosen_der, rosen_hess, rosen_hess_prod
"""
x = asarray(x)
r = numpy.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0,
axis=0)
return r
def rosen_der(x):
"""
The derivative (i.e. gradient) of the Rosenbrock function.
Parameters
----------
x : array_like
1-D array of points at which the derivative is to be computed.
Returns
-------
rosen_der : (N,) ndarray
The gradient of the Rosenbrock function at `x`.
See Also
--------
rosen, rosen_hess, rosen_hess_prod
"""
x = asarray(x)
xm = x[1:-1]
xm_m1 = x[:-2]
xm_p1 = x[2:]
der = numpy.zeros_like(x)
der[1:-1] = (200 * (xm - xm_m1**2) -
400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm))
der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
der[-1] = 200 * (x[-1] - x[-2]**2)
return der
def rosen_hess(x):
"""
The Hessian matrix of the Rosenbrock function.
Parameters
----------
x : array_like
1-D array of points at which the Hessian matrix is to be computed.
Returns
-------
rosen_hess : ndarray
The Hessian matrix of the Rosenbrock function at `x`.
See Also
--------
rosen, rosen_der, rosen_hess_prod
"""
x = atleast_1d(x)
H = numpy.diag(-400 * x[:-1], 1) - numpy.diag(400 * x[:-1], -1)
diagonal = numpy.zeros(len(x), dtype=x.dtype)
diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
diagonal[-1] = 200
diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
H = H + numpy.diag(diagonal)
return H
def rosen_hess_prod(x, p):
"""
Product of the Hessian matrix of the Rosenbrock function with a vector.
Parameters
----------
x : array_like
1-D array of points at which the Hessian matrix is to be computed.
p : array_like
1-D array, the vector to be multiplied by the Hessian matrix.
Returns
-------
rosen_hess_prod : ndarray
The Hessian matrix of the Rosenbrock function at `x` multiplied
by the vector `p`.
See Also
--------
rosen, rosen_der, rosen_hess
"""
x = atleast_1d(x)
Hp = numpy.zeros(len(x), dtype=x.dtype)
Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1]
Hp[1:-1] = (-400 * x[:-2] * p[:-2] +
(202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1] -
400 * x[1:-1] * p[2:])
Hp[-1] = -400 * x[-2] * p[-2] + 200*p[-1]
return Hp
def wrap_function(function, args):
ncalls = [0]
if function is None:
return ncalls, None
def function_wrapper(*wrapper_args):
ncalls[0] += 1
return function(*(wrapper_args + args))
return ncalls, function_wrapper
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
full_output=0, disp=1, retall=0, callback=None, initial_simplex=None):
"""
Minimize a function using the downhill simplex algorithm.
This algorithm only uses function values, not derivatives or second
derivatives.
Parameters
----------
func : callable func(x,*args)
The objective function to be minimized.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to func, i.e. ``f(x,*args)``.
xtol : float, optional
Absolute error in xopt between iterations that is acceptable for
convergence.
ftol : number, optional
Absolute error in func(xopt) between iterations that is acceptable for
convergence.
maxiter : int, optional
Maximum number of iterations to perform.
maxfun : number, optional
Maximum number of function evaluations to make.
full_output : bool, optional
Set to True if fopt and warnflag outputs are desired.
disp : bool, optional
Set to True to print convergence messages.
retall : bool, optional
Set to True to return list of solutions at each iteration.
callback : callable, optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
initial_simplex : array_like of shape (N + 1, N), optional
Initial simplex. If given, overrides `x0`.
``initial_simplex[j,:]`` should contain the coordinates of
the j-th vertex of the ``N+1`` vertices in the simplex, where
``N`` is the dimension.
Returns
-------
xopt : ndarray
Parameter that minimizes function.
fopt : float
Value of function at minimum: ``fopt = func(xopt)``.
iter : int
Number of iterations performed.
funcalls : int
Number of function calls made.
warnflag : int
1 : Maximum number of function evaluations made.
2 : Maximum number of iterations reached.
allvecs : list
Solution at each iteration.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'Nelder-Mead' `method` in particular.
Notes
-----
Uses a Nelder-Mead simplex algorithm to find the minimum of function of
one or more variables.
This algorithm has a long history of successful use in applications.
But it will usually be slower than an algorithm that uses first or
second derivative information. In practice it can have poor
performance in high-dimensional problems and is not robust to
minimizing complicated functions. Additionally, there currently is no
complete theory describing when the algorithm will successfully
converge to the minimum, or how fast it will if it does. Both the ftol and
xtol criteria must be met for convergence.
Examples
--------
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.fmin(f, 1)
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 17
Function evaluations: 34
>>> minimum[0]
-8.8817841970012523e-16
References
----------
.. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
minimization", The Computer Journal, 7, pp. 308-313
.. [2] Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now
Respectable", in Numerical Analysis 1995, Proceedings of the
1995 Dundee Biennial Conference in Numerical Analysis, D.F.
Griffiths and G.A. Watson (Eds.), Addison Wesley Longman,
Harlow, UK, pp. 191-208.
"""
opts = {'xatol': xtol,
'fatol': ftol,
'maxiter': maxiter,
'maxfev': maxfun,
'disp': disp,
'return_all': retall,
'initial_simplex': initial_simplex}
res = _minimize_neldermead(func, x0, args, callback=callback, **opts)
if full_output:
retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status']
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_neldermead(func, x0, args=(), callback=None,
maxiter=None, maxfev=None, disp=False,
return_all=False, initial_simplex=None,
xatol=1e-4, fatol=1e-4, **unknown_options):
"""
Minimization of scalar function of one or more variables using the
Nelder-Mead algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxiter, maxfev : int
Maximum allowed number of iterations and function evaluations.
Will default to ``N*200``, where ``N`` is the number of
variables, if neither `maxiter` or `maxfev` is set. If both
`maxiter` and `maxfev` are set, minimization will stop at the
first reached.
initial_simplex : array_like of shape (N + 1, N)
Initial simplex. If given, overrides `x0`.
``initial_simplex[j,:]`` should contain the coordinates of
the j-th vertex of the ``N+1`` vertices in the simplex, where
``N`` is the dimension.
xatol : float, optional
Absolute error in xopt between iterations that is acceptable for
convergence.
fatol : number, optional
Absolute error in func(xopt) between iterations that is acceptable for
convergence.
"""
if 'ftol' in unknown_options:
warnings.warn("ftol is deprecated for Nelder-Mead,"
" use fatol instead. If you specified both, only"
" fatol is used.",
DeprecationWarning)
if (np.isclose(fatol, 1e-4) and
not np.isclose(unknown_options['ftol'], 1e-4)):
# only ftol was probably specified, use it.
fatol = unknown_options['ftol']
unknown_options.pop('ftol')
if 'xtol' in unknown_options:
warnings.warn("xtol is deprecated for Nelder-Mead,"
" use xatol instead. If you specified both, only"
" xatol is used.",
DeprecationWarning)
if (np.isclose(xatol, 1e-4) and
not np.isclose(unknown_options['xtol'], 1e-4)):
# only xtol was probably specified, use it.
xatol = unknown_options['xtol']
unknown_options.pop('xtol')
_check_unknown_options(unknown_options)
maxfun = maxfev
retall = return_all
fcalls, func = wrap_function(func, args)
rho = 1
chi = 2
psi = 0.5
sigma = 0.5
nonzdelt = 0.05
zdelt = 0.00025
x0 = asfarray(x0).flatten()
if initial_simplex is None:
N = len(x0)
sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
sim[0] = x0
for k in range(N):
y = numpy.array(x0, copy=True)
if y[k] != 0:
y[k] = (1 + nonzdelt)*y[k]
else:
y[k] = zdelt
sim[k + 1] = y
else:
sim = np.asfarray(initial_simplex).copy()
if sim.ndim != 2 or sim.shape[0] != sim.shape[1] + 1:
raise ValueError("`initial_simplex` should be an array of shape (N+1,N)")
if len(x0) != sim.shape[1]:
raise ValueError("Size of `initial_simplex` is not consistent with `x0`")
N = sim.shape[1]
if retall:
allvecs = [sim[0]]
# If neither are set, then set both to default
if maxiter is None and maxfun is None:
maxiter = N * 200
maxfun = N * 200
elif maxiter is None:
# Convert remaining Nones, to np.inf, unless the other is np.inf, in
# which case use the default to avoid unbounded iteration
if maxfun == np.inf:
maxiter = N * 200
else:
maxiter = np.inf
elif maxfun is None:
if maxiter == np.inf:
maxfun = N * 200
else:
maxfun = np.inf
one2np1 = list(range(1, N + 1))
fsim = numpy.zeros((N + 1,), float)
for k in range(N + 1):
fsim[k] = func(sim[k])
ind = numpy.argsort(fsim)
fsim = numpy.take(fsim, ind, 0)
# sort so sim[0,:] has the lowest function value
sim = numpy.take(sim, ind, 0)
iterations = 1
while (fcalls[0] < maxfun and iterations < maxiter):
if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xatol and
numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= fatol):
break
xbar = numpy.add.reduce(sim[:-1], 0) / N
xr = (1 + rho) * xbar - rho * sim[-1]
fxr = func(xr)
doshrink = 0
if fxr < fsim[0]:
xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
fxe = func(xe)
if fxe < fxr:
sim[-1] = xe
fsim[-1] = fxe
else:
sim[-1] = xr
fsim[-1] = fxr
else: # fsim[0] <= fxr
if fxr < fsim[-2]:
sim[-1] = xr
fsim[-1] = fxr
else: # fxr >= fsim[-2]
# Perform contraction
if fxr < fsim[-1]:
xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
fxc = func(xc)
if fxc <= fxr:
sim[-1] = xc
fsim[-1] = fxc
else:
doshrink = 1
else:
# Perform an inside contraction
xcc = (1 - psi) * xbar + psi * sim[-1]
fxcc = func(xcc)
if fxcc < fsim[-1]:
sim[-1] = xcc
fsim[-1] = fxcc
else:
doshrink = 1
if doshrink:
for j in one2np1:
sim[j] = sim[0] + sigma * (sim[j] - sim[0])
fsim[j] = func(sim[j])
ind = numpy.argsort(fsim)
sim = numpy.take(sim, ind, 0)
fsim = numpy.take(fsim, ind, 0)
if callback is not None:
callback(sim[0])
iterations += 1
if retall:
allvecs.append(sim[0])
x = sim[0]
fval = numpy.min(fsim)
warnflag = 0
if fcalls[0] >= maxfun:
warnflag = 1
msg = _status_message['maxfev']
if disp:
print('Warning: ' + msg)
elif iterations >= maxiter:
warnflag = 2
msg = _status_message['maxiter']
if disp:
print('Warning: ' + msg)
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iterations)
print(" Function evaluations: %d" % fcalls[0])
result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0],
status=warnflag, success=(warnflag == 0),
message=msg, x=x, final_simplex=(sim, fsim))
if retall:
result['allvecs'] = allvecs
return result
def _approx_fprime_helper(xk, f, epsilon, args=(), f0=None):
"""
See ``approx_fprime``. An optional initial function value arg is added.
"""
if f0 is None:
f0 = f(*((xk,) + args))
grad = numpy.zeros((len(xk),), float)
ei = numpy.zeros((len(xk),), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
def approx_fprime(xk, f, epsilon, *args):
"""Finite-difference approximation of the gradient of a scalar function.
Parameters
----------
xk : array_like
The coordinate vector at which to determine the gradient of `f`.
f : callable
The function of which to determine the gradient (partial derivatives).
Should take `xk` as first argument, other arguments to `f` can be
supplied in ``*args``. Should return a scalar, the value of the
function at `xk`.
epsilon : array_like
Increment to `xk` to use for determining the function gradient.
If a scalar, uses the same finite difference delta for all partial
derivatives. If an array, should contain one value per element of
`xk`.
\\*args : args, optional
Any other arguments that are to be passed to `f`.
Returns
-------
grad : ndarray
        The partial derivatives of `f` with respect to `xk`.
See Also
--------
check_grad : Check correctness of gradient function against approx_fprime.
Notes
-----
The function gradient is determined by the forward finite difference
formula::
f(xk[i] + epsilon[i]) - f(xk[i])
f'[i] = ---------------------------------
epsilon[i]
The main use of `approx_fprime` is in scalar function optimizers like
`fmin_bfgs`, to determine numerically the Jacobian of a function.
Examples
--------
>>> from scipy import optimize
>>> def func(x, c0, c1):
... "Coordinate vector `x` should be an array of size two."
... return c0 * x[0]**2 + c1*x[1]**2
>>> x = np.ones(2)
>>> c0, c1 = (1, 200)
>>> eps = np.sqrt(np.finfo(float).eps)
>>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1)
array([ 2. , 400.00004198])
"""
return _approx_fprime_helper(xk, f, epsilon, args=args)
def check_grad(func, grad, x0, *args, **kwargs):
"""Check the correctness of a gradient function by comparing it against a
(forward) finite-difference approximation of the gradient.
Parameters
----------
func : callable ``func(x0, *args)``
Function whose derivative is to be checked.
grad : callable ``grad(x0, *args)``
Gradient of `func`.
x0 : ndarray
Points to check `grad` against forward difference approximation of grad
using `func`.
args : \\*args, optional
Extra arguments passed to `func` and `grad`.
epsilon : float, optional
Step size used for the finite difference approximation. It defaults to
``sqrt(numpy.finfo(float).eps)``, which is approximately 1.49e-08.
Returns
-------
err : float
The square root of the sum of squares (i.e. the 2-norm) of the
difference between ``grad(x0, *args)`` and the finite difference
approximation of `grad` using func at the points `x0`.
See Also
--------
approx_fprime
Examples
--------
>>> def func(x):
... return x[0]**2 - 0.5 * x[1]**3
>>> def grad(x):
... return [2 * x[0], -1.5 * x[1]**2]
>>> from scipy.optimize import check_grad
>>> check_grad(func, grad, [1.5, -1.5])
2.9802322387695312e-08
"""
step = kwargs.pop('epsilon', _epsilon)
if kwargs:
raise ValueError("Unknown keyword arguments: %r" %
(list(kwargs.keys()),))
return sqrt(sum((grad(x0, *args) -
approx_fprime(x0, func, step, *args))**2))
def approx_fhess_p(x0, p, fprime, epsilon, *args):
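    """
    Forward-difference approximation of a Hessian-vector product.
    Here `fprime` is the gradient of the objective; the returned value,
    ``(fprime(x0 + epsilon*p) - fprime(x0)) / epsilon``, approximates the
    Hessian of the objective at `x0` times the vector `p`.  It is used by
    the Newton-CG code below when neither a Hessian nor a Hessian-vector
    product callable is supplied.
    """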
f2 = fprime(*((x0 + epsilon*p,) + args))
f1 = fprime(*((x0,) + args))
return (f2 - f1) / epsilon
class _LineSearchError(RuntimeError):
pass
def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,
**kwargs):
"""
Same as line_search_wolfe1, but fall back to line_search_wolfe2 if
suitable step length is not found, and raise an exception if a
suitable step length is not found.
Raises
------
_LineSearchError
If no suitable step size is found
"""
extra_condition = kwargs.pop('extra_condition', None)
ret = line_search_wolfe1(f, fprime, xk, pk, gfk,
old_fval, old_old_fval,
**kwargs)
if ret[0] is not None and extra_condition is not None:
xp1 = xk + ret[0] * pk
if not extra_condition(ret[0], xp1, ret[3], ret[5]):
# Reject step if extra_condition fails
ret = (None,)
if ret[0] is None:
# line search failed: try different one.
with warnings.catch_warnings():
warnings.simplefilter('ignore', LineSearchWarning)
kwargs2 = {}
for key in ('c1', 'c2', 'amax'):
if key in kwargs:
kwargs2[key] = kwargs[key]
ret = line_search_wolfe2(f, fprime, xk, pk, gfk,
old_fval, old_old_fval,
extra_condition=extra_condition,
**kwargs2)
if ret[0] is None:
raise _LineSearchError()
return ret
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
retall=0, callback=None):
"""
Minimize a function using the BFGS algorithm.
Parameters
----------
f : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
fprime : callable f'(x,*args), optional
Gradient of f.
args : tuple, optional
Extra arguments passed to f and fprime.
gtol : float, optional
Gradient norm must be less than gtol before successful termination.
norm : float, optional
Order of norm (Inf is max, -Inf is min)
epsilon : int or ndarray, optional
If fprime is approximated, use this value for the step size.
callback : callable, optional
An optional user-supplied function to call after each
iteration. Called as callback(xk), where xk is the
current parameter vector.
maxiter : int, optional
Maximum number of iterations to perform.
full_output : bool, optional
        If True, return fopt, func_calls, grad_calls, and warnflag
in addition to xopt.
disp : bool, optional
Print convergence message if True.
retall : bool, optional
Return a list of results at each iteration if True.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e. f(xopt) == fopt.
fopt : float
Minimum value.
gopt : ndarray
Value of gradient at minimum, f'(xopt), which should be near 0.
Bopt : ndarray
Value of 1/f''(xopt), i.e. the inverse hessian matrix.
func_calls : int
        Number of function calls made.
grad_calls : int
Number of gradient calls made.
warnflag : integer
1 : Maximum number of iterations exceeded.
2 : Gradient and/or function calls not changing.
allvecs : list
        The value of `xopt` at each iteration. Only returned if retall is True.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'BFGS' `method` in particular.
Notes
-----
Optimize the function, f, whose gradient is given by fprime
using the quasi-Newton method of Broyden, Fletcher, Goldfarb,
and Shanno (BFGS)
References
----------
    Wright & Nocedal, 'Numerical Optimization', 1999, pg. 198.
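    Examples
    --------
    A minimal illustrative sketch on a simple quadratic (the objective and
    the starting point below are arbitrary choices):
    >>> import numpy as np
    >>> from scipy.optimize import fmin_bfgs
    >>> def quadratic(x):
    ...     return (x[0] - 1.0) ** 2 + (x[1] + 2.5) ** 2
    >>> xopt = fmin_bfgs(quadratic, [0.0, 0.0], disp=False)
    >>> np.allclose(xopt, [1.0, -2.5], atol=1e-4)
    True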
"""
opts = {'gtol': gtol,
'norm': norm,
'eps': epsilon,
'disp': disp,
'maxiter': maxiter,
'return_all': retall}
res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['jac'], res['hess_inv'],
res['nfev'], res['njev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None,
gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
BFGS algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxiter : int
Maximum number of iterations to perform.
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
"""
_check_unknown_options(unknown_options)
f = fun
fprime = jac
epsilon = eps
retall = return_all
x0 = asarray(x0).flatten()
if x0.ndim == 0:
x0.shape = (1,)
if maxiter is None:
maxiter = len(x0) * 200
func_calls, f = wrap_function(f, args)
if fprime is None:
grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
else:
grad_calls, myfprime = wrap_function(fprime, args)
gfk = myfprime(x0)
k = 0
N = len(x0)
I = numpy.eye(N, dtype=int)
Hk = I
# get needed blas functions
    syr = get_blas_funcs('syr', dtype='d')  # Symmetric rank 1 update
    syr2 = get_blas_funcs('syr2', dtype='d')  # Symmetric rank 2 update
    symv = get_blas_funcs('symv', dtype='d')  # Symmetric matrix-vector product
# Sets the initial step guess to dx ~ 1
old_fval = f(x0)
old_old_fval = old_fval + np.linalg.norm(gfk) / 2
xk = x0
if retall:
allvecs = [x0]
sk = [2 * gtol]
warnflag = 0
gnorm = vecnorm(gfk, ord=norm)
while (gnorm > gtol) and (k < maxiter):
pk = symv(-1, Hk, gfk)
try:
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(f, myfprime, xk, pk, gfk,
old_fval, old_old_fval, amin=1e-100, amax=1e100)
except _LineSearchError:
# Line search failed to find a better solution.
warnflag = 2
break
xkp1 = xk + alpha_k * pk
if retall:
allvecs.append(xkp1)
sk = xkp1 - xk
xk = xkp1
if gfkp1 is None:
gfkp1 = myfprime(xkp1)
yk = gfkp1 - gfk
gfk = gfkp1
if callback is not None:
callback(xk)
gnorm = vecnorm(gfk, ord=norm)
if (gnorm <= gtol):
break
if not numpy.isfinite(old_fval):
# We correctly found +-Inf as optimal value, or something went
# wrong.
warnflag = 2
break
yk_sk = np.dot(yk, sk)
        try:  # this was handled in Numeric; keep it for extra safety
rhok = 1.0 / yk_sk
except ZeroDivisionError:
rhok = 1000.0
if disp:
print("Divide-by-zero encountered: rhok assumed large")
if isinf(rhok): # this is patch for numpy
rhok = 1000.0
if disp:
print("Divide-by-zero encountered: rhok assumed large")
        # Heuristic to adjust Hk for k == 0
        # described in Nocedal/Wright "Numerical Optimization"
# p.143 formula (6.20)
if k == 0:
Hk = yk_sk / np.dot(yk, yk)*I
# Implement BFGS update using the formula:
# Hk <- Hk + ((Hk yk).T yk+sk.T yk)*(rhok**2)*sk sk.T -rhok*[(Hk yk)sk.T +sk(Hk yk).T]
# This formula is equivalent to (6.17) from
# Nocedal/Wright "Numerical Optimization"
# written in a more efficient way for implementation.
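        # For reference, (6.17) is the usual inverse-Hessian update
        #   H_{k+1} = (I - rho_k s_k y_k^T) H_k (I - rho_k y_k s_k^T)
        #             + rho_k s_k s_k^T,   with rho_k = 1 / (y_k^T s_k).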
Hk_yk = symv(1, Hk, yk)
c = rhok**2 * (yk_sk+Hk_yk.dot(yk))
Hk = syr2(-rhok, sk, Hk_yk, a=Hk)
Hk = syr(c, sk, a=Hk)
k += 1
    # Recover the full matrix Hk from the symmetric (upper-triangular)
    # representation that was being used to store it.
Hk_triu = numpy.triu(Hk)
Hk_diag = numpy.diag(Hk)
Hk = Hk_triu + Hk_triu.T - numpy.diag(Hk_diag)
fval = old_fval
if np.isnan(fval):
# This can happen if the first call to f returned NaN;
# the loop is then never entered.
warnflag = 2
if warnflag == 2:
msg = _status_message['pr_loss']
if disp:
print("Warning: " + msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
elif k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
if disp:
print("Warning: " + msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=func_calls[0],
njev=grad_calls[0], status=warnflag,
success=(warnflag == 0), message=msg, x=xk,
nit=k)
if retall:
result['allvecs'] = allvecs
return result
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
maxiter=None, full_output=0, disp=1, retall=0, callback=None):
"""
Minimize a function using a nonlinear conjugate gradient algorithm.
Parameters
----------
f : callable, ``f(x, *args)``
Objective function to be minimized. Here `x` must be a 1-D array of
the variables that are to be changed in the search for a minimum, and
`args` are the other (fixed) parameters of `f`.
x0 : ndarray
A user-supplied initial estimate of `xopt`, the optimal value of `x`.
It must be a 1-D array of values.
fprime : callable, ``fprime(x, *args)``, optional
A function that returns the gradient of `f` at `x`. Here `x` and `args`
are as described above for `f`. The returned value must be a 1-D array.
Defaults to None, in which case the gradient is approximated
numerically (see `epsilon`, below).
args : tuple, optional
Parameter values passed to `f` and `fprime`. Must be supplied whenever
additional fixed parameters are needed to completely specify the
functions `f` and `fprime`.
gtol : float, optional
Stop when the norm of the gradient is less than `gtol`.
norm : float, optional
Order to use for the norm of the gradient
(``-np.Inf`` is min, ``np.Inf`` is max).
epsilon : float or ndarray, optional
Step size(s) to use when `fprime` is approximated numerically. Can be a
scalar or a 1-D array. Defaults to ``sqrt(eps)``, with eps the
floating point machine precision. Usually ``sqrt(eps)`` is about
1.5e-8.
maxiter : int, optional
Maximum number of iterations to perform. Default is ``200 * len(x0)``.
full_output : bool, optional
If True, return `fopt`, `func_calls`, `grad_calls`, and `warnflag` in
addition to `xopt`. See the Returns section below for additional
information on optional return values.
disp : bool, optional
If True, return a convergence message, followed by `xopt`.
retall : bool, optional
If True, add to the returned values the results of each iteration.
callback : callable, optional
An optional user-supplied function, called after each iteration.
Called as ``callback(xk)``, where ``xk`` is the current value of `x0`.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e. ``f(xopt) == fopt``.
fopt : float, optional
Minimum value found, f(xopt). Only returned if `full_output` is True.
func_calls : int, optional
        The number of function calls made. Only returned if `full_output`
is True.
grad_calls : int, optional
The number of gradient calls made. Only returned if `full_output` is
True.
warnflag : int, optional
Integer value with warning status, only returned if `full_output` is
True.
0 : Success.
1 : The maximum number of iterations was exceeded.
2 : Gradient and/or function calls were not changing. May indicate
that precision was lost, i.e., the routine did not converge.
allvecs : list of ndarray, optional
List of arrays, containing the results at each iteration.
Only returned if `retall` is True.
See Also
--------
minimize : common interface to all `scipy.optimize` algorithms for
unconstrained and constrained minimization of multivariate
functions. It provides an alternative way to call
``fmin_cg``, by specifying ``method='CG'``.
Notes
-----
This conjugate gradient algorithm is based on that of Polak and Ribiere
[1]_.
Conjugate gradient methods tend to work better when:
1. `f` has a unique global minimizing point, and no local minima or
other stationary points,
2. `f` is, at least locally, reasonably well approximated by a
quadratic function of the variables,
3. `f` is continuous and has a continuous gradient,
4. `fprime` is not too large, e.g., has a norm less than 1000,
5. The initial guess, `x0`, is reasonably close to `f` 's global
minimizing point, `xopt`.
References
----------
.. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122.
Examples
--------
Example 1: seek the minimum value of the expression
``a*u**2 + b*u*v + c*v**2 + d*u + e*v + f`` for given values
of the parameters and an initial guess ``(u, v) = (0, 0)``.
>>> args = (2, 3, 7, 8, 9, 10) # parameter values
>>> def f(x, *args):
... u, v = x
... a, b, c, d, e, f = args
... return a*u**2 + b*u*v + c*v**2 + d*u + e*v + f
>>> def gradf(x, *args):
... u, v = x
... a, b, c, d, e, f = args
... gu = 2*a*u + b*v + d # u-component of the gradient
... gv = b*u + 2*c*v + e # v-component of the gradient
... return np.asarray((gu, gv))
>>> x0 = np.asarray((0, 0)) # Initial guess.
>>> from scipy import optimize
>>> res1 = optimize.fmin_cg(f, x0, fprime=gradf, args=args)
Optimization terminated successfully.
Current function value: 1.617021
Iterations: 4
Function evaluations: 8
Gradient evaluations: 8
>>> res1
array([-1.80851064, -0.25531915])
Example 2: solve the same problem using the `minimize` function.
(This `myopts` dictionary shows all of the available options,
although in practice only non-default values would be needed.
The returned value will be a dictionary.)
>>> opts = {'maxiter' : None, # default value.
... 'disp' : True, # non-default value.
... 'gtol' : 1e-5, # default value.
... 'norm' : np.inf, # default value.
... 'eps' : 1.4901161193847656e-08} # default value.
>>> res2 = optimize.minimize(f, x0, jac=gradf, args=args,
... method='CG', options=opts)
Optimization terminated successfully.
Current function value: 1.617021
Iterations: 4
Function evaluations: 8
Gradient evaluations: 8
>>> res2.x # minimum found
array([-1.80851064, -0.25531915])
"""
opts = {'gtol': gtol,
'norm': norm,
'eps': epsilon,
'disp': disp,
'maxiter': maxiter,
'return_all': retall}
res = _minimize_cg(f, x0, args, fprime, callback=callback, **opts)
if full_output:
retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status']
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_cg(fun, x0, args=(), jac=None, callback=None,
gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
conjugate gradient algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxiter : int
Maximum number of iterations to perform.
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
"""
_check_unknown_options(unknown_options)
f = fun
fprime = jac
epsilon = eps
retall = return_all
x0 = asarray(x0).flatten()
if maxiter is None:
maxiter = len(x0) * 200
func_calls, f = wrap_function(f, args)
if fprime is None:
grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
else:
grad_calls, myfprime = wrap_function(fprime, args)
gfk = myfprime(x0)
k = 0
xk = x0
# Sets the initial step guess to dx ~ 1
old_fval = f(xk)
old_old_fval = old_fval + np.linalg.norm(gfk) / 2
if retall:
allvecs = [xk]
warnflag = 0
pk = -gfk
gnorm = vecnorm(gfk, ord=norm)
sigma_3 = 0.01
while (gnorm > gtol) and (k < maxiter):
deltak = numpy.dot(gfk, gfk)
cached_step = [None]
def polak_ribiere_powell_step(alpha, gfkp1=None):
xkp1 = xk + alpha * pk
if gfkp1 is None:
gfkp1 = myfprime(xkp1)
yk = gfkp1 - gfk
beta_k = max(0, numpy.dot(yk, gfkp1) / deltak)
pkp1 = -gfkp1 + beta_k * pk
gnorm = vecnorm(gfkp1, ord=norm)
return (alpha, xkp1, pkp1, gfkp1, gnorm)
def descent_condition(alpha, xkp1, fp1, gfkp1):
# Polak-Ribiere+ needs an explicit check of a sufficient
# descent condition, which is not guaranteed by strong Wolfe.
#
# See Gilbert & Nocedal, "Global convergence properties of
# conjugate gradient methods for optimization",
# SIAM J. Optimization 2, 21 (1992).
cached_step[:] = polak_ribiere_powell_step(alpha, gfkp1)
alpha, xk, pk, gfk, gnorm = cached_step
# Accept step if it leads to convergence.
if gnorm <= gtol:
return True
# Accept step if sufficient descent condition applies.
return numpy.dot(pk, gfk) <= -sigma_3 * numpy.dot(gfk, gfk)
try:
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval,
old_old_fval, c2=0.4, amin=1e-100, amax=1e100,
extra_condition=descent_condition)
except _LineSearchError:
# Line search failed to find a better solution.
warnflag = 2
break
# Reuse already computed results if possible
if alpha_k == cached_step[0]:
alpha_k, xk, pk, gfk, gnorm = cached_step
else:
alpha_k, xk, pk, gfk, gnorm = polak_ribiere_powell_step(alpha_k, gfkp1)
if retall:
allvecs.append(xk)
if callback is not None:
callback(xk)
k += 1
fval = old_fval
if warnflag == 2:
msg = _status_message['pr_loss']
if disp:
print("Warning: " + msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
elif k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
if disp:
print("Warning: " + msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
result = OptimizeResult(fun=fval, jac=gfk, nfev=func_calls[0],
njev=grad_calls[0], status=warnflag,
success=(warnflag == 0), message=msg, x=xk,
nit=k)
if retall:
result['allvecs'] = allvecs
return result
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
callback=None):
"""
Unconstrained minimization of a function using the Newton-CG method.
Parameters
----------
f : callable ``f(x, *args)``
Objective function to be minimized.
x0 : ndarray
Initial guess.
fprime : callable ``f'(x, *args)``
Gradient of f.
fhess_p : callable ``fhess_p(x, p, *args)``, optional
Function which computes the Hessian of f times an
arbitrary vector, p.
fhess : callable ``fhess(x, *args)``, optional
Function to compute the Hessian matrix of f.
args : tuple, optional
Extra arguments passed to f, fprime, fhess_p, and fhess
(the same set of extra arguments is supplied to all of
these functions).
epsilon : float or ndarray, optional
If fhess is approximated, use this value for the step size.
callback : callable, optional
An optional user-supplied function which is called after
each iteration. Called as callback(xk), where xk is the
current parameter vector.
avextol : float, optional
Convergence is assumed when the average relative error in
the minimizer falls below this amount.
maxiter : int, optional
Maximum number of iterations to perform.
full_output : bool, optional
If True, return the optional outputs.
disp : bool, optional
If True, print convergence message.
retall : bool, optional
If True, return a list of results at each iteration.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e. ``f(xopt) == fopt``.
fopt : float
Value of the function at xopt, i.e. ``fopt = f(xopt)``.
fcalls : int
Number of function calls made.
gcalls : int
Number of gradient calls made.
hcalls : int
Number of hessian calls made.
warnflag : int
Warnings generated by the algorithm.
1 : Maximum number of iterations exceeded.
allvecs : list
The result at each iteration, if retall is True (see below).
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'Newton-CG' `method` in particular.
Notes
-----
    Only one of `fhess_p` or `fhess` needs to be given.  If `fhess`
    is provided, then `fhess_p` will be ignored.  `fhess_p` must compute
    the Hessian times an arbitrary vector; if neither `fhess` nor
    `fhess_p` is provided, then that product is approximated using
    finite differences on `fprime`.
Newton-CG methods are also called truncated Newton methods. This
function differs from scipy.optimize.fmin_tnc because
1. scipy.optimize.fmin_ncg is written purely in python using numpy
and scipy while scipy.optimize.fmin_tnc calls a C function.
2. scipy.optimize.fmin_ncg is only for unconstrained minimization
while scipy.optimize.fmin_tnc is for unconstrained minimization
or box constrained minimization. (Box constraints give
lower and upper bounds for each variable separately.)
References
----------
Wright & Nocedal, 'Numerical Optimization', 1999, pg. 140.
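    Examples
    --------
    A small illustrative sketch on a separable quadratic, with the gradient
    supplied explicitly (objective and starting point are arbitrary choices):
    >>> import numpy as np
    >>> from scipy.optimize import fmin_ncg
    >>> def f(x):
    ...     return x[0] ** 2 + 4.0 * x[1] ** 2
    >>> def gradf(x):
    ...     return np.array([2.0 * x[0], 8.0 * x[1]])
    >>> xopt = fmin_ncg(f, [3.0, -2.0], gradf, disp=False)
    >>> np.allclose(xopt, [0.0, 0.0], atol=1e-4)
    True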
"""
opts = {'xtol': avextol,
'eps': epsilon,
'maxiter': maxiter,
'disp': disp,
'return_all': retall}
res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p,
callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['nfev'], res['njev'],
res['nhev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
callback=None, xtol=1e-5, eps=_epsilon, maxiter=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
Newton-CG algorithm.
Note that the `jac` parameter (Jacobian) is required.
Options
-------
disp : bool
Set to True to print convergence messages.
xtol : float
Average relative error in solution `xopt` acceptable for
convergence.
maxiter : int
Maximum number of iterations to perform.
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
"""
_check_unknown_options(unknown_options)
if jac is None:
raise ValueError('Jacobian is required for Newton-CG method')
f = fun
fprime = jac
fhess_p = hessp
fhess = hess
avextol = xtol
epsilon = eps
retall = return_all
def terminate(warnflag, msg):
if disp:
print(msg)
print(" Current function value: %f" % old_fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % fcalls[0])
print(" Gradient evaluations: %d" % gcalls[0])
print(" Hessian evaluations: %d" % hcalls)
fval = old_fval
result = OptimizeResult(fun=fval, jac=gfk, nfev=fcalls[0],
njev=gcalls[0], nhev=hcalls, status=warnflag,
success=(warnflag == 0), message=msg, x=xk,
nit=k)
if retall:
result['allvecs'] = allvecs
return result
x0 = asarray(x0).flatten()
fcalls, f = wrap_function(f, args)
gcalls, fprime = wrap_function(fprime, args)
hcalls = 0
if maxiter is None:
maxiter = len(x0)*200
cg_maxiter = 20*len(x0)
xtol = len(x0) * avextol
update = [2 * xtol]
xk = x0
if retall:
allvecs = [xk]
k = 0
old_fval = f(x0)
old_old_fval = None
float64eps = numpy.finfo(numpy.float64).eps
while numpy.add.reduce(numpy.abs(update)) > xtol:
if k >= maxiter:
msg = "Warning: " + _status_message['maxiter']
return terminate(1, msg)
# Compute a search direction pk by applying the CG method to
# del2 f(xk) p = - grad f(xk) starting from 0.
b = -fprime(xk)
maggrad = numpy.add.reduce(numpy.abs(b))
eta = numpy.min([0.5, numpy.sqrt(maggrad)])
termcond = eta * maggrad
xsupi = zeros(len(x0), dtype=x0.dtype)
ri = -b
psupi = -ri
i = 0
dri0 = numpy.dot(ri, ri)
        if fhess is not None:  # compute the Hessian only once per outer iteration
A = fhess(*(xk,) + args)
hcalls = hcalls + 1
for k2 in xrange(cg_maxiter):
if numpy.add.reduce(numpy.abs(ri)) <= termcond:
break
if fhess is None:
if fhess_p is None:
Ap = approx_fhess_p(xk, psupi, fprime, epsilon)
else:
Ap = fhess_p(xk, psupi, *args)
hcalls = hcalls + 1
else:
Ap = numpy.dot(A, psupi)
# check curvature
Ap = asarray(Ap).squeeze() # get rid of matrices...
curv = numpy.dot(psupi, Ap)
if 0 <= curv <= 3 * float64eps:
break
elif curv < 0:
if (i > 0):
break
else:
# fall back to steepest descent direction
xsupi = dri0 / (-curv) * b
break
alphai = dri0 / curv
xsupi = xsupi + alphai * psupi
ri = ri + alphai * Ap
dri1 = numpy.dot(ri, ri)
betai = dri1 / dri0
psupi = -ri + betai * psupi
i = i + 1
dri0 = dri1 # update numpy.dot(ri,ri) for next time.
else:
            # CG exhausted cg_maxiter iterations without meeting termcond; bail out
msg = ("Warning: CG iterations didn't converge. The Hessian is not "
"positive definite.")
return terminate(3, msg)
pk = xsupi # search direction is solution to system.
gfk = -b # gradient at xk
try:
alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(f, fprime, xk, pk, gfk,
old_fval, old_old_fval)
except _LineSearchError:
# Line search failed to find a better solution.
msg = "Warning: " + _status_message['pr_loss']
return terminate(2, msg)
update = alphak * pk
xk = xk + update # upcast if necessary
if callback is not None:
callback(xk)
if retall:
allvecs.append(xk)
k += 1
else:
msg = _status_message['success']
return terminate(0, msg)
def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,
full_output=0, disp=1):
"""Bounded minimization for scalar functions.
Parameters
----------
func : callable f(x,*args)
Objective function to be minimized (must accept and return scalars).
x1, x2 : float or array scalar
The optimization bounds.
args : tuple, optional
Extra arguments passed to function.
xtol : float, optional
The convergence tolerance.
maxfun : int, optional
Maximum number of function evaluations allowed.
full_output : bool, optional
If True, return optional outputs.
disp : int, optional
If non-zero, print messages.
0 : no message printing.
1 : non-convergence notification messages only.
2 : print a message on convergence too.
3 : print iteration results.
Returns
-------
xopt : ndarray
Parameters (over given interval) which minimize the
objective function.
fval : number
The function value at the minimum point.
ierr : int
An error flag (0 if converged, 1 if maximum number of
function calls reached).
numfunc : int
The number of function calls made.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Bounded' `method` in particular.
Notes
-----
Finds a local minimizer of the scalar function `func` in the
interval x1 < xopt < x2 using Brent's method. (See `brent`
for auto-bracketing).
Examples
--------
`fminbound` finds the minimum of the function in the given range.
    The following examples illustrate this.
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.fminbound(f, -1, 2)
>>> minimum
0.0
>>> minimum = optimize.fminbound(f, 1, 2)
>>> minimum
1.0000059608609866
"""
options = {'xatol': xtol,
'maxiter': maxfun,
'disp': disp}
res = _minimize_scalar_bounded(func, (x1, x2), args, **options)
if full_output:
return res['x'], res['fun'], res['status'], res['nfev']
else:
return res['x']
def _minimize_scalar_bounded(func, bounds, args=(),
xatol=1e-5, maxiter=500, disp=0,
**unknown_options):
"""
Options
-------
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
xatol : float
Absolute error in solution `xopt` acceptable for convergence.
"""
_check_unknown_options(unknown_options)
maxfun = maxiter
# Test bounds are of correct form
if len(bounds) != 2:
raise ValueError('bounds must have two elements.')
x1, x2 = bounds
if not (is_array_scalar(x1) and is_array_scalar(x2)):
raise ValueError("Optimisation bounds must be scalars"
" or array scalars.")
if x1 > x2:
raise ValueError("The lower bound exceeds the upper bound.")
flag = 0
header = ' Func-count x f(x) Procedure'
step = ' initial'
sqrt_eps = sqrt(2.2e-16)
golden_mean = 0.5 * (3.0 - sqrt(5.0))
a, b = x1, x2
fulc = a + golden_mean * (b - a)
nfc, xf = fulc, fulc
rat = e = 0.0
x = xf
fx = func(x, *args)
num = 1
fmin_data = (1, xf, fx)
ffulc = fnfc = fx
xm = 0.5 * (a + b)
tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0
tol2 = 2.0 * tol1
if disp > 2:
print(" ")
print(header)
print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))
while (numpy.abs(xf - xm) > (tol2 - 0.5 * (b - a))):
golden = 1
# Check for parabolic fit
if numpy.abs(e) > tol1:
golden = 0
r = (xf - nfc) * (fx - ffulc)
q = (xf - fulc) * (fx - fnfc)
p = (xf - fulc) * q - (xf - nfc) * r
q = 2.0 * (q - r)
if q > 0.0:
p = -p
q = numpy.abs(q)
r = e
e = rat
# Check for acceptability of parabola
if ((numpy.abs(p) < numpy.abs(0.5*q*r)) and (p > q*(a - xf)) and
(p < q * (b - xf))):
rat = (p + 0.0) / q
x = xf + rat
step = ' parabolic'
if ((x - a) < tol2) or ((b - x) < tol2):
si = numpy.sign(xm - xf) + ((xm - xf) == 0)
rat = tol1 * si
else: # do a golden section step
golden = 1
if golden: # Do a golden-section step
if xf >= xm:
e = a - xf
else:
e = b - xf
rat = golden_mean*e
step = ' golden'
si = numpy.sign(rat) + (rat == 0)
x = xf + si * numpy.max([numpy.abs(rat), tol1])
fu = func(x, *args)
num += 1
fmin_data = (num, x, fu)
if disp > 2:
print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))
if fu <= fx:
if x >= xf:
a = xf
else:
b = xf
fulc, ffulc = nfc, fnfc
nfc, fnfc = xf, fx
xf, fx = x, fu
else:
if x < xf:
a = x
else:
b = x
if (fu <= fnfc) or (nfc == xf):
fulc, ffulc = nfc, fnfc
nfc, fnfc = x, fu
elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc):
fulc, ffulc = x, fu
xm = 0.5 * (a + b)
tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0
tol2 = 2.0 * tol1
if num >= maxfun:
flag = 1
break
fval = fx
if disp > 0:
_endprint(x, flag, fval, maxfun, xatol, disp)
result = OptimizeResult(fun=fval, status=flag, success=(flag == 0),
message={0: 'Solution found.',
1: 'Maximum number of function calls '
'reached.'}.get(flag, ''),
x=xf, nfev=num)
return result
class Brent:
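    """
    Scalar minimization by Brent's method: golden-section steps combined
    with inverse parabolic interpolation over a bracketing interval.  Used
    internally by `brent` and `_minimize_scalar_brent` below.
    """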
#need to rethink design of __init__
def __init__(self, func, args=(), tol=1.48e-8, maxiter=500,
full_output=0):
self.func = func
self.args = args
self.tol = tol
self.maxiter = maxiter
self._mintol = 1.0e-11
self._cg = 0.3819660
self.xmin = None
self.fval = None
self.iter = 0
self.funcalls = 0
# need to rethink design of set_bracket (new options, etc)
def set_bracket(self, brack=None):
self.brack = brack
def get_bracket_info(self):
#set up
func = self.func
args = self.args
brack = self.brack
### BEGIN core bracket_info code ###
### carefully DOCUMENT any CHANGES in core ##
if brack is None:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
elif len(brack) == 2:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
xb=brack[1], args=args)
elif len(brack) == 3:
xa, xb, xc = brack
if (xa > xc): # swap so xa < xc can be assumed
xc, xa = xa, xc
if not ((xa < xb) and (xb < xc)):
raise ValueError("Not a bracketing interval.")
fa = func(*((xa,) + args))
fb = func(*((xb,) + args))
fc = func(*((xc,) + args))
if not ((fb < fa) and (fb < fc)):
raise ValueError("Not a bracketing interval.")
funcalls = 3
else:
raise ValueError("Bracketing interval must be "
"length 2 or 3 sequence.")
### END core bracket_info code ###
return xa, xb, xc, fa, fb, fc, funcalls
def optimize(self):
# set up for optimization
func = self.func
xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info()
_mintol = self._mintol
_cg = self._cg
#################################
#BEGIN CORE ALGORITHM
#################################
x = w = v = xb
fw = fv = fx = func(*((x,) + self.args))
if (xa < xc):
a = xa
b = xc
else:
a = xc
b = xa
deltax = 0.0
funcalls = 1
iter = 0
while (iter < self.maxiter):
tol1 = self.tol * numpy.abs(x) + _mintol
tol2 = 2.0 * tol1
xmid = 0.5 * (a + b)
# check for convergence
if numpy.abs(x - xmid) < (tol2 - 0.5 * (b - a)):
break
# XXX In the first iteration, rat is only bound in the true case
# of this conditional. This used to cause an UnboundLocalError
# (gh-4140). It should be set before the if (but to what?).
if (numpy.abs(deltax) <= tol1):
if (x >= xmid):
deltax = a - x # do a golden section step
else:
deltax = b - x
rat = _cg * deltax
else: # do a parabolic step
tmp1 = (x - w) * (fx - fv)
tmp2 = (x - v) * (fx - fw)
p = (x - v) * tmp2 - (x - w) * tmp1
tmp2 = 2.0 * (tmp2 - tmp1)
if (tmp2 > 0.0):
p = -p
tmp2 = numpy.abs(tmp2)
dx_temp = deltax
deltax = rat
# check parabolic fit
if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and
(numpy.abs(p) < numpy.abs(0.5 * tmp2 * dx_temp))):
rat = p * 1.0 / tmp2 # if parabolic step is useful.
u = x + rat
if ((u - a) < tol2 or (b - u) < tol2):
if xmid - x >= 0:
rat = tol1
else:
rat = -tol1
else:
if (x >= xmid):
                    deltax = a - x  # if not, do a golden section step
else:
deltax = b - x
rat = _cg * deltax
if (numpy.abs(rat) < tol1): # update by at least tol1
if rat >= 0:
u = x + tol1
else:
u = x - tol1
else:
u = x + rat
fu = func(*((u,) + self.args)) # calculate new output value
funcalls += 1
if (fu > fx): # if it's bigger than current
if (u < x):
a = u
else:
b = u
if (fu <= fw) or (w == x):
v = w
w = u
fv = fw
fw = fu
elif (fu <= fv) or (v == x) or (v == w):
v = u
fv = fu
else:
if (u >= x):
a = x
else:
b = x
v = w
w = x
x = u
fv = fw
fw = fx
fx = fu
iter += 1
#################################
#END CORE ALGORITHM
#################################
self.xmin = x
self.fval = fx
self.iter = iter
self.funcalls = funcalls
def get_result(self, full_output=False):
if full_output:
return self.xmin, self.fval, self.iter, self.funcalls
else:
return self.xmin
def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500):
"""
Given a function of one-variable and a possible bracket, return
the local minimum of the function isolated to a fractional precision
of tol.
Parameters
----------
func : callable f(x,*args)
Objective function.
args : tuple, optional
Additional arguments (if present).
brack : tuple, optional
Either a triple (xa,xb,xc) where xa<xb<xc and func(xb) <
func(xa), func(xc) or a pair (xa,xb) which are used as a
starting interval for a downhill bracket search (see
`bracket`). Providing the pair (xa,xb) does not always mean
the obtained solution will satisfy xa<=x<=xb.
tol : float, optional
Stop if between iteration change is less than `tol`.
full_output : bool, optional
If True, return all output args (xmin, fval, iter,
funcalls).
maxiter : int, optional
Maximum number of iterations in solution.
Returns
-------
xmin : ndarray
Optimum point.
fval : float
Optimum value.
iter : int
Number of iterations.
funcalls : int
Number of objective function evaluations made.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Brent' `method` in particular.
Notes
-----
Uses inverse parabolic interpolation when possible to speed up
convergence of golden section method.
Does not ensure that the minimum lies in the range specified by
`brack`. See `fminbound`.
Examples
--------
    We illustrate the behaviour of the function when `brack` is of
    size 2 and 3, respectively. In the case where `brack` is of the
    form (xa, xb), we can see that, for the given values, the output
    does not necessarily lie in the range (xa, xb).
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.brent(f,brack=(1,2))
>>> minimum
0.0
>>> minimum = optimize.brent(f,brack=(-1,0.5,2))
>>> minimum
-2.7755575615628914e-17
"""
options = {'xtol': tol,
'maxiter': maxiter}
res = _minimize_scalar_brent(func, brack, args, **options)
if full_output:
return res['x'], res['fun'], res['nit'], res['nfev']
else:
return res['x']
def _minimize_scalar_brent(func, brack=None, args=(),
xtol=1.48e-8, maxiter=500,
**unknown_options):
"""
Options
-------
maxiter : int
Maximum number of iterations to perform.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
Notes
-----
Uses inverse parabolic interpolation when possible to speed up
convergence of golden section method.
"""
_check_unknown_options(unknown_options)
tol = xtol
if tol < 0:
raise ValueError('tolerance should be >= 0, got %r' % tol)
brent = Brent(func=func, args=args, tol=tol,
full_output=True, maxiter=maxiter)
brent.set_bracket(brack)
brent.optimize()
x, fval, nit, nfev = brent.get_result(full_output=True)
return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev,
success=nit < maxiter)
def golden(func, args=(), brack=None, tol=_epsilon,
full_output=0, maxiter=5000):
"""
Return the minimum of a function of one variable using golden section
method.
Given a function of one variable and a possible bracketing interval,
return the minimum of the function isolated to a fractional precision of
tol.
Parameters
----------
func : callable func(x,*args)
Objective function to minimize.
args : tuple, optional
Additional arguments (if present), passed to func.
brack : tuple, optional
Triple (a,b,c), where (a<b<c) and func(b) <
func(a),func(c). If bracket consists of two numbers (a,
c), then they are assumed to be a starting interval for a
        downhill bracket search (see `bracket`); this does not always
        mean that the obtained solution will satisfy a <= x <= c.
tol : float, optional
x tolerance stop criterion
full_output : bool, optional
If True, return optional outputs.
maxiter : int
Maximum number of iterations to perform.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Golden' `method` in particular.
Notes
-----
    Uses an analog of the bisection method to decrease the bracketed
    interval.
Examples
--------
    We illustrate the behaviour of the function when `brack` is of
    size 2 and 3, respectively. In the case where `brack` is of the
    form (xa, xb), we can see that, for the given values, the output
    does not necessarily lie in the range ``(xa, xb)``.
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.golden(f, brack=(1, 2))
>>> minimum
1.5717277788484873e-162
>>> minimum = optimize.golden(f, brack=(-1, 0.5, 2))
>>> minimum
-1.5717277788484873e-162
"""
options = {'xtol': tol, 'maxiter': maxiter}
res = _minimize_scalar_golden(func, brack, args, **options)
if full_output:
return res['x'], res['fun'], res['nfev']
else:
return res['x']
def _minimize_scalar_golden(func, brack=None, args=(),
xtol=_epsilon, maxiter=5000, **unknown_options):
"""
Options
-------
maxiter : int
Maximum number of iterations to perform.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
"""
_check_unknown_options(unknown_options)
tol = xtol
if brack is None:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
elif len(brack) == 2:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
xb=brack[1], args=args)
elif len(brack) == 3:
xa, xb, xc = brack
if (xa > xc): # swap so xa < xc can be assumed
xc, xa = xa, xc
if not ((xa < xb) and (xb < xc)):
raise ValueError("Not a bracketing interval.")
fa = func(*((xa,) + args))
fb = func(*((xb,) + args))
fc = func(*((xc,) + args))
if not ((fb < fa) and (fb < fc)):
raise ValueError("Not a bracketing interval.")
funcalls = 3
else:
raise ValueError("Bracketing interval must be length 2 or 3 sequence.")
_gR = 0.61803399 # golden ratio conjugate: 2.0/(1.0+sqrt(5.0))
_gC = 1.0 - _gR
x3 = xc
x0 = xa
if (numpy.abs(xc - xb) > numpy.abs(xb - xa)):
x1 = xb
x2 = xb + _gC * (xc - xb)
else:
x2 = xb
x1 = xb - _gC * (xb - xa)
f1 = func(*((x1,) + args))
f2 = func(*((x2,) + args))
funcalls += 2
nit = 0
for i in xrange(maxiter):
if numpy.abs(x3 - x0) <= tol * (numpy.abs(x1) + numpy.abs(x2)):
break
if (f2 < f1):
x0 = x1
x1 = x2
x2 = _gR * x1 + _gC * x3
f1 = f2
f2 = func(*((x2,) + args))
else:
x3 = x2
x2 = x1
x1 = _gR * x2 + _gC * x0
f2 = f1
f1 = func(*((x1,) + args))
funcalls += 1
nit += 1
if (f1 < f2):
xmin = x1
fval = f1
else:
xmin = x2
fval = f2
return OptimizeResult(fun=fval, nfev=funcalls, x=xmin, nit=nit,
success=nit < maxiter)
def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
"""
Bracket the minimum of the function.
    Given a function and distinct initial points, search in the
    downhill direction (as defined by the initial points) and return
    new points xa, xb, xc that bracket the minimum of the function:
    f(xa) > f(xb) < f(xc). It does not always mean that the obtained
    solution will satisfy xa <= x <= xb.
Parameters
----------
func : callable f(x,*args)
Objective function to minimize.
xa, xb : float, optional
Bracketing interval. Defaults `xa` to 0.0, and `xb` to 1.0.
args : tuple, optional
Additional arguments (if present), passed to `func`.
grow_limit : float, optional
Maximum grow limit. Defaults to 110.0
maxiter : int, optional
Maximum number of iterations to perform. Defaults to 1000.
Returns
-------
xa, xb, xc : float
Bracket.
fa, fb, fc : float
Objective function values in bracket.
funcalls : int
Number of function evaluations made.
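    Examples
    --------
    A small illustrative run on a parabola (the function and starting points
    are arbitrary choices):
    >>> from scipy.optimize import bracket
    >>> def f(x):
    ...     return 10 * x ** 2 + 3 * x + 5
    >>> xa, xb, xc, fa, fb, fc, funcalls = bracket(f, xa=0.0, xb=1.0)
    >>> f(xb) < f(xa) and f(xb) < f(xc)
    True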
"""
_gold = 1.618034 # golden ratio: (1.0+sqrt(5.0))/2.0
_verysmall_num = 1e-21
fa = func(*(xa,) + args)
fb = func(*(xb,) + args)
if (fa < fb): # Switch so fa > fb
xa, xb = xb, xa
fa, fb = fb, fa
xc = xb + _gold * (xb - xa)
fc = func(*((xc,) + args))
funcalls = 3
iter = 0
while (fc < fb):
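        # Parabolic extrapolation through (xa, fa), (xb, fb), (xc, fc):
        # w is the abscissa of the fitted parabola's minimum and wlim caps
        # how far beyond xc the trial point may be placed.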
tmp1 = (xb - xa) * (fb - fc)
tmp2 = (xb - xc) * (fb - fa)
val = tmp2 - tmp1
if numpy.abs(val) < _verysmall_num:
denom = 2.0 * _verysmall_num
else:
denom = 2.0 * val
w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom
wlim = xb + grow_limit * (xc - xb)
if iter > maxiter:
raise RuntimeError("Too many iterations.")
iter += 1
if (w - xc) * (xb - w) > 0.0:
fw = func(*((w,) + args))
funcalls += 1
if (fw < fc):
xa = xb
xb = w
fa = fb
fb = fw
return xa, xb, xc, fa, fb, fc, funcalls
elif (fw > fb):
xc = w
fc = fw
return xa, xb, xc, fa, fb, fc, funcalls
w = xc + _gold * (xc - xb)
fw = func(*((w,) + args))
funcalls += 1
elif (w - wlim)*(wlim - xc) >= 0.0:
w = wlim
fw = func(*((w,) + args))
funcalls += 1
elif (w - wlim)*(xc - w) > 0.0:
fw = func(*((w,) + args))
funcalls += 1
if (fw < fc):
xb = xc
xc = w
w = xc + _gold * (xc - xb)
fb = fc
fc = fw
fw = func(*((w,) + args))
funcalls += 1
else:
w = xc + _gold * (xc - xb)
fw = func(*((w,) + args))
funcalls += 1
xa = xb
xb = xc
xc = w
fa = fb
fb = fc
fc = fw
return xa, xb, xc, fa, fb, fc, funcalls
def _linesearch_powell(func, p, xi, tol=1e-3):
    """Line-search algorithm using Brent's method.
    Find the minimum of the function ``func(p + alpha*xi)`` over ``alpha``.
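    For illustration: with ``func(v) = v[0]**2 + v[1]**2``,
    ``p = np.array([1.0, 1.0])`` and ``xi = np.array([1.0, 0.0])``, the search
    minimizes ``func([1 + alpha, 1])`` over ``alpha`` and returns roughly
    ``(1.0, array([0., 1.]), array([-1., 0.]))``.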
"""
def myfunc(alpha):
return func(p + alpha*xi)
alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol)
xi = alpha_min*xi
return squeeze(fret), p + xi, xi
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
maxfun=None, full_output=0, disp=1, retall=0, callback=None,
direc=None):
"""
Minimize a function using modified Powell's method. This method
only uses function values, not derivatives.
Parameters
----------
func : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to func.
callback : callable, optional
An optional user-supplied function, called after each
iteration. Called as ``callback(xk)``, where ``xk`` is the
current parameter vector.
direc : ndarray, optional
Initial direction set.
xtol : float, optional
Line-search error tolerance.
ftol : float, optional
Relative error in ``func(xopt)`` acceptable for convergence.
maxiter : int, optional
Maximum number of iterations to perform.
maxfun : int, optional
Maximum number of function evaluations to make.
full_output : bool, optional
If True, fopt, xi, direc, iter, funcalls, and
warnflag are returned.
disp : bool, optional
If True, print convergence messages.
retall : bool, optional
If True, return a list of the solution at each iteration.
Returns
-------
xopt : ndarray
Parameter which minimizes `func`.
fopt : number
Value of function at minimum: ``fopt = func(xopt)``.
direc : ndarray
Current direction set.
iter : int
Number of iterations.
funcalls : int
Number of function calls made.
warnflag : int
Integer warning flag:
1 : Maximum number of function evaluations.
2 : Maximum number of iterations.
allvecs : list
List of solutions at each iteration.
See also
--------
minimize: Interface to unconstrained minimization algorithms for
multivariate functions. See the 'Powell' `method` in particular.
Notes
-----
Uses a modification of Powell's method to find the minimum of
a function of N variables. Powell's method is a conjugate
direction method.
The algorithm has two loops. The outer loop
merely iterates over the inner loop. The inner loop minimizes
over each current direction in the direction set. At the end
of the inner loop, if certain conditions are met, the direction
that gave the largest decrease is dropped and replaced with
the difference between the current estimated x and the estimated
x from the beginning of the inner-loop.
    The technical conditions for replacing the direction of greatest
    decrease amount to checking that
    1. No further gain can be made along the direction of greatest decrease
       from that iteration.
    2. The direction of greatest decrease accounted for a sufficiently large
       fraction of the decrease in the function value from that iteration of
       the inner loop.
Examples
--------
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.fmin_powell(f, -1)
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 2
Function evaluations: 18
>>> minimum
array(0.0)
References
----------
Powell M.J.D. (1964) An efficient method for finding the minimum of a
function of several variables without calculating derivatives,
Computer Journal, 7 (2):155-162.
Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.:
Numerical Recipes (any edition), Cambridge University Press
"""
opts = {'xtol': xtol,
'ftol': ftol,
'maxiter': maxiter,
'maxfev': maxfun,
'disp': disp,
'direc': direc,
'return_all': retall}
res = _minimize_powell(func, x0, args, callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['direc'], res['nit'],
res['nfev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_powell(func, x0, args=(), callback=None,
xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
disp=False, direc=None, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
modified Powell algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxiter, maxfev : int
Maximum allowed number of iterations and function evaluations.
Will default to ``N*1000``, where ``N`` is the number of
variables, if neither `maxiter` or `maxfev` is set. If both
`maxiter` and `maxfev` are set, minimization will stop at the
first reached.
direc : ndarray
Initial set of direction vectors for the Powell method.
"""
_check_unknown_options(unknown_options)
maxfun = maxfev
retall = return_all
# we need to use a mutable object here that we can update in the
# wrapper function
fcalls, func = wrap_function(func, args)
x = asarray(x0).flatten()
if retall:
allvecs = [x]
N = len(x)
# If neither are set, then set both to default
if maxiter is None and maxfun is None:
maxiter = N * 1000
maxfun = N * 1000
elif maxiter is None:
        # Convert remaining Nones to np.inf, unless the other is np.inf, in
# which case use the default to avoid unbounded iteration
if maxfun == np.inf:
maxiter = N * 1000
else:
maxiter = np.inf
elif maxfun is None:
if maxiter == np.inf:
maxfun = N * 1000
else:
maxfun = np.inf
if direc is None:
direc = eye(N, dtype=float)
else:
direc = asarray(direc, dtype=float)
fval = squeeze(func(x))
x1 = x.copy()
iter = 0
ilist = list(range(N))
while True:
fx = fval
bigind = 0
delta = 0.0
for i in ilist:
direc1 = direc[i]
fx2 = fval
fval, x, direc1 = _linesearch_powell(func, x, direc1,
tol=xtol * 100)
if (fx2 - fval) > delta:
delta = fx2 - fval
bigind = i
iter += 1
if callback is not None:
callback(x)
if retall:
allvecs.append(x)
bnd = ftol * (numpy.abs(fx) + numpy.abs(fval)) + 1e-20
if 2.0 * (fx - fval) <= bnd:
break
if fcalls[0] >= maxfun:
break
if iter >= maxiter:
break
# Construct the extrapolated point
direc1 = x - x1
x2 = 2*x - x1
x1 = x.copy()
fx2 = squeeze(func(x2))
if (fx > fx2):
t = 2.0*(fx + fx2 - 2.0*fval)
temp = (fx - fval - delta)
t *= temp*temp
temp = fx - fx2
t -= delta*temp*temp
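            # Powell's criterion: only when t < 0 is a further line search
            # along the extrapolated direction worthwhile; the direction of
            # largest decrease (direc[bigind]) is then dropped in favour of
            # the net displacement direc1 from this outer iteration.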
if t < 0.0:
fval, x, direc1 = _linesearch_powell(func, x, direc1,
tol=xtol*100)
direc[bigind] = direc[-1]
direc[-1] = direc1
warnflag = 0
if fcalls[0] >= maxfun:
warnflag = 1
msg = _status_message['maxfev']
if disp:
print("Warning: " + msg)
elif iter >= maxiter:
warnflag = 2
msg = _status_message['maxiter']
if disp:
print("Warning: " + msg)
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iter)
print(" Function evaluations: %d" % fcalls[0])
x = squeeze(x)
result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0],
status=warnflag, success=(warnflag == 0),
message=msg, x=x)
if retall:
result['allvecs'] = allvecs
return result
def _endprint(x, flag, fval, maxfun, xtol, disp):
if flag == 0:
if disp > 1:
print("\nOptimization terminated successfully;\n"
"The returned value satisfies the termination criteria\n"
"(using xtol = ", xtol, ")")
if flag == 1:
if disp:
print("\nMaximum number of function evaluations exceeded --- "
"increase maxfun argument.\n")
return
def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin,
disp=False):
"""Minimize a function over a given range by brute force.
Uses the "brute force" method, i.e. computes the function's value
at each point of a multidimensional grid of points, to find the global
minimum of the function.
The function is evaluated everywhere in the range with the datatype of the
first call to the function, as enforced by the ``vectorize`` NumPy
function. The value and type of the function evaluation returned when
``full_output=True`` are affected in addition by the ``finish`` argument
(see Notes).
Parameters
----------
func : callable
The objective function to be minimized. Must be in the
form ``f(x, *args)``, where ``x`` is the argument in
the form of a 1-D array and ``args`` is a tuple of any
additional fixed parameters needed to completely specify
the function.
ranges : tuple
Each component of the `ranges` tuple must be either a
"slice object" or a range tuple of the form ``(low, high)``.
The program uses these to create the grid of points on which
the objective function will be computed. See `Note 2` for
more detail.
args : tuple, optional
Any additional fixed parameters needed to completely specify
the function.
Ns : int, optional
Number of grid points along the axes, if not otherwise
        specified. See `Note 2`.
full_output : bool, optional
If True, return the evaluation grid and the objective function's
values on it.
finish : callable, optional
An optimization function that is called with the result of brute force
minimization as initial guess. `finish` should take `func` and
the initial guess as positional arguments, and take `args` as
keyword arguments. It may additionally take `full_output`
and/or `disp` as keyword arguments. Use None if no "polishing"
function is to be used. See Notes for more details.
disp : bool, optional
Set to True to print convergence messages.
Returns
-------
x0 : ndarray
A 1-D array containing the coordinates of a point at which the
objective function had its minimum value. (See `Note 1` for
which point is returned.)
fval : float
Function value at the point `x0`. (Returned when `full_output` is
True.)
grid : tuple
Representation of the evaluation grid. It has the same
length as `x0`. (Returned when `full_output` is True.)
Jout : ndarray
Function values at each point of the evaluation
grid, `i.e.`, ``Jout = func(*grid)``. (Returned
when `full_output` is True.)
See Also
--------
basinhopping, differential_evolution
Notes
-----
*Note 1*: The program finds the gridpoint at which the lowest value
of the objective function occurs. If `finish` is None, that is the
point returned. When the global minimum occurs within (or not very far
outside) the grid's boundaries, and the grid is fine enough, that
point will be in the neighborhood of the global minimum.
However, users often employ some other optimization program to
"polish" the gridpoint values, `i.e.`, to seek a more precise
(local) minimum near `brute's` best gridpoint.
The `brute` function's `finish` option provides a convenient way to do
that. Any polishing program used must take `brute's` output as its
initial guess as a positional argument, and take `brute's` input values
for `args` as keyword arguments, otherwise an error will be raised.
It may additionally take `full_output` and/or `disp` as keyword arguments.
`brute` assumes that the `finish` function returns either an
`OptimizeResult` object or a tuple in the form:
``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing
value of the argument, ``Jmin`` is the minimum value of the objective
function, "..." may be some other returned values (which are not used
by `brute`), and ``statuscode`` is the status code of the `finish` program.
Note that when `finish` is not None, the values returned are those
of the `finish` program, *not* the gridpoint ones. Consequently,
while `brute` confines its search to the input grid points,
the `finish` program's results usually will not coincide with any
gridpoint, and may fall outside the grid's boundary. Thus, if a
minimum only needs to be found over the provided grid points, make
sure to pass in `finish=None`.
*Note 2*: The grid of points is a `numpy.mgrid` object.
For `brute` the `ranges` and `Ns` inputs have the following effect.
Each component of the `ranges` tuple can be either a slice object or a
two-tuple giving a range of values, such as (0, 5). If the component is a
slice object, `brute` uses it directly. If the component is a two-tuple
range, `brute` internally converts it to a slice object that interpolates
`Ns` points from its low-value to its high-value, inclusive.
Examples
--------
We illustrate the use of `brute` to seek the global minimum of a function
of two variables that is given as the sum of a positive-definite
quadratic and two deep "Gaussian-shaped" craters. Specifically, define
the objective function `f` as the sum of three other functions,
``f = f1 + f2 + f3``. We suppose each of these has a signature
``(z, *params)``, where ``z = (x, y)``, and ``params`` and the functions
are as defined below.
>>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
>>> def f1(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
>>> def f2(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
>>> def f3(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
>>> def f(z, *params):
... return f1(z, *params) + f2(z, *params) + f3(z, *params)
Thus, the objective function may have local minima near the minimum
of each of the three functions of which it is composed. To
use `fmin` to polish its gridpoint result, we may then continue as
follows:
>>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
>>> from scipy import optimize
>>> resbrute = optimize.brute(f, rranges, args=params, full_output=True,
... finish=optimize.fmin)
>>> resbrute[0] # global minimum
array([-1.05665192, 1.80834843])
>>> resbrute[1] # function value at global minimum
-3.4085818767
Note that if `finish` had been set to None, we would have gotten the
gridpoint [-1.0 1.75] where the rounded function value is -2.892.
"""
N = len(ranges)
if N > 40:
raise ValueError("Brute Force not possible with more "
"than 40 variables.")
lrange = list(ranges)
for k in range(N):
        if not isinstance(lrange[k], slice):
if len(lrange[k]) < 3:
lrange[k] = tuple(lrange[k]) + (complex(Ns),)
lrange[k] = slice(*lrange[k])
if (N == 1):
lrange = lrange[0]
def _scalarfunc(*params):
params = squeeze(asarray(params))
return func(params, *args)
vecfunc = vectorize(_scalarfunc)
grid = mgrid[lrange]
if (N == 1):
grid = (grid,)
Jout = vecfunc(*grid)
Nshape = shape(Jout)
indx = argmin(Jout.ravel(), axis=-1)
Nindx = zeros(N, int)
xmin = zeros(N, float)
for k in range(N - 1, -1, -1):
thisN = Nshape[k]
Nindx[k] = indx % Nshape[k]
indx = indx // thisN
for k in range(N):
xmin[k] = grid[k][tuple(Nindx)]
Jmin = Jout[tuple(Nindx)]
if (N == 1):
grid = grid[0]
xmin = xmin[0]
if callable(finish):
# set up kwargs for `finish` function
finish_args = _getargspec(finish).args
finish_kwargs = dict()
if 'full_output' in finish_args:
finish_kwargs['full_output'] = 1
if 'disp' in finish_args:
finish_kwargs['disp'] = disp
elif 'options' in finish_args:
# pass 'disp' as `options`
# (e.g. if `finish` is `minimize`)
finish_kwargs['options'] = {'disp': disp}
# run minimizer
res = finish(func, xmin, args=args, **finish_kwargs)
if isinstance(res, OptimizeResult):
xmin = res.x
Jmin = res.fun
success = res.success
else:
xmin = res[0]
Jmin = res[1]
success = res[-1] == 0
if not success:
if disp:
print("Warning: Either final optimization did not succeed "
"or `finish` does not return `statuscode` as its last "
"argument.")
if full_output:
return xmin, Jmin, grid, Jout
else:
return xmin
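# Illustrative sketch (not part of the scipy source): the `finish` protocol
# described in the Notes of `brute` above means a custom polisher must take the
# objective and brute's best gridpoint as positional arguments, accept `args`
# as a keyword argument, and return either an OptimizeResult or a tuple whose
# last element is a status code. A minimal conforming polisher, assuming
# scipy.optimize.minimize is available, could look like:
#
#     def my_polish(func, x0, args=(), full_output=None, disp=None):
#         # returns an OptimizeResult, from which brute reads .x, .fun, .success
#         return minimize(func, x0, args=args, method='Nelder-Mead')
#
#     # brute(f, rranges, args=params, finish=my_polish)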
def show_options(solver=None, method=None, disp=True):
"""
Show documentation for additional options of optimization solvers.
These are method-specific options that can be supplied through the
``options`` dict.
Parameters
----------
solver : str
Type of optimization solver. One of 'minimize', 'minimize_scalar',
'root', or 'linprog'.
method : str, optional
If not given, shows all methods of the specified solver. Otherwise,
        show only the options for the specified method. Valid values
        correspond to the method names of the respective solver (e.g. 'BFGS'
        for 'minimize').
disp : bool, optional
Whether to print the result rather than returning it.
Returns
-------
text
        Either None (for disp=True) or the text string (disp=False)
Notes
-----
The solver-specific methods are:
`scipy.optimize.minimize`
- :ref:`Nelder-Mead <optimize.minimize-neldermead>`
- :ref:`Powell <optimize.minimize-powell>`
- :ref:`CG <optimize.minimize-cg>`
- :ref:`BFGS <optimize.minimize-bfgs>`
- :ref:`Newton-CG <optimize.minimize-newtoncg>`
- :ref:`L-BFGS-B <optimize.minimize-lbfgsb>`
- :ref:`TNC <optimize.minimize-tnc>`
- :ref:`COBYLA <optimize.minimize-cobyla>`
- :ref:`SLSQP <optimize.minimize-slsqp>`
- :ref:`dogleg <optimize.minimize-dogleg>`
- :ref:`trust-ncg <optimize.minimize-trustncg>`
`scipy.optimize.root`
- :ref:`hybr <optimize.root-hybr>`
- :ref:`lm <optimize.root-lm>`
- :ref:`broyden1 <optimize.root-broyden1>`
- :ref:`broyden2 <optimize.root-broyden2>`
- :ref:`anderson <optimize.root-anderson>`
- :ref:`linearmixing <optimize.root-linearmixing>`
- :ref:`diagbroyden <optimize.root-diagbroyden>`
- :ref:`excitingmixing <optimize.root-excitingmixing>`
- :ref:`krylov <optimize.root-krylov>`
- :ref:`df-sane <optimize.root-dfsane>`
`scipy.optimize.minimize_scalar`
- :ref:`brent <optimize.minimize_scalar-brent>`
- :ref:`golden <optimize.minimize_scalar-golden>`
- :ref:`bounded <optimize.minimize_scalar-bounded>`
`scipy.optimize.linprog`
- :ref:`simplex <optimize.linprog-simplex>`
"""
import textwrap
doc_routines = {
'minimize': (
('bfgs', 'scipy.optimize.optimize._minimize_bfgs'),
('cg', 'scipy.optimize.optimize._minimize_cg'),
('cobyla', 'scipy.optimize.cobyla._minimize_cobyla'),
('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'),
('l-bfgs-b', 'scipy.optimize.lbfgsb._minimize_lbfgsb'),
('nelder-mead', 'scipy.optimize.optimize._minimize_neldermead'),
('newtoncg', 'scipy.optimize.optimize._minimize_newtoncg'),
('powell', 'scipy.optimize.optimize._minimize_powell'),
('slsqp', 'scipy.optimize.slsqp._minimize_slsqp'),
('tnc', 'scipy.optimize.tnc._minimize_tnc'),
('trust-ncg', 'scipy.optimize._trustregion_ncg._minimize_trust_ncg'),
),
'root': (
('hybr', 'scipy.optimize.minpack._root_hybr'),
('lm', 'scipy.optimize._root._root_leastsq'),
('broyden1', 'scipy.optimize._root._root_broyden1_doc'),
('broyden2', 'scipy.optimize._root._root_broyden2_doc'),
('anderson', 'scipy.optimize._root._root_anderson_doc'),
('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'),
('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'),
('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'),
('krylov', 'scipy.optimize._root._root_krylov_doc'),
('df-sane', 'scipy.optimize._spectral._root_df_sane'),
),
'linprog': (
('simplex', 'scipy.optimize._linprog._linprog_simplex'),
),
'minimize_scalar': (
('brent', 'scipy.optimize.optimize._minimize_scalar_brent'),
('bounded', 'scipy.optimize.optimize._minimize_scalar_bounded'),
('golden', 'scipy.optimize.optimize._minimize_scalar_golden'),
),
}
if solver is None:
text = ["\n\n\n========\n", "minimize\n", "========\n"]
text.append(show_options('minimize', disp=False))
text.extend(["\n\n===============\n", "minimize_scalar\n",
"===============\n"])
text.append(show_options('minimize_scalar', disp=False))
text.extend(["\n\n\n====\n", "root\n",
"====\n"])
text.append(show_options('root', disp=False))
text.extend(['\n\n\n=======\n', 'linprog\n',
'=======\n'])
text.append(show_options('linprog', disp=False))
text = "".join(text)
else:
solver = solver.lower()
if solver not in doc_routines:
raise ValueError('Unknown solver %r' % (solver,))
if method is None:
text = []
for name, _ in doc_routines[solver]:
text.extend(["\n\n" + name, "\n" + "="*len(name) + "\n\n"])
text.append(show_options(solver, name, disp=False))
text = "".join(text)
else:
methods = dict(doc_routines[solver])
if method not in methods:
raise ValueError("Unknown method %r" % (method,))
name = methods[method]
# Import function object
parts = name.split('.')
mod_name = ".".join(parts[:-1])
__import__(mod_name)
obj = getattr(sys.modules[mod_name], parts[-1])
# Get doc
doc = obj.__doc__
if doc is not None:
text = textwrap.dedent(doc).strip()
else:
text = ""
if disp:
print(text)
return
else:
return text
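# Illustrative usage sketch (not part of the scipy source), relying only on the
# behaviour documented in the docstring above:
#
#     show_options(solver='minimize', method='bfgs')   # prints the BFGS options
#     text = show_options(solver='root', disp=False)   # returns the text instead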
def main():
import time
times = []
algor = []
x0 = [0.8, 1.2, 0.7]
print("Nelder-Mead Simplex")
print("===================")
start = time.time()
x = fmin(rosen, x0)
print(x)
times.append(time.time() - start)
algor.append('Nelder-Mead Simplex\t')
print()
print("Powell Direction Set Method")
print("===========================")
start = time.time()
x = fmin_powell(rosen, x0)
print(x)
times.append(time.time() - start)
algor.append('Powell Direction Set Method.')
print()
print("Nonlinear CG")
print("============")
start = time.time()
x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200)
print(x)
times.append(time.time() - start)
algor.append('Nonlinear CG \t')
print()
print("BFGS Quasi-Newton")
print("=================")
start = time.time()
x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80)
print(x)
times.append(time.time() - start)
algor.append('BFGS Quasi-Newton\t')
print()
print("BFGS approximate gradient")
print("=========================")
start = time.time()
x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)
print(x)
times.append(time.time() - start)
algor.append('BFGS without gradient\t')
print()
print("Newton-CG with Hessian product")
print("==============================")
start = time.time()
x = fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod, maxiter=80)
print(x)
times.append(time.time() - start)
algor.append('Newton-CG with hessian product')
print()
print("Newton-CG with full Hessian")
print("===========================")
start = time.time()
x = fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80)
print(x)
times.append(time.time() - start)
algor.append('Newton-CG with full hessian')
print()
print("\nMinimizing the Rosenbrock function of order 3\n")
print(" Algorithm \t\t\t Seconds")
print("===========\t\t\t =========")
for k in range(len(algor)):
print(algor[k], "\t -- ", times[k])
if __name__ == "__main__":
main()
|
apbard/scipy
|
scipy/optimize/optimize.py
|
Python
|
bsd-3-clause
| 105,325
|
"""
Custom Authenticator to use generic OAuth2 with JupyterHub
"""
import base64
import os
from urllib.parse import urlencode
from jupyterhub.auth import LocalAuthenticator
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPRequest
from tornado.httputil import url_concat
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import List
from traitlets import Unicode
from traitlets import Union
from .oauth2 import OAuthenticator
from .traitlets import Callable
class GenericOAuthenticator(OAuthenticator):
login_service = Unicode("OAuth 2.0", config=True)
extra_params = Dict(help="Extra parameters for first POST request").tag(config=True)
claim_groups_key = Union(
[Unicode(os.environ.get('OAUTH2_GROUPS_KEY', 'groups')), Callable()],
config=True,
help="""
Userdata groups claim key from returned json for USERDATA_URL.
Can be a string key name or a callable that accepts the returned
json (as a dict) and returns the groups list. The callable is useful
e.g. for extracting the groups from a nested object in the response.
""",
)
allowed_groups = List(
Unicode(),
config=True,
help="Automatically allow members of selected groups",
)
admin_groups = List(
Unicode(),
config=True,
help="Groups whose members should have Jupyterhub admin privileges",
)
username_key = Union(
[Unicode(os.environ.get('OAUTH2_USERNAME_KEY', 'username')), Callable()],
config=True,
help="""
Userdata username key from returned json for USERDATA_URL.
Can be a string key name or a callable that accepts the returned
json (as a dict) and returns the username. The callable is useful
e.g. for extracting the username from a nested object in the
response.
""",
)
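    # Illustrative configuration sketch (not part of this module). The claim
    # names used below ('preferred_username', 'realm_access', 'groups') and the
    # group names are made-up examples of a nested userdata response; only the
    # traits defined above are real. E.g. in jupyterhub_config.py:
    #
    #     c.GenericOAuthenticator.username_key = lambda r: r['preferred_username']
    #     c.GenericOAuthenticator.claim_groups_key = lambda r: r.get('realm_access', {}).get('groups', [])
    #     c.GenericOAuthenticator.allowed_groups = ['jupyter-users']
    #     c.GenericOAuthenticator.admin_groups = ['jupyter-admins']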
userdata_params = Dict(
help="Userdata params to get user data login information"
).tag(config=True)
userdata_token_method = Unicode(
os.environ.get('OAUTH2_USERDATA_REQUEST_TYPE', 'header'),
config=True,
help="Method for sending access token in userdata request. Supported methods: header, url. Default: header",
)
tls_verify = Bool(
os.environ.get('OAUTH2_TLS_VERIFY', 'True').lower() in {'true', '1'},
config=True,
help="Disable TLS verification on http request",
)
basic_auth = Bool(
os.environ.get('OAUTH2_BASIC_AUTH', 'True').lower() in {'true', '1'},
config=True,
help="Disable basic authentication for access token request",
)
@default("http_client")
def _default_http_client(self):
return AsyncHTTPClient(
force_instance=True, defaults=dict(validate_cert=self.tls_verify)
)
def _get_headers(self):
headers = {"Accept": "application/json", "User-Agent": "JupyterHub"}
if self.basic_auth:
b64key = base64.b64encode(
bytes("{}:{}".format(self.client_id, self.client_secret), "utf8")
)
headers.update({"Authorization": "Basic {}".format(b64key.decode("utf8"))})
return headers
def _get_token(self, headers, params):
if self.token_url:
url = self.token_url
else:
raise ValueError("Please set the $OAUTH2_TOKEN_URL environment variable")
req = HTTPRequest(
url,
method="POST",
headers=headers,
body=urlencode(params),
)
return self.fetch(req, "fetching access token")
def _get_user_data(self, token_response):
access_token = token_response['access_token']
token_type = token_response['token_type']
# Determine who the logged in user is
headers = {
"Accept": "application/json",
"User-Agent": "JupyterHub",
"Authorization": "{} {}".format(token_type, access_token),
}
if self.userdata_url:
url = url_concat(self.userdata_url, self.userdata_params)
else:
raise ValueError("Please set the OAUTH2_USERDATA_URL environment variable")
if self.userdata_token_method == "url":
url = url_concat(self.userdata_url, dict(access_token=access_token))
req = HTTPRequest(url, headers=headers)
return self.fetch(req, "fetching user data")
@staticmethod
def _create_auth_state(token_response, user_data_response):
access_token = token_response['access_token']
refresh_token = token_response.get('refresh_token', None)
scope = token_response.get('scope', '')
if isinstance(scope, str):
scope = scope.split(' ')
return {
'access_token': access_token,
'refresh_token': refresh_token,
'oauth_user': user_data_response,
'scope': scope,
}
@staticmethod
def check_user_in_groups(member_groups, allowed_groups):
return bool(set(member_groups) & set(allowed_groups))
async def authenticate(self, handler, data=None):
code = handler.get_argument("code")
params = dict(
redirect_uri=self.get_callback_url(handler),
code=code,
grant_type='authorization_code',
)
params.update(self.extra_params)
headers = self._get_headers()
token_resp_json = await self._get_token(headers, params)
user_data_resp_json = await self._get_user_data(token_resp_json)
if callable(self.username_key):
name = self.username_key(user_data_resp_json)
else:
name = user_data_resp_json.get(self.username_key)
if not name:
self.log.error(
"OAuth user contains no key %s: %s",
self.username_key,
user_data_resp_json,
)
return
user_info = {
'name': name,
'auth_state': self._create_auth_state(token_resp_json, user_data_resp_json),
}
if self.allowed_groups:
self.log.info(
'Validating if user claim groups match any of {}'.format(
self.allowed_groups
)
)
if callable(self.claim_groups_key):
groups = self.claim_groups_key(user_data_resp_json)
else:
groups = user_data_resp_json.get(self.claim_groups_key)
if not groups:
self.log.error(
"No claim groups found for user! Something wrong with the `claim_groups_key` {}? {}".format(
self.claim_groups_key, user_data_resp_json
)
)
groups = []
if self.check_user_in_groups(groups, self.allowed_groups):
user_info['admin'] = self.check_user_in_groups(
groups, self.admin_groups
)
else:
user_info = None
return user_info
class LocalGenericOAuthenticator(LocalAuthenticator, GenericOAuthenticator):
"""A version that mixes in local system user creation"""
pass
|
jupyterhub/oauthenticator
|
oauthenticator/generic.py
|
Python
|
bsd-3-clause
| 7,328
|
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .models import Profile
from .forms import ProfileForm
@login_required
def profile_edit(request):
next = request.GET.get("next")
profile, created = Profile.objects.get_or_create(
user=request.user,
defaults={
"first_name": request.user.first_name,
"last_name": request.user.last_name,
}
)
if request.method == "POST":
form = ProfileForm(request.POST, instance=profile)
if form.is_valid():
profile = form.save()
request.user.first_name = form.cleaned_data["first_name"]
request.user.last_name = form.cleaned_data["last_name"]
messages.add_message(request, messages.SUCCESS,
"Successfully updated profile."
)
if next:
return redirect(next)
else:
form = ProfileForm(instance=profile)
return render(request, "profiles/edit.html", {
"form": form,
"next": next,
})
|
eldarion/pycon
|
pycon/profile/views.py
|
Python
|
bsd-3-clause
| 1,142
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from codecs import open
from os import path
import re
import ast
here = path.abspath(path.dirname(__file__))
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('radmyarchive/__init__.py', 'rb') as vf:
version = str(ast.literal_eval(_version_re.search(
vf.read().decode('utf-8')).group(1)))
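# Note on the version lookup above (illustrative, with a made-up version
# number): the regex matches a line such as
#     __version__ = '0.1.0'
# in radmyarchive/__init__.py, and ast.literal_eval turns the quoted literal
# into the plain string '0.1.0', so the version is declared in one place only.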
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
readme_file = f.read()
setup(
name="radmyarchive",
version=version,
author="Ömer Fadıl Usta",
author_email="omerusta@gmail.com",
packages=find_packages(),
scripts=["scripts/RADMYARCHIVE.py"],
url="https://github.com/usta/radmyarchive-py",
license="BSD",
keywords="exif image photo rename metadata arrange rearrange catalogue",
description="A simple photo rearranger with help of EXIF tags",
install_requires=['exifread', 'termcolor', 'colorama'],
long_description=readme_file,
classifiers=(
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.4",
"Topic :: Utilities",
),
)
|
usta/radmyarchive-py
|
setup.py
|
Python
|
bsd-3-clause
| 1,376
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sms', '0003_add_backend_models'),
]
operations = [
migrations.CreateModel(
name='SQLIVRBackend',
fields=[
],
options={
'proxy': True,
},
bases=('sms.sqlmobilebackend',),
),
migrations.CreateModel(
name='SQLKooKooBackend',
fields=[
],
options={
'proxy': True,
},
bases=('sms.sqlivrbackend',),
),
]
|
dimagi/commcare-hq
|
corehq/apps/sms/migrations/0004_add_sqlivrbackend_sqlkookoobackend.py
|
Python
|
bsd-3-clause
| 624
|
from h5py import tests
from h5py import *
class TestCreate(tests.HTest):
def setUp(self):
self.fid, self.name = tests.gettemp()
def tearDown(self):
import os
self.fid.close()
os.unlink(self.name)
@tests.require(api=18)
def test_create_anon(self):
""" (H5D) Anonymous dataset creation """
sid = h5s.create_simple((10,10))
dsid = h5d.create(self.fid, None, h5t.STD_I32LE, sid)
self.assert_(dsid)
self.assertIsInstance(dsid, h5d.DatasetID)
|
qsnake/h5py
|
h5py/tests/low/test_h5d.py
|
Python
|
bsd-3-clause
| 529
|
''' Script to convert the raw data and to plot all histograms'''
from __future__ import division
import logging
import warnings
import os
import multiprocessing as mp
from functools import partial
from matplotlib.backends.backend_pdf import PdfPages
import tables as tb
from tables import dtype_from_descr, Col
import numpy as np
from scipy.optimize import curve_fit, OptimizeWarning
from scipy.special import erf
import progressbar
from pixel_clusterizer.clusterizer import HitClusterizer
from pybar_fei4_interpreter.data_interpreter import PyDataInterpreter
from pybar_fei4_interpreter.data_histograming import PyDataHistograming
from pybar_fei4_interpreter import data_struct
from pybar_fei4_interpreter import analysis_utils as fast_analysis_utils
from pybar.analysis import analysis_utils
from pybar.analysis.plotting import plotting
from pybar.analysis.analysis_utils import check_bad_data, fix_raw_data, consecutive
from pybar.daq.readout_utils import is_fe_word, is_data_header, is_trigger_word, logical_and
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - [%(levelname)-8s] (%(threadName)-10s) %(message)s")
def scurve(x, A, mu, sigma):
return 0.5 * A * erf((x - mu) / (np.sqrt(2) * sigma)) + 0.5 * A
def fit_scurve(scurve_data, PlsrDAC): # data of some pixels to fit, has to be global for the multiprocessing module
index = np.argmax(np.diff(scurve_data))
max_occ = np.median(scurve_data[index:])
threshold = PlsrDAC[index]
if abs(max_occ) <= 1e-08: # or index == 0: occupancy is zero or close to zero
popt = [0, 0, 0]
else:
try:
popt, _ = curve_fit(scurve, PlsrDAC, scurve_data, p0=[max_occ, threshold, 2.5], check_finite=False)
except RuntimeError: # fit failed
popt = [0, 0, 0]
if popt[1] < 0: # threshold < 0 rarely happens if fit does not work
popt = [0, 0, 0]
return popt[1:3]
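# Minimal usage sketch for the S-curve helpers above (illustrative numbers
# only): fit_scurve() returns the fitted (threshold, noise) of the error
# function model defined in scurve().
#
#     plsr_dac = np.arange(0, 101, 2)
#     occ = scurve(plsr_dac, 100., 50., 3.)         # ideal curve for 100 injections
#     threshold, noise = fit_scurve(occ, plsr_dac)  # expect roughly 50 and 3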
class AnalyzeRawData(object):
"""A class to analyze FE-I4 raw data"""
def __init__(self, raw_data_file=None, analyzed_data_file=None, create_pdf=True, scan_parameter_name=None):
'''Initialize the AnalyzeRawData object:
- The c++ objects (Interpreter, Histogrammer, Clusterizer) are constructed
- Create one scan parameter table from all provided raw data files
- Create PdfPages object if needed
Parameters
----------
raw_data_file : string or list, tuple, set of strings
            Filename or a list of filenames of the raw data file(s); if given, any existing analyzed_data_file will be overwritten.
If None and if analyzed_data_file is not None, any existing analyzed_data_file will be opened, otherwise created.
Filename extension (.h5) does not need to be provided.
analyzed_data_file : string
The file name of the output analyzed data file.
If None and if raw_data_file is not None, the filename will be generated from the raw_data_file.
Filename extension (.h5) does not need to be provided.
create_pdf : boolean
            If True, plots will be written into a PDF file. Will be set to False if raw_data_file is None.
scan_parameter_name : string or iterable
The name/names of scan parameter(s) to be used during analysis. If None, the scan parameter
table is used to extract the scan parameters. Otherwise no scan parameter is set.
'''
self.interpreter = PyDataInterpreter()
self.histogram = PyDataHistograming()
raw_data_files = []
if isinstance(raw_data_file, basestring):
# normalize path
raw_data_file = os.path.abspath(raw_data_file)
f_list = analysis_utils.get_data_file_names_from_scan_base(raw_data_file, sort_by_time=True, meta_data_v2=self.interpreter.meta_table_v2)
if f_list:
raw_data_files = f_list
else:
if os.path.splitext(raw_data_file)[1].lower() != ".h5":
raw_data_files.append(os.path.splitext(raw_data_file)[0] + ".h5")
else:
raw_data_files.append(raw_data_file)
elif isinstance(raw_data_file, (list, tuple, set)): # iterable of raw data files
for one_raw_data_file in raw_data_file:
# normalize path
one_raw_data_file = os.path.abspath(one_raw_data_file)
if os.path.splitext(one_raw_data_file)[1].lower() != ".h5":
raw_data_files.append(os.path.splitext(one_raw_data_file)[0] + ".h5")
else:
raw_data_files.append(one_raw_data_file)
else:
raw_data_files = None
if analyzed_data_file is not None:
# normalize path
analyzed_data_file = os.path.abspath(analyzed_data_file)
if os.path.splitext(analyzed_data_file)[1].lower() != ".h5":
self._analyzed_data_file = os.path.splitext(analyzed_data_file)[0] + ".h5"
else: # iterable of raw data files
self._analyzed_data_file = analyzed_data_file
else:
if raw_data_file is not None:
if isinstance(raw_data_file, basestring):
self._analyzed_data_file = os.path.splitext(raw_data_file)[0] + '_interpreted.h5'
else: # iterable of raw data files
commonprefix = os.path.commonprefix(raw_data_files)
if commonprefix:
# use common string for output filename
one_raw_data_file = os.path.abspath(commonprefix)
else:
# take 1st filename for output filename
one_raw_data_file = os.path.abspath(raw_data_files[0])
self._analyzed_data_file = os.path.splitext(one_raw_data_file)[0] + '_interpreted.h5'
else:
self._analyzed_data_file = None
# create a scan parameter table from all raw data files
if raw_data_files is not None:
self.files_dict = analysis_utils.get_parameter_from_files(raw_data_files, parameters=scan_parameter_name)
if not analysis_utils.check_parameter_similarity(self.files_dict):
raise analysis_utils.NotSupportedError('Different scan parameters in multiple files are not supported.')
self.scan_parameters = analysis_utils.create_parameter_table(self.files_dict)
scan_parameter_names = analysis_utils.get_scan_parameter_names(self.scan_parameters)
logging.info('Scan parameter(s) from raw data file(s): %s', (', ').join(scan_parameter_names) if scan_parameter_names else 'None',)
else:
self.files_dict = None
self.scan_parameters = None
self.out_file_h5 = None
self.set_standard_settings()
if self._analyzed_data_file is not None:
if raw_data_file is None:
# assume that output file already exists containing analyzed raw data
self.out_file_h5 = tb.open_file(self._analyzed_data_file, mode="a", title="Interpreted FE-I4 raw data")
else:
# raw data files are given, overwrite any existing file
self.out_file_h5 = tb.open_file(self._analyzed_data_file, mode="w", title="Interpreted FE-I4 raw data")
if raw_data_file is not None and create_pdf:
if isinstance(raw_data_file, basestring):
output_pdf_filename = os.path.splitext(raw_data_file)[0] + ".pdf"
else: # iterable of raw data files
one_raw_data_file = os.path.abspath(raw_data_files[0])
output_pdf_filename = os.path.splitext(one_raw_data_file)[0] + ".pdf"
logging.info('Opening output PDF file: %s', output_pdf_filename)
self.output_pdf = PdfPages(output_pdf_filename)
else:
self.output_pdf = None
self._scan_parameter_name = scan_parameter_name
        self._settings_from_file_set = False # when analyzing a list of files the scan settings are taken from the first file only, thus set this flag to suppress the warning for the other files
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def _setup_clusterizer(self):
# Define all field names and data types
hit_fields = {'event_number': 'event_number',
'column': 'column',
'row': 'row',
'relative_BCID': 'frame',
'tot': 'charge',
'LVL1ID': 'LVL1ID',
'trigger_number': 'trigger_number',
'trigger_time_stamp': 'trigger_time_stamp',
'BCID': 'BCID',
'TDC': 'TDC',
'TDC_time_stamp': 'TDC_time_stamp',
'trigger_status': 'trigger_status',
'service_record': 'service_record',
'event_status': 'event_status'
}
hit_dtype = np.dtype([('event_number', '<i8'),
('trigger_number', '<u4'),
('trigger_time_stamp', '<u4'),
('relative_BCID', '<u1'),
('LVL1ID', '<u2'),
('column', '<u1'),
('row', '<u2'),
('tot', '<u1'),
('BCID', '<u2'),
('TDC', '<u2'),
('TDC_time_stamp', '<u1'),
('trigger_status', '<u1'),
('service_record', '<u4'),
('event_status', '<u2')])
cluster_fields = {'event_number': 'event_number',
'column': 'column',
'row': 'row',
'size': 'n_hits',
'ID': 'ID',
'tot': 'charge',
'seed_column': 'seed_column',
'seed_row': 'seed_row',
'mean_column': 'mean_column',
'mean_row': 'mean_row'}
cluster_dtype = np.dtype([('event_number', '<i8'),
('ID', '<u2'),
('size', '<u2'),
('tot', '<u2'),
('seed_column', '<u1'),
('seed_row', '<u2'),
('mean_column', '<f4'),
('mean_row', '<f4'),
('event_status', '<u2')])
# Initialize clusterizer with custom hit/cluster fields
self.clusterizer = HitClusterizer(
hit_fields=hit_fields,
hit_dtype=hit_dtype,
cluster_fields=cluster_fields,
cluster_dtype=cluster_dtype,
min_hit_charge=0,
max_hit_charge=13,
column_cluster_distance=2,
row_cluster_distance=3,
frame_cluster_distance=2,
ignore_same_hits=True)
# Set the cluster event status from the hit event status
def end_of_cluster_function(hits, clusters, cluster_size, cluster_hit_indices, cluster_index, cluster_id, charge_correction, noisy_pixels, disabled_pixels, seed_hit_index):
clusters[cluster_index].event_status = hits[seed_hit_index].event_status
# Set the new function to the clusterizer
self.clusterizer.set_end_of_cluster_function(end_of_cluster_function)
def close(self):
del self.interpreter
del self.histogram
del self.clusterizer
self._close_h5()
self._close_pdf()
def _close_h5(self):
if self.is_open(self.out_file_h5):
self.out_file_h5.close()
self.out_file_h5 = None
def _close_pdf(self):
if self.output_pdf is not None:
logging.info('Closing output PDF file: %s', str(self.output_pdf._file.fh.name))
self.output_pdf.close()
self.output_pdf = None
def set_standard_settings(self):
'''Set all settings to their standard values.
'''
if self.is_open(self.out_file_h5):
self.out_file_h5.close()
self.out_file_h5 = None
self._setup_clusterizer()
self.chunk_size = 3000000
self.n_injections = None
self.trig_count = 0 # 0 trig_count = 16 BCID per trigger
self.max_tot_value = 13
self.vcal_c0, self.vcal_c1 = None, None
self.c_low, self.c_mid, self.c_high = None, None, None
self.c_low_mask, self.c_high_mask = None, None
self._filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)
warnings.simplefilter("ignore", OptimizeWarning)
self.meta_event_index = None
self.fei4b = False
self.create_hit_table = False
self.create_empty_event_hits = False
self.create_meta_event_index = True
self.create_tot_hist = True
self.create_mean_tot_hist = False
self.create_tot_pixel_hist = True
self.create_rel_bcid_hist = True
self.correct_corrupted_data = False
self.create_error_hist = True
self.create_service_record_hist = True
self.create_occupancy_hist = True
self.create_meta_word_index = False
self.create_source_scan_hist = False
self.create_tdc_hist = False
self.create_tdc_counter_hist = False
self.create_tdc_pixel_hist = False
self.create_trigger_error_hist = False
self.create_threshold_hists = False
self.create_threshold_mask = True # Threshold/noise histogram mask: masking all pixels out of bounds
self.create_fitted_threshold_mask = True # Fitted threshold/noise histogram mask: masking all pixels out of bounds
self.create_fitted_threshold_hists = False
self.create_cluster_hit_table = False
self.create_cluster_table = False
self.create_cluster_size_hist = False
self.create_cluster_tot_hist = False
self.align_at_trigger = False # use the trigger word to align the events
        self.align_at_tdc = False # use the TDC word to align the events
self.trigger_data_format = 0 # 0: 31bit trigger number, 1: 31bit trigger time stamp, 2: 15bit trigger time stamp + 16bit trigger number
self.use_tdc_trigger_time_stamp = False # the tdc time stamp is the difference between trigger and tdc rising edge
self.max_tdc_delay = 255
self.max_trigger_number = 2 ** 16 - 1
self.set_stop_mode = False # The FE is read out with stop mode, therefore the BCID plot is different
def reset(self):
'''Reset the c++ libraries for new analysis.
'''
self.interpreter.reset()
self.histogram.reset()
@property
def chunk_size(self):
return self._chunk_size
@chunk_size.setter
def chunk_size(self, value):
self.interpreter.set_hit_array_size(2 * value) # worst case: one raw data word becoming 2 hit words
self._chunk_size = value
@property
def create_hit_table(self):
return self._create_hit_table
@create_hit_table.setter
def create_hit_table(self, value):
self._create_hit_table = value
@property
def create_empty_event_hits(self):
return self._create_empty_event_hits
@create_empty_event_hits.setter
def create_empty_event_hits(self, value):
self._create_empty_event_hits = value
self.interpreter.create_empty_event_hits(value)
@property
def create_occupancy_hist(self):
return self._create_occupancy_hist
@create_occupancy_hist.setter
def create_occupancy_hist(self, value):
self._create_occupancy_hist = value
self.histogram.create_occupancy_hist(value)
@property
def create_mean_tot_hist(self):
return self._create_mean_tot_hist
@create_mean_tot_hist.setter
def create_mean_tot_hist(self, value):
self._create_mean_tot_hist = value
self.histogram.create_mean_tot_hist(value)
@property
def create_source_scan_hist(self):
return self._create_source_scan_hist
@create_source_scan_hist.setter
def create_source_scan_hist(self, value):
self._create_source_scan_hist = value
@property
def create_tot_hist(self):
        return self._create_tot_hist
@create_tot_hist.setter
def create_tot_hist(self, value):
self._create_tot_hist = value
self.histogram.create_tot_hist(value)
@property
def create_tdc_hist(self):
return self._create_tdc_hist
@create_tdc_hist.setter
def create_tdc_hist(self, value):
self._create_tdc_hist = value
self.histogram.create_tdc_hist(value)
@property
def create_tdc_pixel_hist(self):
return self._create_tdc_pixel_hist
@create_tdc_pixel_hist.setter
def create_tdc_pixel_hist(self, value):
self._create_tdc_pixel_hist = value
self.histogram.create_tdc_pixel_hist(value)
@property
def create_tot_pixel_hist(self):
return self._create_tot_pixel_hist
@create_tot_pixel_hist.setter
def create_tot_pixel_hist(self, value):
self._create_tot_pixel_hist = value
self.histogram.create_tot_pixel_hist(value)
@property
def create_rel_bcid_hist(self):
return self._create_rel_bcid_hist
@create_rel_bcid_hist.setter
def create_rel_bcid_hist(self, value):
self._create_rel_bcid_hist = value
self.histogram.create_rel_bcid_hist(value)
@property
def create_threshold_hists(self):
return self._create_threshold_hists
@create_threshold_hists.setter
def create_threshold_hists(self, value):
self._create_threshold_hists = value
@property
def create_threshold_mask(self):
return self._create_threshold_mask
@create_threshold_mask.setter
def create_threshold_mask(self, value):
self._create_threshold_mask = value
@property
def create_fitted_threshold_mask(self):
return self._create_fitted_threshold_mask
@create_fitted_threshold_mask.setter
def create_fitted_threshold_mask(self, value):
self._create_fitted_threshold_mask = value
@property
def create_fitted_threshold_hists(self):
return self._create_fitted_threshold_hists
@create_fitted_threshold_hists.setter
def create_fitted_threshold_hists(self, value):
self._create_fitted_threshold_hists = value
@property
def correct_corrupted_data(self):
return self._correct_corrupted_data
@correct_corrupted_data.setter
def correct_corrupted_data(self, value):
self._correct_corrupted_data = value
@property
def create_error_hist(self):
return self._create_error_hist
@create_error_hist.setter
def create_error_hist(self, value):
self._create_error_hist = value
@property
def create_trigger_error_hist(self):
return self._create_trigger_error_hist
@create_trigger_error_hist.setter
def create_trigger_error_hist(self, value):
self._create_trigger_error_hist = value
@property
def create_service_record_hist(self):
return self._create_service_record_hist
@create_service_record_hist.setter
def create_service_record_hist(self, value):
self._create_service_record_hist = value
@property
def create_tdc_counter_hist(self):
return self._create_tdc_counter_hist
@create_tdc_counter_hist.setter
def create_tdc_counter_hist(self, value):
self._create_tdc_counter_hist = value
@property
def create_meta_event_index(self):
return self._create_meta_event_index
@create_meta_event_index.setter
def create_meta_event_index(self, value):
self._create_meta_event_index = value
@property
def create_meta_word_index(self):
return self._create_meta_word_index
@create_meta_word_index.setter
def create_meta_word_index(self, value):
self._create_meta_word_index = value
self.interpreter.create_meta_data_word_index(value)
@property
def fei4b(self):
return self._fei4b
@fei4b.setter
def fei4b(self, value):
self._fei4b = value
self.interpreter.set_FEI4B(value)
@property
def n_injections(self):
"""Get the numbers of injections per pixel."""
return self._n_injection
@n_injections.setter
def n_injections(self, value):
"""Set the numbers of injections per pixel."""
self._n_injection = value
@property
def trig_count(self):
"""Get the numbers of BCIDs (usually 16) of one event."""
return self._trig_count
@trig_count.setter
def trig_count(self, value):
"""Set the numbers of BCIDs (usually 16) of one event."""
self._trig_count = 16 if value == 0 else value
self.interpreter.set_trig_count(self._trig_count)
@property
def max_tot_value(self):
"""Get maximum ToT value that is considered to be a hit"""
return self._max_tot_value
@max_tot_value.setter
def max_tot_value(self, value):
"""Set maximum ToT value that is considered to be a hit"""
self._max_tot_value = value
self.interpreter.set_max_tot(self._max_tot_value)
self.histogram.set_max_tot(self._max_tot_value)
self.clusterizer.set_max_hit_charge(self._max_tot_value)
@property
def create_cluster_hit_table(self):
return self._create_cluster_hit_table
@create_cluster_hit_table.setter
def create_cluster_hit_table(self, value):
self._create_cluster_hit_table = value
@property
def create_cluster_table(self):
return self._create_cluster_table
@create_cluster_table.setter
def create_cluster_table(self, value):
self._create_cluster_table = value
@property
def create_cluster_size_hist(self):
return self._create_cluster_size_hist
@create_cluster_size_hist.setter
def create_cluster_size_hist(self, value):
self._create_cluster_size_hist = value
@property
def create_cluster_tot_hist(self):
return self._create_cluster_tot_hist
@create_cluster_tot_hist.setter
def create_cluster_tot_hist(self, value):
self._create_cluster_tot_hist = value
@property
def align_at_trigger(self):
return self._align_at_trigger
@align_at_trigger.setter
def align_at_trigger(self, value):
self._align_at_trigger = value
self.interpreter.align_at_trigger(value)
@property
def align_at_tdc(self):
return self._align_at_tdc
@align_at_tdc.setter
def align_at_tdc(self, value):
self._align_at_tdc = value
self.interpreter.align_at_tdc(value)
@property
def trigger_data_format(self):
return self._trigger_data_format
@trigger_data_format.setter
def trigger_data_format(self, value):
self._trigger_data_format = value
self.interpreter.set_trigger_data_format(value)
@property
def use_tdc_trigger_time_stamp(self):
return self._use_tdc_trigger_time_stamp
@use_tdc_trigger_time_stamp.setter
def use_tdc_trigger_time_stamp(self, value):
self._use_tdc_trigger_time_stamp = value
self.interpreter.use_tdc_trigger_time_stamp(value)
@property
def max_tdc_delay(self):
return self._max_tdc_delay
@max_tdc_delay.setter
def max_tdc_delay(self, value):
self._max_tdc_delay = value
self.interpreter.set_max_tdc_delay(value)
@property
def max_trigger_number(self):
return self._max_trigger_number
@max_trigger_number.setter
def max_trigger_number(self, value):
self._max_trigger_number = value
self.interpreter.set_max_trigger_number(value)
@property
def set_stop_mode(self):
return self._set_stop_mode
@set_stop_mode.setter
def set_stop_mode(self, value):
self._set_stop_mode = value
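    # Typical usage sketch (illustrative, with a made-up file name), based only
    # on the context manager and the option setters defined in this class:
    #
    #     with AnalyzeRawData(raw_data_file='example_scan.h5', create_pdf=True) as analyze_raw_data:
    #         analyze_raw_data.create_hit_table = True
    #         analyze_raw_data.create_cluster_table = True
    #         analyze_raw_data.interpret_word_table()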
def interpret_word_table(self, analyzed_data_file=None, use_settings_from_file=True, fei4b=None):
'''Interprets the raw data word table of all given raw data files with the c++ library.
Creates the h5 output file and PDF plots.
Parameters
----------
analyzed_data_file : string
The file name of the output analyzed data file. If None, the output analyzed data file
specified during initialization is taken.
use_settings_from_file : boolean
True if the needed parameters should be extracted from the raw data file
fei4b : boolean
True if the raw data is from FE-I4B.
'''
logging.info('Interpreting raw data file(s): ' + (', ').join(self.files_dict.keys()))
if self._create_meta_word_index:
meta_word = np.empty((self._chunk_size,), dtype=dtype_from_descr(data_struct.MetaInfoWordTable))
self.interpreter.set_meta_data_word_index(meta_word)
self.interpreter.reset_event_variables()
self.interpreter.reset_counters()
self.meta_data = analysis_utils.combine_meta_data(self.files_dict, meta_data_v2=self.interpreter.meta_table_v2)
if self.meta_data is None or self.meta_data.shape[0] == 0:
raise analysis_utils.IncompleteInputError('Meta data is empty. Stopping interpretation.')
self.interpreter.set_meta_data(self.meta_data) # tell interpreter the word index per readout to be able to calculate the event number per read out
meta_data_size = self.meta_data.shape[0]
self.meta_event_index = np.zeros((meta_data_size,), dtype=[('metaEventIndex', np.uint64)]) # this array is filled by the interpreter and holds the event number per read out
self.interpreter.set_meta_event_data(self.meta_event_index) # tell the interpreter the data container to write the meta event index to
if self.scan_parameters is None:
self.histogram.set_no_scan_parameter()
else:
self.scan_parameter_index = analysis_utils.get_scan_parameters_index(self.scan_parameters) # a array that labels unique scan parameter combinations
self.histogram.add_scan_parameter(self.scan_parameter_index) # just add an index for the different scan parameter combinations
if self._create_cluster_size_hist: # Cluster size result histogram
self._cluster_size_hist = np.zeros(shape=(6, ), dtype=np.uint32)
if self._create_cluster_tot_hist: # Cluster tot/size result histogram
self._cluster_tot_hist = np.zeros(shape=(16, 6), dtype=np.uint32)
close_analyzed_data_file = False
if analyzed_data_file is not None: # if an output file name is specified create new file for analyzed data
if self.is_open(self.out_file_h5) and os.path.abspath(analyzed_data_file) == os.path.abspath(self.out_file_h5.filename):
out_file_h5 = self.out_file_h5
else:
# normalize path
analyzed_data_file = os.path.abspath(analyzed_data_file)
if os.path.splitext(analyzed_data_file)[1].lower() != ".h5":
analyzed_data_file = os.path.splitext(analyzed_data_file)[0] + ".h5"
out_file_h5 = tb.open_file(analyzed_data_file, mode="w", title="Interpreted FE-I4 raw data")
close_analyzed_data_file = True
elif self.is_open(self.out_file_h5):
out_file_h5 = self.out_file_h5
else:
out_file_h5 = None
tmp_out_file_h5 = self.out_file_h5
if not self.is_open(self.out_file_h5) and self.is_open(out_file_h5):
close_analyzed_data_file = False
tmp_out_file_h5 = out_file_h5
self.out_file_h5 = out_file_h5
if self.is_open(self.out_file_h5):
self._analyzed_data_file = self.out_file_h5.filename
else:
            self._analyzed_data_file = None
if self._analyzed_data_file is not None:
if self._create_hit_table is True:
description = data_struct.HitInfoTable().columns.copy()
                hit_table = self.out_file_h5.create_table(self.out_file_h5.root, name='Hits', description=description, title='hit_data', filters=self._filter_table, chunkshape=(self._chunk_size // 100,))
if self._create_meta_word_index is True:
                meta_word_index_table = self.out_file_h5.create_table(self.out_file_h5.root, name='EventMetaData', description=data_struct.MetaInfoWordTable, title='event_meta_data', filters=self._filter_table, chunkshape=(self._chunk_size // 10,))
if self._create_cluster_table:
cluster_table = self.out_file_h5.create_table(self.out_file_h5.root, name='Cluster', description=data_struct.ClusterInfoTable, title='Cluster data', filters=self._filter_table, expectedrows=self._chunk_size)
if self._create_cluster_hit_table:
description = data_struct.ClusterHitInfoTable().columns.copy()
cluster_hit_table = self.out_file_h5.create_table(self.out_file_h5.root, name='ClusterHits', description=description, title='cluster_hit_data', filters=self._filter_table, expectedrows=self._chunk_size)
logging.info("Interpreting raw data...")
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=analysis_utils.get_total_n_data_words(self.files_dict), term_width=80)
progress_bar.start()
total_words = 0
for file_index, raw_data_file in enumerate(self.files_dict.keys()): # loop over all raw data files
self.interpreter.reset_meta_data_counter()
with tb.open_file(raw_data_file, mode="r") as in_file_h5:
if use_settings_from_file:
self._deduce_settings_from_file(in_file_h5)
else:
self.fei4b = fei4b
if self.interpreter.meta_table_v2:
index_start = in_file_h5.root.meta_data.read(field='index_start')
index_stop = in_file_h5.root.meta_data.read(field='index_stop')
else:
index_start = in_file_h5.root.meta_data.read(field='start_index')
index_stop = in_file_h5.root.meta_data.read(field='stop_index')
bad_word_index = set()
# Check for bad data
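                # Strategy of the correction pass below: every readout chunk
                # [index_start, index_stop) is parsed with check_bad_data(); chunks
                # that only become consistent after fix_raw_data() are collected in
                # bad_word_index and are repaired again, word by word, in the main
                # interpretation loop further down.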
if self._correct_corrupted_data:
tw = 2147483648 # trigger word
dh = 15269888 # data header
is_fe_data_header = logical_and(is_fe_word, is_data_header)
found_first_trigger = False
readout_slices = np.column_stack((index_start, index_stop))
previous_prepend_data_headers = None
prepend_data_headers = None
last_good_readout_index = None
last_index_with_event_data = None
for read_out_index, (index_start, index_stop) in enumerate(readout_slices):
try:
raw_data = in_file_h5.root.raw_data.read(index_start, index_stop)
                        except OverflowError:
pass
except tb.exceptions.HDF5ExtError:
break
# previous data chunk had bad data, check for good data
if (index_start - 1) in bad_word_index:
bad_data, current_prepend_data_headers, _ , _ = check_bad_data(raw_data, prepend_data_headers=1, trig_count=None)
if bad_data:
bad_word_index = bad_word_index.union(range(index_start, index_stop))
else:
# logging.info("found good data in %s from index %d to %d (chunk %d, length %d)" % (in_file_h5.filename, index_start, index_stop, read_out_index, (index_stop - index_start)))
if last_good_readout_index + 1 == read_out_index - 1:
logging.warning("found bad data in %s from index %d to %d (chunk %d, length %d)" % (in_file_h5.filename, readout_slices[last_good_readout_index][1], readout_slices[read_out_index - 1][1], last_good_readout_index + 1, (readout_slices[read_out_index - 1][1] - readout_slices[last_good_readout_index][1])))
else:
logging.warning("found bad data in %s from index %d to %d (chunk %d to %d, length %d)" % (in_file_h5.filename, readout_slices[last_good_readout_index][1], readout_slices[read_out_index - 1][1], last_good_readout_index + 1, read_out_index - 1, (readout_slices[read_out_index - 1][1] - readout_slices[last_good_readout_index][1])))
previous_good_raw_data = in_file_h5.root.raw_data.read(readout_slices[last_good_readout_index][0], readout_slices[last_good_readout_index][1] - 1)
previous_bad_raw_data = in_file_h5.root.raw_data.read(readout_slices[last_good_readout_index][1] - 1, readout_slices[read_out_index - 1][1])
fixed_raw_data, _ = fix_raw_data(previous_bad_raw_data, lsb_byte=None)
fixed_raw_data = np.r_[previous_good_raw_data, fixed_raw_data, raw_data]
_, prepend_data_headers, n_triggers, n_dh = check_bad_data(fixed_raw_data, prepend_data_headers=previous_prepend_data_headers, trig_count=self.trig_count)
last_good_readout_index = read_out_index
if n_triggers != 0 or n_dh != 0:
last_index_with_event_data = read_out_index
last_event_data_prepend_data_headers = prepend_data_headers
fixed_previous_raw_data = np.r_[previous_good_raw_data, fixed_raw_data]
_, previous_prepend_data_headers, _ , _ = check_bad_data(fixed_previous_raw_data, prepend_data_headers=previous_prepend_data_headers, trig_count=self.trig_count)
# check for bad data
else:
# workaround for first data chunk, might have missing trigger in some rare cases (already fixed in firmware)
if read_out_index == 0 and (np.any(is_trigger_word(raw_data) >= 1) or np.any(is_fe_data_header(raw_data) >= 1)):
bad_data, current_prepend_data_headers, n_triggers , n_dh = check_bad_data(raw_data, prepend_data_headers=1, trig_count=None)
# check for full last event in data
if current_prepend_data_headers == self.trig_count:
current_prepend_data_headers = None
# usually check for bad data happens here
else:
bad_data, current_prepend_data_headers, n_triggers , n_dh = check_bad_data(raw_data, prepend_data_headers=prepend_data_headers, trig_count=self.trig_count)
# do additional check with follow up data chunk and decide whether current chunk is defect or not
if bad_data:
if read_out_index == 0:
fixed_raw_data_chunk, _ = fix_raw_data(raw_data, lsb_byte=None)
fixed_raw_data_list = [fixed_raw_data_chunk]
else:
previous_raw_data = in_file_h5.root.raw_data.read(*readout_slices[read_out_index - 1])
raw_data_with_previous_data_word = np.r_[previous_raw_data[-1], raw_data]
fixed_raw_data_chunk, _ = fix_raw_data(raw_data_with_previous_data_word, lsb_byte=None)
fixed_raw_data = np.r_[previous_raw_data[:-1], fixed_raw_data_chunk]
                                    # last data word of chunk before broken chunk might be a trigger word or data header which cannot be recovered
fixed_raw_data_with_tw = np.r_[previous_raw_data[:-1], tw, fixed_raw_data_chunk]
fixed_raw_data_with_dh = np.r_[previous_raw_data[:-1], dh, fixed_raw_data_chunk]
fixed_raw_data_list = [fixed_raw_data, fixed_raw_data_with_tw, fixed_raw_data_with_dh]
bad_fixed_data, _, _ , _ = check_bad_data(fixed_raw_data_with_dh, prepend_data_headers=previous_prepend_data_headers, trig_count=self.trig_count)
bad_fixed_data = map(lambda data: check_bad_data(data, prepend_data_headers=previous_prepend_data_headers, trig_count=self.trig_count)[0], fixed_raw_data_list)
if not all(bad_fixed_data): # good fixed data
                                    # last word in chunk before current chunk is also bad
if index_start != 0:
bad_word_index.add(index_start - 1)
# adding all word from current chunk
bad_word_index = bad_word_index.union(range(index_start, index_stop))
last_good_readout_index = read_out_index - 1
else:
# a previous chunk might be broken and the last data word becomes a trigger word, so do additional checks
if last_index_with_event_data and last_event_data_prepend_data_headers != read_out_index:
before_bad_raw_data = in_file_h5.root.raw_data.read(readout_slices[last_index_with_event_data - 1][0], readout_slices[last_index_with_event_data - 1][1] - 1)
previous_bad_raw_data = in_file_h5.root.raw_data.read(readout_slices[last_index_with_event_data][0] - 1, readout_slices[last_index_with_event_data][1])
fixed_raw_data, _ = fix_raw_data(previous_bad_raw_data, lsb_byte=None)
previous_good_raw_data = in_file_h5.root.raw_data.read(readout_slices[last_index_with_event_data][1], readout_slices[read_out_index - 1][1])
fixed_raw_data = np.r_[before_bad_raw_data, fixed_raw_data, previous_good_raw_data, raw_data]
bad_fixed_previous_data, current_prepend_data_headers, _, _ = check_bad_data(fixed_raw_data, prepend_data_headers=last_event_data_prepend_data_headers, trig_count=self.trig_count)
if not bad_fixed_previous_data:
logging.warning("found bad data in %s from index %d to %d (chunk %d, length %d)" % (in_file_h5.filename, readout_slices[last_index_with_event_data][0], readout_slices[last_index_with_event_data][1], last_index_with_event_data, (readout_slices[last_index_with_event_data][1] - readout_slices[last_index_with_event_data][0])))
bad_word_index = bad_word_index.union(range(readout_slices[last_index_with_event_data][0] - 1, readout_slices[last_index_with_event_data][1]))
else:
logging.warning("found bad data which cannot be corrected in %s from index %d to %d (chunk %d, length %d)" % (in_file_h5.filename, index_start, index_stop, read_out_index, (index_stop - index_start)))
else:
logging.warning("found bad data which cannot be corrected in %s from index %d to %d (chunk %d, length %d)" % (in_file_h5.filename, index_start, index_stop, read_out_index, (index_stop - index_start)))
if n_triggers != 0 or n_dh != 0:
last_index_with_event_data = read_out_index
last_event_data_prepend_data_headers = prepend_data_headers
if not bad_data or (bad_data and bad_fixed_data):
previous_prepend_data_headers = prepend_data_headers
prepend_data_headers = current_prepend_data_headers
consecutive_bad_words_list = consecutive(sorted(bad_word_index))
lsb_byte = None
# Loop over raw data in chunks
for word_index in range(0, in_file_h5.root.raw_data.shape[0], self._chunk_size): # loop over all words in the actual raw data file
try:
raw_data = in_file_h5.root.raw_data.read(word_index, word_index + self._chunk_size)
                    except OverflowError as e:
logging.error('%s: 2^31 xrange() limitation in 32-bit Python', e)
except tb.exceptions.HDF5ExtError:
logging.warning('Raw data file %s has missing raw data. Continue raw data analysis.', in_file_h5.filename)
break
total_words += raw_data.shape[0]
# fix bad data
if self._correct_corrupted_data:
# increase word shift for every bad data chunk in raw data chunk
word_shift = 0
chunk_indices = np.arange(word_index, word_index + self._chunk_size)
for consecutive_bad_word_indices in consecutive_bad_words_list:
selected_words = np.intersect1d(consecutive_bad_word_indices, chunk_indices, assume_unique=True)
if selected_words.shape[0]:
fixed_raw_data, lsb_byte = fix_raw_data(raw_data[selected_words - word_index - word_shift], lsb_byte=lsb_byte)
raw_data = np.r_[raw_data[:selected_words[0] - word_index - word_shift], fixed_raw_data, raw_data[selected_words[-1] - word_index + 1 - word_shift:]]
# check if last word of bad data chunk in current raw data chunk
if consecutive_bad_word_indices[-1] in selected_words:
lsb_byte = None
# word shift by removing data word at the beginning of each defect chunk
word_shift += 1
# bad data chunk is at the end of current raw data chunk
else:
break
self.interpreter.interpret_raw_data(raw_data) # interpret the raw data
# store remaining buffered event in the interpreter at the end of the last file
if file_index == len(self.files_dict.keys()) - 1 and word_index == range(0, in_file_h5.root.raw_data.shape[0], self._chunk_size)[-1]: # store hits of the latest event of the last file
self.interpreter.store_event()
hits = self.interpreter.get_hits()
if self.scan_parameters is not None:
nEventIndex = self.interpreter.get_n_meta_data_event()
self.histogram.add_meta_event_index(self.meta_event_index, nEventIndex)
if self.is_histogram_hits():
self.histogram_hits(hits)
if self.is_cluster_hits():
cluster_hits, clusters = self.cluster_hits(hits)
if self._create_cluster_hit_table:
cluster_hit_table.append(cluster_hits)
if self._create_cluster_table:
cluster_table.append(clusters)
if self._create_cluster_size_hist:
if clusters['size'].shape[0] > 0 and np.max(clusters['size']) + 1 > self._cluster_size_hist.shape[0]:
self._cluster_size_hist.resize(np.max(clusters['size']) + 1)
self._cluster_size_hist += fast_analysis_utils.hist_1d_index(clusters['size'], shape=self._cluster_size_hist.shape)
if self._create_cluster_tot_hist:
if clusters['tot'].shape[0] > 0 and np.max(clusters['tot']) + 1 > self._cluster_tot_hist.shape[0]:
self._cluster_tot_hist.resize((np.max(clusters['tot']) + 1, self._cluster_tot_hist.shape[1]))
if clusters['size'].shape[0] > 0 and np.max(clusters['size']) + 1 > self._cluster_tot_hist.shape[1]:
self._cluster_tot_hist.resize((self._cluster_tot_hist.shape[0], np.max(clusters['size']) + 1))
self._cluster_tot_hist += fast_analysis_utils.hist_2d_index(clusters['tot'], clusters['size'], shape=self._cluster_tot_hist.shape)
if self._analyzed_data_file is not None and self._create_hit_table:
hit_table.append(hits)
if self._analyzed_data_file is not None and self._create_meta_word_index:
size = self.interpreter.get_n_meta_data_word()
meta_word_index_table.append(meta_word[:size])
if total_words <= progress_bar.maxval: # Otherwise exception is thrown
progress_bar.update(total_words)
self.out_file_h5.flush()
progress_bar.finish()
self._create_additional_data()
if close_analyzed_data_file:
self.out_file_h5.close()
self.out_file_h5 = None
self.out_file_h5 = out_file_h5
if self.is_open(self.out_file_h5):
self._analyzed_data_file = self.out_file_h5.filename
else:
self._analyzed_data_file = None
def _create_additional_data(self):
logging.info('Creating selected event histograms...')
if self._analyzed_data_file is not None and self._create_meta_event_index:
meta_data_size = self.meta_data.shape[0]
n_event_index = self.interpreter.get_n_meta_data_event()
if meta_data_size == n_event_index:
if self.interpreter.meta_table_v2:
description = data_struct.MetaInfoEventTableV2().columns.copy()
else:
description = data_struct.MetaInfoEventTable().columns.copy()
last_pos = len(description)
if self.scan_parameters is not None: # add additional column with the scan parameter
for index, scan_par_name in enumerate(self.scan_parameters.dtype.names):
dtype, _ = self.scan_parameters.dtype.fields[scan_par_name][:2]
description[scan_par_name] = Col.from_dtype(dtype, dflt=0, pos=last_pos + index)
meta_data_out_table = self.out_file_h5.create_table(self.out_file_h5.root, name='meta_data', description=description, title='MetaData', filters=self._filter_table)
entry = meta_data_out_table.row
for i in range(0, n_event_index):
if self.interpreter.meta_table_v2:
entry['event_number'] = self.meta_event_index[i][0] # event index
entry['timestamp_start'] = self.meta_data[i][3] # timestamp
entry['timestamp_stop'] = self.meta_data[i][4] # timestamp
entry['error_code'] = self.meta_data[i][5] # error code
else:
entry['event_number'] = self.meta_event_index[i][0] # event index
entry['time_stamp'] = self.meta_data[i][3] # time stamp
entry['error_code'] = self.meta_data[i][4] # error code
if self.scan_parameters is not None: # scan parameter if available
for scan_par_name in self.scan_parameters.dtype.names:
entry[scan_par_name] = self.scan_parameters[scan_par_name][i]
entry.append()
self.out_file_h5.flush()
if self.scan_parameters is not None:
logging.info("Save meta data with scan parameter " + scan_par_name)
else:
logging.error('Meta data analysis failed')
if self._create_service_record_hist:
self.service_record_hist = self.interpreter.get_service_records_counters()
if self._analyzed_data_file is not None:
service_record_hist_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistServiceRecord', title='Service Record Histogram', atom=tb.Atom.from_dtype(self.service_record_hist.dtype), shape=self.service_record_hist.shape, filters=self._filter_table)
service_record_hist_table[:] = self.service_record_hist
if self._create_tdc_counter_hist:
self.tdc_counter_hist = self.interpreter.get_tdc_counters()
if self._analyzed_data_file is not None:
tdc_counter_hist = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistTdcCounter', title='All Tdc word counter values', atom=tb.Atom.from_dtype(self.tdc_counter_hist.dtype), shape=self.tdc_counter_hist.shape, filters=self._filter_table)
tdc_counter_hist[:] = self.tdc_counter_hist
if self._create_error_hist:
self.error_counter_hist = self.interpreter.get_error_counters()
if self._analyzed_data_file is not None:
error_counter_hist_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistErrorCounter', title='Error Counter Histogram', atom=tb.Atom.from_dtype(self.error_counter_hist.dtype), shape=self.error_counter_hist.shape, filters=self._filter_table)
error_counter_hist_table[:] = self.error_counter_hist
if self._create_trigger_error_hist:
self.trigger_error_counter_hist = self.interpreter.get_trigger_error_counters()
if self._analyzed_data_file is not None:
trigger_error_counter_hist_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistTriggerErrorCounter', title='Trigger Error Counter Histogram', atom=tb.Atom.from_dtype(self.trigger_error_counter_hist.dtype), shape=self.trigger_error_counter_hist.shape, filters=self._filter_table)
trigger_error_counter_hist_table[:] = self.trigger_error_counter_hist
self._create_additional_hit_data()
self._create_additional_cluster_data()
def _create_additional_hit_data(self, safe_to_file=True):
logging.info('Create selected hit histograms')
if self._create_tot_hist:
self.tot_hist = self.histogram.get_tot_hist()
if self._analyzed_data_file is not None and safe_to_file:
tot_hist_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistTot', title='ToT Histogram', atom=tb.Atom.from_dtype(self.tot_hist.dtype), shape=self.tot_hist.shape, filters=self._filter_table)
tot_hist_table[:] = self.tot_hist
if self._create_tot_pixel_hist:
if self._analyzed_data_file is not None and safe_to_file:
self.tot_pixel_hist_array = np.swapaxes(self.histogram.get_tot_pixel_hist(), 0, 1) # swap axis col,row, parameter --> row, col, parameter
tot_pixel_hist_out = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistTotPixel', title='Tot Pixel Histogram', atom=tb.Atom.from_dtype(self.tot_pixel_hist_array.dtype), shape=self.tot_pixel_hist_array.shape, filters=self._filter_table)
tot_pixel_hist_out[:] = self.tot_pixel_hist_array
if self._create_tdc_hist:
self.tdc_hist = self.histogram.get_tdc_hist()
if self._analyzed_data_file is not None and safe_to_file:
tdc_hist_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistTdc', title='Tdc Histogram', atom=tb.Atom.from_dtype(self.tdc_hist.dtype), shape=self.tdc_hist.shape, filters=self._filter_table)
tdc_hist_table[:] = self.tdc_hist
if self._create_tdc_pixel_hist:
if self._analyzed_data_file is not None and safe_to_file:
self.tdc_pixel_hist_array = np.swapaxes(self.histogram.get_tdc_pixel_hist(), 0, 1) # swap axis col,row, parameter --> row, col, parameter
tdc_pixel_hist_out = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistTdcPixel', title='Tdc Pixel Histogram', atom=tb.Atom.from_dtype(self.tdc_pixel_hist_array.dtype), shape=self.tdc_pixel_hist_array.shape, filters=self._filter_table)
tdc_pixel_hist_out[:] = self.tdc_pixel_hist_array
if self._create_rel_bcid_hist:
self.rel_bcid_hist = self.histogram.get_rel_bcid_hist()
if self._analyzed_data_file is not None and safe_to_file:
if not self.set_stop_mode:
rel_bcid_hist_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistRelBcid', title='relative BCID Histogram', atom=tb.Atom.from_dtype(self.rel_bcid_hist.dtype), shape=(16, ), filters=self._filter_table)
rel_bcid_hist_table[:] = self.rel_bcid_hist[0:16]
else:
rel_bcid_hist_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistRelBcid', title='relative BCID Histogram in stop mode read out', atom=tb.Atom.from_dtype(self.rel_bcid_hist.dtype), shape=self.rel_bcid_hist.shape, filters=self._filter_table)
rel_bcid_hist_table[:] = self.rel_bcid_hist
if self._create_occupancy_hist:
self.occupancy_array = np.swapaxes(self.histogram.get_occupancy(), 0, 1) # swap axis col,row, parameter --> row, col, parameter
if self._analyzed_data_file is not None and safe_to_file:
occupancy_array_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistOcc', title='Occupancy Histogram', atom=tb.Atom.from_dtype(self.occupancy_array.dtype), shape=self.occupancy_array.shape, filters=self._filter_table)
occupancy_array_table[0:336, 0:80, 0:self.histogram.get_n_parameters()] = self.occupancy_array
if self._create_mean_tot_hist:
self.mean_tot_array = np.swapaxes(self.histogram.get_mean_tot(), 0, 1) # swap axis col,row, parameter --> row, col, parameter
if self._analyzed_data_file is not None and safe_to_file:
mean_tot_array_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistMeanTot', title='Mean ToT Histogram', atom=tb.Atom.from_dtype(self.mean_tot_array.dtype), shape=self.mean_tot_array.shape, filters=self._filter_table)
mean_tot_array_table[0:336, 0:80, 0:self.histogram.get_n_parameters()] = self.mean_tot_array
if self._create_threshold_hists:
_, scan_parameters_idx = np.unique(self.scan_parameters['PlsrDAC'], return_index=True)
scan_parameters = self.scan_parameters['PlsrDAC'][np.sort(scan_parameters_idx)]
if scan_parameters[0] >= scan_parameters[-1]:
raise analysis_utils.AnalysisError('Scan parameter PlsrDAC not increasing')
threshold, noise = np.zeros(80 * 336, dtype=np.float64), np.zeros(80 * 336, dtype=np.float64)
# calling fast algorithm function: M. Mertens, PhD thesis, Juelich 2010, note: noise zero if occupancy was zero
self.histogram.calculate_threshold_scan_arrays(threshold, noise, self._n_injection, np.min(self.scan_parameters['PlsrDAC']), np.max(self.scan_parameters['PlsrDAC']))
threshold_hist, noise_hist = np.reshape(a=threshold.view(), newshape=(80, 336), order='F'), np.reshape(a=noise.view(), newshape=(80, 336), order='F')
self.threshold_hist, self.noise_hist = np.swapaxes(threshold_hist, 0, 1), np.swapaxes(noise_hist, 0, 1)
if self._analyzed_data_file is not None and safe_to_file:
threshold_hist_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistThreshold', title='Threshold Histogram', atom=tb.Atom.from_dtype(self.threshold_hist.dtype), shape=(336, 80), filters=self._filter_table)
threshold_hist_table[:] = self.threshold_hist
noise_hist_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistNoise', title='Noise Histogram', atom=tb.Atom.from_dtype(self.noise_hist.dtype), shape=(336, 80), filters=self._filter_table)
noise_hist_table[:] = self.noise_hist
if self._create_fitted_threshold_hists:
_, scan_parameters_idx = np.unique(self.scan_parameters['PlsrDAC'], return_index=True)
scan_parameters = self.scan_parameters['PlsrDAC'][np.sort(scan_parameters_idx)]
self.scurve_fit_results = self.fit_scurves_multithread(self.out_file_h5, PlsrDAC=scan_parameters)
if self._analyzed_data_file is not None and safe_to_file:
fitted_threshold_hist_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistThresholdFitted', title='Threshold Fitted Histogram', atom=tb.Atom.from_dtype(self.scurve_fit_results.dtype), shape=(336, 80), filters=self._filter_table)
fitted_noise_hist_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistNoiseFitted', title='Noise Fitted Histogram', atom=tb.Atom.from_dtype(self.scurve_fit_results.dtype), shape=(336, 80), filters=self._filter_table)
fitted_threshold_hist_table.attrs.dimensions, fitted_noise_hist_table.attrs.dimensions = 'column, row, PlsrDAC', 'column, row, PlsrDAC'
fitted_threshold_hist_table[:], fitted_noise_hist_table[:] = self.scurve_fit_results[:, :, 0], self.scurve_fit_results[:, :, 1]
fitted_threshold_hist_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistThresholdFittedCalib', title='Threshold Fitted Histogram with PlsrDAC calibration', atom=tb.Atom.from_dtype(self.scurve_fit_results.dtype), shape=(336, 80), filters=self._filter_table)
fitted_noise_hist_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistNoiseFittedCalib', title='Noise Fitted Histogram with PlsrDAC calibration', atom=tb.Atom.from_dtype(self.scurve_fit_results.dtype), shape=(336, 80), filters=self._filter_table)
fitted_threshold_hist_table.attrs.dimensions, fitted_noise_hist_table.attrs.dimensions = 'column, row, electrons', 'column, row, electrons'
self.threshold_hist_calib, self.noise_hist_calib = self._get_plsr_dac_charge(self.scurve_fit_results[:, :, 0]), self._get_plsr_dac_charge(self.scurve_fit_results[:, :, 1], no_offset=True)
fitted_threshold_hist_table[:], fitted_noise_hist_table[:] = self.threshold_hist_calib, self.noise_hist_calib
def _create_additional_cluster_data(self, safe_to_file=True):
logging.info('Create selected cluster histograms')
if self._create_cluster_size_hist:
if self._analyzed_data_file is not None and safe_to_file:
cluster_size_hist_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistClusterSize', title='Cluster Size Histogram', atom=tb.Atom.from_dtype(self._cluster_size_hist.dtype), shape=self._cluster_size_hist.shape, filters=self._filter_table)
cluster_size_hist_table[:] = self._cluster_size_hist
if self._create_cluster_tot_hist:
self._cluster_tot_hist[:, 0] = self._cluster_tot_hist.sum(axis=1) # First bin is the projection of the others
if self._analyzed_data_file is not None and safe_to_file:
cluster_tot_hist_table = self.out_file_h5.create_carray(self.out_file_h5.root, name='HistClusterTot', title='Cluster Tot Histogram', atom=tb.Atom.from_dtype(self._cluster_tot_hist.dtype), shape=self._cluster_tot_hist.shape, filters=self._filter_table)
cluster_tot_hist_table[:] = self._cluster_tot_hist
def analyze_hit_table(self, analyzed_data_file=None, analyzed_data_out_file=None):
'''Analyzes a hit table with the c++ histogrammming/clusterizer.
Parameters
----------
analyzed_data_file : string
The filename of the analyzed data file. If None, the analyzed data file
specified during initialization is taken.
Filename extension (.h5) does not need to be provided.
analyzed_data_out_file : string
The filename of the new analyzed data file. If None, the analyzed data file
specified during initialization is taken.
Filename extension (.h5) does not need to be provided.
'''
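# Illustrative usage (added comment, not part of the original pyBAR source; the file
# names are hypothetical): a previously interpreted hit table could be re-analyzed via
#
#     analyze_raw_data.create_cluster_table = True
#     analyze_raw_data.create_cluster_size_hist = True
#     analyze_raw_data.analyze_hit_table(analyzed_data_file='scan_interpreted.h5',
#                                        analyzed_data_out_file='scan_analyzed.h5')
#
# where `analyze_raw_data` is an instance of this class and the create_* switches are the
# ones queried by is_cluster_hits()/is_histogram_hits() further below.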
close_analyzed_data_file = False
if analyzed_data_file is not None: # if an output file name is specified create new file for analyzed data
if self.is_open(self.out_file_h5) and os.path.abspath(analyzed_data_file) == os.path.abspath(self.out_file_h5.filename):
in_file_h5 = self.out_file_h5
else:
# normalize path
analyzed_data_file = os.path.abspath(analyzed_data_file)
if os.path.splitext(analyzed_data_file)[1].lower() != ".h5":
analyzed_data_file = os.path.splitext(analyzed_data_file)[0] + ".h5"
in_file_h5 = tb.open_file(analyzed_data_file, mode="r+")
close_analyzed_data_file = True
elif self.is_open(self.out_file_h5):
in_file_h5 = self.out_file_h5
else:
raise ValueError('Parameter "analyzed_data_file" not specified.')
# set output file if an output file name is given, otherwise check if an output file is already opened
close_analyzed_data_out_file = False
if analyzed_data_out_file is not None: # if an output file name is specified create new file for analyzed data
if self.is_open(self.out_file_h5) and os.path.abspath(analyzed_data_out_file) == os.path.abspath(self.out_file_h5.filename):
out_file_h5 = self.out_file_h5
elif self.is_open(in_file_h5) and os.path.abspath(analyzed_data_out_file) == os.path.abspath(in_file_h5.filename):
out_file_h5 = in_file_h5
else:
# normalize path
analyzed_data_out_file = os.path.abspath(analyzed_data_out_file)
if os.path.splitext(analyzed_data_out_file)[1].lower() != ".h5":
analyzed_data_out_file = os.path.splitext(analyzed_data_out_file)[0] + ".h5"
out_file_h5 = tb.open_file(analyzed_data_out_file, mode="w", title="Analyzed FE-I4 hits")
close_analyzed_data_out_file = True
elif self.is_open(self.out_file_h5):
out_file_h5 = self.out_file_h5
else:
raise ValueError('Parameter "analyzed_data_out_file" not specified.')
tmp_out_file_h5 = self.out_file_h5
if not self.is_open(self.out_file_h5):
if os.path.abspath(in_file_h5.filename) == os.path.abspath(out_file_h5.filename):
close_analyzed_data_file = False
tmp_out_file_h5 = in_file_h5
self.out_file_h5 = out_file_h5
self._analyzed_data_file = self.out_file_h5.filename
if self._create_cluster_table:
cluster_table = self.out_file_h5.create_table(self.out_file_h5.root, name='Cluster', description=data_struct.ClusterInfoTable, title='cluster_hit_data', filters=self._filter_table, expectedrows=self._chunk_size)
if self._create_cluster_hit_table:
cluster_hit_table = self.out_file_h5.create_table(self.out_file_h5.root, name='ClusterHits', description=data_struct.ClusterHitInfoTable, title='cluster_hit_data', filters=self._filter_table, expectedrows=self._chunk_size)
if self._create_cluster_size_hist: # Cluster size result histogram
self._cluster_size_hist = np.zeros(shape=(6, ), dtype=np.uint32)
if self._create_cluster_tot_hist: # Cluster tot/size result histogram
self._cluster_tot_hist = np.zeros(shape=(16, 6), dtype=np.uint32)
try:
meta_data_table = in_file_h5.root.meta_data
meta_data = meta_data_table[:]
self.scan_parameters = analysis_utils.get_unique_scan_parameter_combinations(meta_data, scan_parameter_columns_only=True)
if self.scan_parameters is not None: # check if there is an additional column after the error code column, if yes this column has scan parameter infos
meta_event_index = np.ascontiguousarray(analysis_utils.get_unique_scan_parameter_combinations(meta_data)['event_number'].astype(np.uint64))
self.histogram.add_meta_event_index(meta_event_index, array_length=len(meta_event_index))
self.scan_parameter_index = analysis_utils.get_scan_parameters_index(self.scan_parameters) # a array that labels unique scan parameter combinations
self.histogram.add_scan_parameter(self.scan_parameter_index) # just add an index for the different scan parameter combinations
scan_parameter_names = analysis_utils.get_scan_parameter_names(self.scan_parameters)
logging.info('Adding scan parameter(s) for analysis: %s', (', ').join(scan_parameter_names) if scan_parameter_names else 'None',)
else:
logging.info("No scan parameter data provided")
self.histogram.set_no_scan_parameter()
except tb.exceptions.NoSuchNodeError:
logging.info("No meta data provided")
self.histogram.set_no_scan_parameter()
table_size = in_file_h5.root.Hits.nrows
n_hits = 0 # number of hits in actual chunk
logging.info('Analyzing hits...')
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=table_size, term_width=80)
progress_bar.start()
for hits, index in analysis_utils.data_aligned_at_events(in_file_h5.root.Hits, chunk_size=self._chunk_size):
n_hits += hits.shape[0]
if self.is_cluster_hits():
cluster_hits, clusters = self.cluster_hits(hits)
if self.is_histogram_hits():
self.histogram_hits(hits)
if self._analyzed_data_file is not None and self._create_cluster_hit_table:
cluster_hit_table.append(cluster_hits)
if self._analyzed_data_file is not None and self._create_cluster_table:
cluster_table.append(clusters)
if self._create_cluster_size_hist:
if clusters['size'].shape[0] > 0 and np.max(clusters['size']) + 1 > self._cluster_size_hist.shape[0]:
self._cluster_size_hist.resize(np.max(clusters['size']) + 1)
self._cluster_size_hist += fast_analysis_utils.hist_1d_index(clusters['size'], shape=self._cluster_size_hist.shape)
if self._create_cluster_tot_hist:
if clusters['tot'].shape[0] > 0 and np.max(clusters['tot']) + 1 > self._cluster_tot_hist.shape[0]:
self._cluster_tot_hist.resize((np.max(clusters['tot']) + 1, self._cluster_tot_hist.shape[1]))
if clusters['size'].shape[0] > 0 and np.max(clusters['size']) + 1 > self._cluster_tot_hist.shape[1]:
self._cluster_tot_hist.resize((self._cluster_tot_hist.shape[0], np.max(clusters['size']) + 1))
self._cluster_tot_hist += fast_analysis_utils.hist_2d_index(clusters['tot'], clusters['size'], shape=self._cluster_tot_hist.shape)
self.out_file_h5.flush()
progress_bar.update(index)
progress_bar.finish()
if table_size == 0:
logging.warning('Found no hits')
if n_hits != table_size:
raise analysis_utils.AnalysisError('Tables have different sizes. Not all hits were analyzed.')
self._create_additional_hit_data()
self._create_additional_cluster_data()
if close_analyzed_data_out_file:
out_file_h5.close()
if close_analyzed_data_file:
in_file_h5.close()
else:
self.out_file_h5 = tmp_out_file_h5
if self.is_open(self.out_file_h5):
self._analyzed_data_file = self.out_file_h5.filename
else:
self._analyzed_data_file = None
def analyze_hits(self, hits, scan_parameter=None):
n_hits = hits.shape[0]
logging.debug('Analyze %d hits' % n_hits)
if scan_parameter is None: # if nothing specified keep actual setting
logging.debug('Keep scan parameter settings ')
elif not scan_parameter: # set no scan parameter
logging.debug('No scan parameter used')
self.histogram.set_no_scan_parameter()
else:
logging.info('Setting a scan parameter')
self.histogram.add_scan_parameter(scan_parameter)
if self.is_cluster_hits():
logging.debug('Cluster hits')
cluster_hits, clusters = self.cluster_hits(hits)
else:
cluster_hits = None
clusters = None
if self.is_histogram_hits():
logging.debug('Histogramming hits')
self.histogram_hits(hits)
return cluster_hits, clusters
def cluster_hits(self, hits, start_index=None, stop_index=None):
return self.clusterizer.cluster_hits(hits[start_index:stop_index])
def histogram_hits(self, hits, start_index=None, stop_index=None):
self.histogram.add_hits(hits[start_index:stop_index])
def histogram_cluster_seed_hits(self, clusters, start_index=None, stop_index=None):
self.histogram.add_hits(clusters[start_index:stop_index])
def plot_histograms(self, pdf_filename=None, analyzed_data_file=None, maximum=None, create_hit_hists_only=False): # plots the histogram from output file if available otherwise from ram
logging.info('Creating histograms%s', (' (source: %s)' % analyzed_data_file) if analyzed_data_file is not None else (' (source: %s)' % self._analyzed_data_file) if self._analyzed_data_file is not None else '')
close_analyzed_data_file = False
if analyzed_data_file is not None:
if self.is_open(self.out_file_h5) and os.path.abspath(analyzed_data_file) == os.path.abspath(self.out_file_h5.filename):
out_file_h5 = self.out_file_h5
else:
# normalize path
analyzed_data_file = os.path.abspath(analyzed_data_file)
if os.path.splitext(analyzed_data_file)[1].lower() != ".h5":
analyzed_data_file = os.path.splitext(analyzed_data_file)[0] + ".h5"
out_file_h5 = tb.open_file(analyzed_data_file, mode="r")
close_analyzed_data_file = True
elif self.is_open(self.out_file_h5):
out_file_h5 = self.out_file_h5
else:
logging.info('Parameter "analyzed_data_file" not set. Use histograms from memory.')
out_file_h5 = None
if pdf_filename is not None:
# normalize path
pdf_filename = os.path.abspath(pdf_filename)
if os.path.splitext(pdf_filename)[1].lower() != ".pdf":
output_pdf_filename = os.path.splitext(pdf_filename)[0] + ".pdf"
else:
output_pdf_filename = pdf_filename
# reuse existing PDF file
if pdf_filename is not None and os.path.abspath(output_pdf_filename) != self.output_pdf._file.fh.name:
logging.info('Opening output PDF file: %s', output_pdf_filename)
output_pdf = PdfPages(output_pdf_filename)
close_pdf = True
else:
output_pdf = self.output_pdf
close_pdf = False
if output_pdf is None:
raise ValueError('Parameter "pdf_filename" not specified.')
logging.info('Saving histograms to PDF file: %s', str(output_pdf._file.fh.name))
if self._create_threshold_hists:
if self._create_threshold_mask: # mask pixel with bad data for plotting
if out_file_h5 is not None:
self.threshold_mask = analysis_utils.generate_threshold_mask(out_file_h5.root.HistNoise[:])
else:
self.threshold_mask = analysis_utils.generate_threshold_mask(self.noise_hist)
else:
self.threshold_mask = np.zeros_like(out_file_h5.root.HistThreshold[:] if out_file_h5 is not None else self.threshold_hist, dtype=np.bool)
threshold_hist = np.ma.array(out_file_h5.root.HistThreshold[:] if out_file_h5 is not None else self.threshold_hist, mask=self.threshold_mask)
noise_hist = np.ma.array(out_file_h5.root.HistNoise[:] if out_file_h5 is not None else self.noise_hist, mask=self.threshold_mask)
mask_cnt = np.ma.count_masked(noise_hist)
logging.info('Fast algorithm: masking %d pixel(s)', mask_cnt)
plotting.plot_three_way(hist=threshold_hist, title='Threshold%s' % ((' (masked %i pixel(s))' % mask_cnt) if self._create_threshold_mask else ''), x_axis_title="threshold [PlsrDAC]", filename=output_pdf, bins=100, minimum=0, maximum=maximum)
plotting.plot_three_way(hist=noise_hist, title='Noise%s' % ((' (masked %i pixel(s))' % mask_cnt) if self._create_threshold_mask else ''), x_axis_title="noise [PlsrDAC]", filename=output_pdf, bins=100, minimum=0, maximum=maximum)
if self._create_fitted_threshold_hists:
if self._create_fitted_threshold_mask:
if out_file_h5 is not None:
self.fitted_threshold_mask = analysis_utils.generate_threshold_mask(out_file_h5.root.HistNoiseFitted[:])
else:
self.fitted_threshold_mask = analysis_utils.generate_threshold_mask(self.scurve_fit_results[:, :, 1])
else:
self.fitted_threshold_mask = np.zeros_like(out_file_h5.root.HistThresholdFitted[:] if out_file_h5 is not None else self.scurve_fit_results[:, :, 0], dtype=np.bool8)
threshold_hist = np.ma.array(out_file_h5.root.HistThresholdFitted[:] if out_file_h5 is not None else self.scurve_fit_results[:, :, 0], mask=self.fitted_threshold_mask)
noise_hist = np.ma.array(out_file_h5.root.HistNoiseFitted[:] if out_file_h5 is not None else self.scurve_fit_results[:, :, 1], mask=self.fitted_threshold_mask)
threshold_hist_calib = np.ma.array(out_file_h5.root.HistThresholdFittedCalib[:] if out_file_h5 is not None else self.threshold_hist_calib[:], mask=self.fitted_threshold_mask)
noise_hist_calib = np.ma.array(out_file_h5.root.HistNoiseFittedCalib[:] if out_file_h5 is not None else self.noise_hist_calib[:], mask=self.fitted_threshold_mask)
mask_cnt = np.ma.count_masked(noise_hist)
logging.info('S-curve fit: masking %d pixel(s)', mask_cnt)
plotting.plot_three_way(hist=threshold_hist, title='Threshold (S-curve fit, masked %i pixel(s))' % mask_cnt, x_axis_title="Threshold [PlsrDAC]", filename=output_pdf, bins=100, minimum=0, maximum=maximum)
plotting.plot_three_way(hist=noise_hist, title='Noise (S-curve fit, masked %i pixel(s))' % mask_cnt, x_axis_title="Noise [PlsrDAC]", filename=output_pdf, bins=100, minimum=0, maximum=maximum)
plotting.plot_three_way(hist=threshold_hist_calib, title='Threshold (S-curve fit, masked %i pixel(s))' % mask_cnt, x_axis_title="Threshold [e]", filename=output_pdf, bins=100, minimum=0)
plotting.plot_three_way(hist=noise_hist_calib, title='Noise (S-curve fit, masked %i pixel(s))' % mask_cnt, x_axis_title="Noise [e]", filename=output_pdf, bins=100, minimum=0)
if self._create_occupancy_hist:
if self._create_fitted_threshold_hists:
_, scan_parameters_idx = np.unique(self.scan_parameters['PlsrDAC'], return_index=True)
scan_parameters = self.scan_parameters['PlsrDAC'][np.sort(scan_parameters_idx)]
plotting.plot_scurves(occupancy_hist=out_file_h5.root.HistOcc[:] if out_file_h5 is not None else self.occupancy_array[:], filename=output_pdf, scan_parameters=scan_parameters, scan_parameter_name="PlsrDAC")
else:
hist = np.sum(out_file_h5.root.HistOcc[:], axis=2) if out_file_h5 is not None else np.sum(self.occupancy_array[:], axis=2)
occupancy_array_masked = np.ma.masked_equal(hist, 0)
if self._create_source_scan_hist:
plotting.plot_fancy_occupancy(hist=occupancy_array_masked, filename=output_pdf, z_max='median')
plotting.plot_occupancy(hist=occupancy_array_masked, filename=output_pdf, z_max='maximum')
else:
plotting.plot_three_way(hist=occupancy_array_masked, title="Occupancy", x_axis_title="occupancy", filename=output_pdf, maximum=maximum)
plotting.plot_occupancy(hist=occupancy_array_masked, filename=output_pdf, z_max='median')
if self._create_tot_hist:
plotting.plot_tot(hist=out_file_h5.root.HistTot[:] if out_file_h5 is not None else self.tot_hist, filename=output_pdf)
if self._create_tot_pixel_hist:
tot_pixel_hist = out_file_h5.root.HistTotPixel[:] if out_file_h5 is not None else self.tot_pixel_hist_array
total_hits_masked = np.ma.masked_equal(np.sum(tot_pixel_hist, axis=2), 0)
mean_pixel_tot = np.average(tot_pixel_hist, axis=2, weights=range(16)) * sum(range(0, 16)) / total_hits_masked
plotting.plot_three_way(mean_pixel_tot, title='Mean ToT', x_axis_title='mean ToT', filename=output_pdf, minimum=0, maximum=15)
if self._create_tdc_counter_hist:
plotting.plot_tdc_counter(hist=out_file_h5.root.HistTdcCounter[:] if out_file_h5 is not None else self.tdc_counter_hist, filename=output_pdf)
if self._create_tdc_hist:
plotting.plot_tdc(hist=out_file_h5.root.HistTdc[:] if out_file_h5 is not None else self.tdc_hist, filename=output_pdf)
if self._create_cluster_size_hist:
plotting.plot_cluster_size(hist=out_file_h5.root.HistClusterSize[:] if out_file_h5 is not None else self.cluster_size_hist, filename=output_pdf)
if self._create_cluster_tot_hist:
plotting.plot_cluster_tot(hist=out_file_h5.root.HistClusterTot[:] if out_file_h5 is not None else self.cluster_tot_hist, filename=output_pdf)
if self._create_cluster_tot_hist and self._create_cluster_size_hist:
plotting.plot_cluster_tot_size(hist=out_file_h5.root.HistClusterTot[:] if out_file_h5 is not None else self.cluster_tot_hist, filename=output_pdf)
if self._create_rel_bcid_hist:
if self.set_stop_mode:
plotting.plot_relative_bcid_stop_mode(hist=out_file_h5.root.HistRelBcid[:] if out_file_h5 is not None else self.rel_bcid_hist, filename=output_pdf)
else:
plotting.plot_relative_bcid(hist=out_file_h5.root.HistRelBcid[0:16] if out_file_h5 is not None else self.rel_bcid_hist[0:16], filename=output_pdf)
if self._create_tdc_pixel_hist:
tdc_pixel_hist = out_file_h5.root.HistTdcPixel[:, :, :1024] if out_file_h5 is not None else self.tdc_pixel_hist_array[:, :, :1024] # only take first 1024 values, otherwise memory error likely
total_hits_masked = np.ma.masked_equal(np.sum(tdc_pixel_hist, axis=2), 0)
mean_pixel_tdc = np.average(tdc_pixel_hist, axis=2, weights=range(1024)) * sum(range(0, 1024)) / total_hits_masked
plotting.plot_three_way(mean_pixel_tdc, title='Mean TDC', x_axis_title='mean TDC', maximum=2 * np.ma.median(np.ma.masked_invalid(mean_pixel_tdc)), filename=output_pdf)
if not create_hit_hists_only:
if analyzed_data_file is None and self._create_error_hist:
plotting.plot_event_errors(hist=out_file_h5.root.HistErrorCounter[:] if out_file_h5 is not None else self.error_counter_hist, filename=output_pdf)
if analyzed_data_file is None and self._create_service_record_hist:
plotting.plot_service_records(hist=out_file_h5.root.HistServiceRecord[:] if out_file_h5 is not None else self.service_record_hist, filename=output_pdf)
if analyzed_data_file is None and self._create_trigger_error_hist:
plotting.plot_trigger_errors(hist=out_file_h5.root.HistTriggerErrorCounter[:] if out_file_h5 is not None else self.trigger_error_counter_hist, filename=output_pdf)
if close_analyzed_data_file:
out_file_h5.close()
if close_pdf:
logging.info('Closing output PDF file: %s', str(output_pdf._file.fh.name))
output_pdf.close()
def fit_scurves_multithread(self, hit_table_file=None, PlsrDAC=None):
logging.info("Start S-curve fit on %d CPU core(s)", mp.cpu_count())
occupancy_hist = hit_table_file.root.HistOcc[:] if hit_table_file is not None else self.occupancy_array[:] # take data from RAM if no file is opened
occupancy_hist_shaped = occupancy_hist.reshape(occupancy_hist.shape[0] * occupancy_hist.shape[1], occupancy_hist.shape[2])
# reverse data to fit s-curve
if PlsrDAC[0] > PlsrDAC[-1]:
occupancy_hist_shaped = np.flip(occupancy_hist_shaped, axis=1)
PlsrDAC = np.flip(PlsrDAC, axis=0)
partialfit_scurve = partial(fit_scurve, PlsrDAC=PlsrDAC) # trick to give a function more than one parameter, needed for pool.map
pool = mp.Pool() # create as many workers as physical cores are available
try:
result_list = pool.map(partialfit_scurve, occupancy_hist_shaped.tolist())
except TypeError:
raise analysis_utils.NotSupportedError('Less than 3 points found for S-curve fit.')
finally:
pool.close()
pool.join()
result_array = np.array(result_list)
logging.info("S-curve fit finished")
return result_array.reshape(occupancy_hist.shape[0], occupancy_hist.shape[1], 2)
def is_open(self, h5_file):
if isinstance(h5_file, tb.file.File):
return True
return False
def is_histogram_hits(self): # returns true if a setting needs to have the hit histogramming active
if self._create_occupancy_hist or self._create_tot_hist or self._create_rel_bcid_hist or self._create_hit_table or self._create_threshold_hists or self._create_fitted_threshold_hists:
return True
return False
def is_cluster_hits(self): # returns true if a setting needs to have the clusterizer active
if self.create_cluster_hit_table or self.create_cluster_table or self.create_cluster_size_hist or self.create_cluster_tot_hist:
return True
return False
def _deduce_settings_from_file(self, opened_raw_data_file): # TODO: parse better
'''Tries to get the scan parameters needed for analysis from the raw data file
'''
try:  # take info from raw data files (not available in old files)
flavor = opened_raw_data_file.root.configuration.miscellaneous[:][np.where(opened_raw_data_file.root.configuration.miscellaneous[:]['name'] == 'Flavor')]['value'][0]
self._settings_from_file_set = True
# adding this for special cases e.g., stop-mode scan
if "trig_count" in opened_raw_data_file.root.configuration.run_conf[:]['name']:
trig_count = opened_raw_data_file.root.configuration.run_conf[:][np.where(opened_raw_data_file.root.configuration.run_conf[:]['name'] == 'trig_count')]['value'][0]
else:
trig_count = opened_raw_data_file.root.configuration.global_register[:][np.where(opened_raw_data_file.root.configuration.global_register[:]['name'] == 'Trig_Count')]['value'][0]
vcal_c0 = opened_raw_data_file.root.configuration.calibration_parameters[:][np.where(opened_raw_data_file.root.configuration.calibration_parameters[:]['name'] == 'Vcal_Coeff_0')]['value'][0]
vcal_c1 = opened_raw_data_file.root.configuration.calibration_parameters[:][np.where(opened_raw_data_file.root.configuration.calibration_parameters[:]['name'] == 'Vcal_Coeff_1')]['value'][0]
c_low = opened_raw_data_file.root.configuration.calibration_parameters[:][np.where(opened_raw_data_file.root.configuration.calibration_parameters[:]['name'] == 'C_Inj_Low')]['value'][0]
c_mid = opened_raw_data_file.root.configuration.calibration_parameters[:][np.where(opened_raw_data_file.root.configuration.calibration_parameters[:]['name'] == 'C_Inj_Med')]['value'][0]
c_high = opened_raw_data_file.root.configuration.calibration_parameters[:][np.where(opened_raw_data_file.root.configuration.calibration_parameters[:]['name'] == 'C_Inj_High')]['value'][0]
self.c_low_mask = opened_raw_data_file.root.configuration.C_Low[:]
self.c_high_mask = opened_raw_data_file.root.configuration.C_High[:]
self.fei4b = False if str(flavor) == 'fei4a' else True
self.trig_count = int(trig_count)
self.vcal_c0 = float(vcal_c0)
self.vcal_c1 = float(vcal_c1)
self.c_low = float(c_low)
self.c_mid = float(c_mid)
self.c_high = float(c_high)
self.n_injections = int(opened_raw_data_file.root.configuration.run_conf[:][np.where(opened_raw_data_file.root.configuration.run_conf[:]['name'] == 'n_injections')]['value'][0])
except tb.exceptions.NoSuchNodeError:
if not self._settings_from_file_set:
logging.warning('No settings stored in raw data file %s, use standard settings', opened_raw_data_file.filename)
else:
logging.info('No settings provided in raw data file %s, use already set settings', opened_raw_data_file.filename)
except IndexError: # happens if setting is not available (e.g. repeat_command)
pass
def _get_plsr_dac_charge(self, plsr_dac_array, no_offset=False):
'''Takes the PlsrDAC calibration and the stored C-high/C-low mask to calculate the charge from the PlsrDAC array on a pixel basis
'''
charge = np.zeros_like(self.c_low_mask, dtype=np.float16) # charge in electrons
if self.vcal_c0 is not None and self.vcal_c1 is not None and self.c_low is not None and self.c_mid is not None and self.c_high is not None:
voltage = self.vcal_c1 * plsr_dac_array if no_offset else self.vcal_c0 + self.vcal_c1 * plsr_dac_array
charge[np.logical_and(self.c_low_mask, ~self.c_high_mask)] = voltage[np.logical_and(self.c_low_mask, ~self.c_high_mask)] * self.c_low / 0.16022
charge[np.logical_and(~self.c_low_mask, self.c_high_mask)] = voltage[np.logical_and(~self.c_low_mask, self.c_high_mask)] * self.c_mid / 0.16022
charge[np.logical_and(self.c_low_mask, self.c_high_mask)] = voltage[np.logical_and(self.c_low_mask, self.c_high_mask)] * self.c_high / 0.16022
return charge
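# Note on the conversion above (added comment, not part of the original source): with the
# injection capacitance in fF and the PlsrDAC voltage (vcal_c0 + vcal_c1 * PlsrDAC) in mV,
# C * V is a charge in attocoulomb (1e-18 C); dividing by the elementary charge of
# 0.16022 aC yields the charge in electrons, which is where the constant 0.16022 comes from.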
if __name__ == "__main__":
pass
|
SiLab-Bonn/pyBAR
|
pybar/analysis/analyze_raw_data.py
|
Python
|
bsd-3-clause
| 88,134
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Nahuel Riva
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__revision__ = "$Id$"
__all__ = ['metadata', 'setup']
from distutils.core import setup
from distutils import version
from warnings import warn
import re
import os
import sys
import glob
# Distutils hack: in order to be able to build MSI installers with loose
# version numbers, we subclass StrictVersion to accept loose version numbers
# and convert them to the strict format. This works because Distutils will
# happily reinstall a package even if the version number matches exactly the
# one already installed on the system - so we can simply strip all extraneous
# characters and beta/postrelease version numbers will be treated just like
# the base version number.
if __name__ == '__main__':
StrictVersion = version.StrictVersion
class NotSoStrictVersion (StrictVersion):
def parse (self, vstring):
components = []
for token in vstring.split('.'):
token = token.strip()
match = re.search('^[0-9]+', token)
if match:
number = token[ match.start() : match.end() ]
components.append(number)
vstring = '.'.join(components)
return StrictVersion.parse(self, vstring)
version.StrictVersion = NotSoStrictVersion
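# Illustrative effect of the patch above (added comment, not part of the original source):
#
#     version.StrictVersion('0.1-alpha4')   # parses as '0.1' instead of raising ValueError
#
# because NotSoStrictVersion keeps only the leading digits of each dot-separated token
# before delegating to StrictVersion.parse().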
# Get the base directory
here = os.path.dirname(__file__)
if not here:
here = os.path.curdir
# Text describing the module (reStructured text)
try:
readme = os.path.join(here, 'README')
long_description = open(readme, 'r').read()
except Exception:
warn("README file not found or unreadable!")
long_description = """pype32 is a Python library to read and write PE/PE+ binary files."""
# Get the list of scripts in the "tools" folder
scripts = glob.glob(os.path.join(here, 'tools', '*.py'))
# Set the parameters for the setup script
metadata = {
# Setup instructions
'provides' : ['pype32'],
'packages' : ['pype32'],
'scripts' : scripts,
# Metadata
'name' : 'pype32',
'version' : '0.1-alpha4',
'description' : 'Yet another Python library to read and write PE/PE+ files.',
'long_description' : long_description,
'author' : 'Nahuel Riva',
'author_email' : 'crackinglandia'+chr(64)+'gmail'+chr(0x2e)+'com',
'url' : 'https://github.com/crackinglandia/pype32',
'keywords' : ['pecoff', 'x86', 'x64', '.net', 'parser'],
'download_url' : 'https://github.com/crackinglandia/pype32/tarball/v0.1-alpha4',
}
# Execute the setup script
if __name__ == '__main__':
setup(**metadata)
|
snemes/pype32
|
setup.py
|
Python
|
bsd-3-clause
| 4,235
|
from __future__ import absolute_import
class Newsletter(object):
__all__ = ('is_enabled', 'get_subscriptions', 'update_subscription',
'create_or_update_subscription')
DEFAULT_LIST_ID = 1
enabled = False
def is_enabled(self):
return self.enabled
def get_subscriptions(self, user):
return None
def update_subscription(self, user, **kwargs):
return None
def create_or_update_subscription(self, user, **kwargs):
kwargs['create'] = True
return self.update_subscription(user, **kwargs)
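# Illustrative sketch (added comment, not part of the original Sentry source): a concrete
# provider would subclass Newsletter and override the hooks above, e.g.
#
#     class DummyNewsletter(Newsletter):
#         enabled = True
#
#         def update_subscription(self, user, **kwargs):
#             return {'list_id': self.DEFAULT_LIST_ID, 'subscribed': True}
#
# create_or_update_subscription() then reuses update_subscription() with create=True.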
|
JamesMura/sentry
|
src/sentry/newsletter/base.py
|
Python
|
bsd-3-clause
| 571
|
from django.contrib.auth.backends import ModelBackend
# Live sessions will still be using this backend for a while.
# TODO: Remove after there are no more sessions using this in prod.
class Sha256Backend(ModelBackend):
"""Overriding the Django model backend without changes."""
pass
|
mozilla/kitsune
|
kitsune/users/backends.py
|
Python
|
bsd-3-clause
| 294
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\JohnnyG\Documents\XRDproject_Python_11June2010Release backup\highlowDialog.ui'
#
# Created: Mon Jun 14 16:20:37 2010
# by: PyQt4 UI code generator 4.5.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_highlowDialog(object):
def setupUi(self, highlowDialog):
highlowDialog.setObjectName("highlowDialog")
highlowDialog.resize(352, 128)
self.buttonBox = QtGui.QDialogButtonBox(highlowDialog)
self.buttonBox.setGeometry(QtCore.QRect(0, 70, 341, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.lowSpinBox = QtGui.QDoubleSpinBox(highlowDialog)
self.lowSpinBox.setGeometry(QtCore.QRect(20, 40, 62, 22))
self.lowSpinBox.setMinimum(-1000000.0)
self.lowSpinBox.setMaximum(1000000.0)
self.lowSpinBox.setObjectName("lowSpinBox")
self.highSpinBox = QtGui.QDoubleSpinBox(highlowDialog)
self.highSpinBox.setGeometry(QtCore.QRect(100, 40, 62, 20))
self.highSpinBox.setMinimum(-1000000.0)
self.highSpinBox.setMaximum(1000000.0)
self.highSpinBox.setObjectName("highSpinBox")
self.label = QtGui.QLabel(highlowDialog)
self.label.setGeometry(QtCore.QRect(20, 20, 71, 16))
self.label.setObjectName("label")
self.label_2 = QtGui.QLabel(highlowDialog)
self.label_2.setGeometry(QtCore.QRect(100, 20, 76, 16))
self.label_2.setObjectName("label_2")
self.retranslateUi(highlowDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), highlowDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), highlowDialog.reject)
QtCore.QMetaObject.connectSlotsByName(highlowDialog)
def retranslateUi(self, highlowDialog):
highlowDialog.setWindowTitle(QtGui.QApplication.translate("highlowDialog", "Enter range for colorbar", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("highlowDialog", "low value", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("highlowDialog", "high value", None, QtGui.QApplication.UnicodeUTF8))
|
johnmgregoire/vanDover_CHESS
|
ui_highlowDialog.py
|
Python
|
bsd-3-clause
| 2,451
|
import logging
import pymongo
import emission.net.usercache.abstract_usercache as enua
import emission.core.get_database as edb
import emission.core.wrapper.trip as ecwt
import emission.core.wrapper.section as ecws
import emission.core.wrapper.stop as ecwst
import emission.storage.decorations.timeline as esdt
def create_new_trip(user_id):
_id = edb.get_trip_new_db().save({"user_id": user_id})
return ecwt.Trip({"_id": _id, "user_id": user_id})
def save_trip(trip):
edb.get_trip_new_db().save(trip)
def _get_ts_query(tq):
time_key = tq.timeType
ret_query = {time_key: {"$lt": tq.endTs}}
if (tq.startTs is not None):
ret_query[time_key].update({"$gte": tq.startTs})
return ret_query
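# Illustrative result (added comment, not part of the original source): for a time query
# with timeType="write_ts", startTs=10 and endTs=20 the helper above returns
#
#     {"write_ts": {"$lt": 20, "$gte": 10}}
#
# which the functions below extend with user and location filters before querying MongoDB.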
def get_trips(user_id, time_query):
curr_query = _get_ts_query(time_query)
curr_query.update({"user_id": user_id})
trip_doc_cursor = edb.get_trip_new_db().find(curr_query).sort(time_query.timeType, pymongo.ASCENDING)
# TODO: Fix "TripIterator" and return it instead of this list
return [ecwt.Trip(doc) for doc in trip_doc_cursor]
def get_aggregate_trips(time_query, box=None):
curr_query = _get_ts_query(time_query)
if box:
curr_query.update({"start_loc" : {"$geoWithin" : {"$box": box}}})
curr_query.update({"end_loc" : {"$geoWithin" : {"$box": box}}})
trip_doc_cursor = edb.get_trip_new_db().find(curr_query).sort(time_query.timeType, pymongo.ASCENDING)
return [ecwt.Trip(doc) for doc in trip_doc_cursor]
def get_trip(trip_id):
"""
Returns the trip for specified trip id.
:rtype : emission.core.wrapper.Trip
"""
return ecwt.Trip(edb.get_trip_new_db().find_one({"_id": trip_id}))
def get_time_query_for_trip(trip_id):
trip = get_trip(trip_id)
return enua.UserCache.TimeQuery("write_ts", trip.start_ts, trip.end_ts)
def get_sections_for_trip(user_id, trip_id):
"""
Get the set of sections that are children of this trip.
"""
section_doc_cursor = edb.get_section_new_db().find({"user_id": user_id, "trip_id": trip_id}).sort("start_ts", pymongo.ASCENDING)
return [ecws.Section(doc) for doc in section_doc_cursor]
def get_stops_for_trip(user_id, trip_id):
"""
Get the set of stops that are children of this trip.
"""
stop_doc_cursor = edb.get_stop_db().find({"user_id": user_id, "trip_id": trip_id}).sort("enter_ts", pymongo.ASCENDING)
logging.debug("About to execute query %s" % {"user_id": user_id, "trip_id": trip_id})
return [ecwst.Stop(doc) for doc in stop_doc_cursor]
def get_timeline_for_trip(user_id, trip_id):
"""
Get an ordered sequence of sections and stops corresponding to this trip.
"""
return esdt.Timeline(get_stops_for_trip(user_id, trip_id),
get_sections_for_trip(user_id, trip_id))
|
joshzarrabi/e-mission-server
|
emission/storage/decorations/trip_queries.py
|
Python
|
bsd-3-clause
| 2,808
|
# -*- coding: utf-8 -*-
#
# DyNe documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 10 16:15:03 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'DyNe'
copyright = u'2016, Ankit Khambhati'
author = u'Ankit Khambhati'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.5'
# The full version, including alpha/beta/rc tags.
release = u'0.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'DyNedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DyNe.tex', u'DyNe Documentation',
u'Ankit Khambhati', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'dyne', u'DyNe Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DyNe', u'DyNe Documentation',
author, 'DyNe', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
akhambhati/dyne
|
docs/source/conf.py
|
Python
|
bsd-3-clause
| 9,233
|
"""
Simple script to help create files needed to make a sphinx documentation
website of the flopy project. The script will read through all of the
flopy modules and create the sphinx autodoc rst (restructured text) files.
"""
import os
# print current working directory
print(os.getcwd())
# look through the following subdirectories, and grab all of the
# modules that should be added to the sphinx documentation.
flopypth = os.path.join('..', '..', '..', 'flopy3fork.git', 'flopy')
pthlist = ['export', 'modflow', 'modflowlgr', 'modpath', 'mt3d', 'pest',
'plot', 'seawat', 'utils', 'mf6', 'mf6/modflow', 'discretization']
namelist = []
for pth in pthlist:
dirpth = os.path.join(flopypth, pth)
filelist = os.listdir(dirpth)
for filename in filelist:
if '.pyc' in filename:
continue
if '__init__' in filename:
continue
if '.py' in filename:
prefix = filename.strip().split('.')[0]
nm = 'flopy.' + pth + '.' + prefix
print (nm)
namelist.append(nm)
fnamelist = open('fnamelist.txt', 'w')
for name in namelist:
fnamelist.write(' ' + name + '\n')
prefix = name.strip().split('.')[2]
fname = prefix + '.rst'
if not os.path.exists(fname):
print('Creating new rst file: {}'.format(fname))
f = open(fname, 'w')
s = name.replace('/', '.') + ' Module'
f.write(s + '\n')
s = len(s) * '='
f.write(s + '\n\n')
s = '.. automodule:: ' + name.replace('/', '.')
f.write(s + '\n')
s = ' :members:'
f.write(s + '\n')
s = ' :inherited-members:'
f.write(s + '\n')
f.close()
fnamelist.close()
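# For illustration (added comment, not part of the original script): for a module such as
# flopy.modflow.mfdis the loop above would create mfdis.rst with roughly this content:
#
#     flopy.modflow.mfdis Module
#     ==========================
#
#     .. automodule:: flopy.modflow.mfdis
#        :members:
#        :inherited-members: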
|
modflowpy/flopydoc
|
docs/mkrst.py
|
Python
|
bsd-3-clause
| 1,785
|
from django.db import migrations
from corehq.apps.smsbillables.management.commands.bootstrap_gateway_fees import (
bootstrap_pinpoint_gateway,
)
def add_pinpoint_gateway_fee_for_migration(apps, schema_editor):
bootstrap_pinpoint_gateway(apps)
class Migration(migrations.Migration):
dependencies = [
('smsbillables', '0021_infobip_gateway_fee_amount_null'),
]
operations = [
migrations.RunPython(add_pinpoint_gateway_fee_for_migration),
]
|
dimagi/commcare-hq
|
corehq/apps/smsbillables/migrations/0022_pinpoint_gateway_fee_amount_null.py
|
Python
|
bsd-3-clause
| 485
|
import os
import sys
import imp
import logging
from collections import namedtuple
"""
Objects used to configure Glue at runtime.
"""
__all__ = ['Registry', 'SettingRegistry', 'ExporterRegistry',
'ColormapRegistry', 'DataFactoryRegistry', 'QtClientRegistry',
'LinkFunctionRegistry', 'LinkHelperRegistry',
'ProfileFitterRegistry',
'qt_client', 'data_factory', 'link_function', 'link_helper',
'colormaps',
'exporters', 'settings', 'fit_plugin']
class Registry(object):
"""Container to hold groups of objects or settings.
Registry instances are used by Glue to track objects
used for various tasks like data linking, widget creation, etc.
They have the following properties:
- A `members` property, which lists each item in the registry
- A `default_members` function, which can be overridden to lazily
initialize the members list
- A call interface, allowing the instance to be used as a decorator
for users to add new items to the registry in their config files
"""
def __init__(self):
self._members = []
self._loaded = False
@property
def members(self):
""" A list of the members in the registry.
The return value is a list. The contents of the list
are specified in each subclass"""
if not self._loaded:
self._members = self.default_members() + self._members
self._loaded = True
return self._members
def default_members(self):
"""The member items provided by default. These are put in this
method so that code is only imported when needed"""
return []
def add(self, value):
""" Add a new item to the registry """
self._members.append(value)
def __iter__(self):
return iter(self.members)
def __len__(self):
return len(self.members)
def __contains__(self, value):
return value in self.members
def __call__(self, arg):
"""This is provided so that registry instances can be used
as decorators. The decorators should add the decorated
code object to the registry, and return the original function"""
self.add(arg)
return arg
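# Illustrative usage (added comment, not part of the original glue source; names are
# hypothetical): the decorator interface described in the class docstring works like
#
#     my_registry = Registry()
#
#     @my_registry
#     def my_item():
#         pass
#
#     assert my_item in my_registry   # __call__ added it and returned the function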
class SettingRegistry(Registry):
"""Stores key/value settings that code can use to customize Glue
Each member is a tuple of 3 items:
- key: the setting name [str]
- value: the default setting [object]
- validator: A function which tests whether the input is a valid value,
and raises a ValueError if invalid. On valid input,
returns the (possibly sanitized) setting value.
"""
def add(self, key, value, validator=str):
self.members.append((key, value, validator))
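# Illustrative registration (added comment, not part of the original source; the key name
# is hypothetical):
#
#     settings = SettingRegistry()
#     settings.add('history_length', 20, validator=int)
#     # settings.members now contains the tuple ('history_length', 20, int)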
class ExporterRegistry(Registry):
"""Stores functions which can export an application to an output file
The members property is a list of exporters, each represented
as a (label, save_function, can_save_function, outmode) tuple.
save_function takes an (application, path) as input, and saves
the session
can_save_function takes an application as input, and raises an
exception if saving this session is not possible
outmode is a string, with one of 3 values:
'file': indicates that exporter creates a file
'directory': exporter creates a directory
'label': exporter doesn't write to disk, but needs a label
"""
def default_members(self):
return []
def add(self, label, exporter, checker, outmode='file'):
"""
Add a new exporter
:param label: Short label for the exporter
:type label: str
:param exporter: exporter function
:type exporter: function(application, path)
:param checker: function that checks if save is possible
:type checker: function(application)
``exporter`` should raise an exception if export isn't possible.
:param outmode: What kind of output is created?
:type outmode: str ('file' | 'directory' | 'label')
"""
self.members.append((label, exporter, checker, outmode))
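# --- Editor's hedged usage sketch (not part of the original glue source). ---
# It registers a toy exporter following the (label, save, can_save, outmode)
# contract documented in ExporterRegistry.add; every `_demo_*` name is
# hypothetical and only illustrates the call signature.
_demo_exporters = ExporterRegistry()

def _demo_save(application, path):
    # A real exporter would serialize `application` state to `path`.
    with open(path, 'w') as handle:
        handle.write('demo export\n')

def _demo_can_save(application):
    # Raise an exception here if exporting `application` is not possible.
    pass

_demo_exporters.add('Demo export', _demo_save, _demo_can_save, outmode='file')
assert ('Demo export', _demo_save, _demo_can_save, 'file') in _demo_exporters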
class ColormapRegistry(Registry):
"""Stores colormaps for the Image Viewer. The members property is
a list of colormaps, each represented as a [name,cmap] pair.
"""
def default_members(self):
import matplotlib.cm as cm
members = []
members.append(['Gray', cm.gray])
members.append(['Purple-Blue', cm.PuBu])
members.append(['Yellow-Green-Blue', cm.YlGnBu])
members.append(['Yellow-Orange-Red', cm.YlOrRd])
members.append(['Red-Purple', cm.RdPu])
members.append(['Blue-Green', cm.BuGn])
members.append(['Hot', cm.hot])
members.append(['Red-Blue', cm.RdBu])
members.append(['Red-Yellow-Blue', cm.RdYlBu])
members.append(['Purple-Orange', cm.PuOr])
members.append(['Purple-Green', cm.PRGn])
return members
def add(self, label, cmap):
"""
Add colormap *cmap* with label *label*.
"""
self.members.append([label, cmap])
class DataFactoryRegistry(Registry):
"""Stores data factories. Data factories take filenames as input,
and return :class:`~glue.core.data.Data` instances
The members property returns a list of (function, label, identifier)
namedtuples:
- Function is the factory that creates the data object
- label is a short human-readable description of the factory
- identifier is a function that takes ``(filename, **kwargs)`` as input
and returns True if the factory can open the file
New data factories can be registered via::
@data_factory('label_name', identifier, default='txt')
def new_factory(file_name):
...
This has the additional side-effect of associating
this factory with filenames ending in ``txt`` by default
"""
item = namedtuple('DataFactory', 'function label identifier')
def default_members(self):
from .core.data_factories import __factories__
return [self.item(f, f.label, f.identifier) for f in __factories__]
def __call__(self, label, identifier, default=''):
from .core.data_factories import set_default_factory
def adder(func):
set_default_factory(default, func)
self.add(self.item(func, label, identifier))
return func
return adder
class QtClientRegistry(Registry):
"""Stores QT widgets to visualize data.
The members property is a list of Qt widget classes
New widgets can be registered via::
@qt_client
class CustomWidget(QMainWindow):
...
"""
def default_members(self):
try:
from .qt.widgets.scatter_widget import ScatterWidget
from .qt.widgets.image_widget import ImageWidget
from .qt.widgets.histogram_widget import HistogramWidget
return [ScatterWidget, ImageWidget, HistogramWidget]
except ImportError:
logging.getLogger(__name__).warning(
"could not import glue.qt in ConfigObject")
return []
class LinkFunctionRegistry(Registry):
"""Stores functions to convert between quantities
The members property is a list of (function, info_string,
output_labels) namedtuples. `info_string` describes what the
function does. `output_labels` is a list of names for each output.
New link functions can be registered via
@link_function(info="maps degrees to arcseconds",
output_labels=['arcsec'])
def degrees2arcsec(degrees):
return degrees * 3600
Link functions are expected to receive and return numpy arrays
"""
item = namedtuple('LinkFunction', 'function info output_labels')
def default_members(self):
from .core import link_helpers
return list(self.item(l, "", l.output_args)
for l in link_helpers.__LINK_FUNCTIONS__)
def __call__(self, info="", output_labels=None):
out = output_labels or []
def adder(func):
self.add(self.item(func, info, out))
return func
return adder
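# --- Editor's hedged usage sketch (not part of the original glue source). ---
# It registers a link function through the decorator interface documented
# above; `_demo_link_functions` is a hypothetical stand-in for the
# module-level `link_function` registry created near the bottom of this file,
# and the function mirrors the degrees->arcseconds example in the docstring.
_demo_link_functions = LinkFunctionRegistry()

@_demo_link_functions(info="maps degrees to arcseconds", output_labels=['arcsec'])
def _degrees2arcsec(degrees):
    # Link functions are expected to work element-wise on numpy arrays.
    return degrees * 3600

# The private list is inspected directly here to avoid importing glue's
# default members; the registry now holds one LinkFunction namedtuple.
assert _demo_link_functions._members[0].function is _degrees2arcsec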
class LinkHelperRegistry(Registry):
"""Stores helper objects that compute many ComponentLinks at once
The members property is a list of (object, info_string,
input_labels) tuples. `Object` is the link helper. `info_string`
describes what `object` does. `input_labels` is a list labeling
the inputs.
Each link helper takes a list of ComponentIDs as inputs, and
returns an iterable object (e.g. list) of ComponentLinks.
New helpers can be registered via
@link_helper('Links degrees and arcseconds in both directions',
['degree', 'arcsecond'])
def new_helper(degree, arcsecond):
return [ComponentLink([degree], arcsecond, using=lambda d: d*3600),
ComponentLink([arcsecond], degree, using=lambda a: a/3600)]
"""
item = namedtuple('LinkHelper', 'helper info input_labels')
def default_members(self):
from .core.link_helpers import __LINK_HELPERS__ as helpers
return list(self.item(l, l.info_text, l.input_args)
for l in helpers)
def __call__(self, info, input_labels):
def adder(func):
self.add(self.item(func, info, input_labels))
return func
return adder
class ProfileFitterRegistry(Registry):
item = namedtuple('ProfileFitter', 'cls')
def add(self, cls):
"""
Add the profile fitter class *cls* to the registry.
"""
self.members.append(cls)
def default_members(self):
from .core.fitters import __FITTERS__
return list(__FITTERS__)
qt_client = QtClientRegistry()
data_factory = DataFactoryRegistry()
link_function = LinkFunctionRegistry()
link_helper = LinkHelperRegistry()
colormaps = ColormapRegistry()
exporters = ExporterRegistry()
settings = SettingRegistry()
fit_plugin = ProfileFitterRegistry()
def load_configuration(search_path=None):
''' Find and import a config.py file
Returns:
The module object
Raises:
Exception, if a config file is found but fails to load
'''
search_order = search_path or _default_search_order()
result = imp.new_module('config')
for config_file in search_order:
dir = os.path.dirname(config_file)
try:
sys.path.append(dir)
config = imp.load_source('config', config_file)
result = config
except IOError:
pass
except Exception as e:
raise Exception("Error loading config file %s:\n%s" %
(config_file, e))
finally:
sys.path.remove(dir)
return result
def _default_search_order():
"""
The default configuration file search order:
* current working directory
* environ var GLUERC
* HOME/.glue/config.py
* Glue's own default config
"""
search_order = [os.path.join(os.getcwd(), 'config.py')]
if 'GLUERC' in os.environ:
search_order.append(os.environ['GLUERC'])
search_order.append(os.path.expanduser('~/.glue/config.py'))
return search_order[::-1]
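# --- Editor's hedged sketch (not part of the original glue source). ---
# _default_search_order() returns candidates lowest-priority first, so
# load_configuration() lets later files override earlier ones and the working
# directory's config.py wins. The GLUERC path below is hypothetical.
if __name__ == '__main__':
    os.environ.setdefault('GLUERC', '/tmp/demo_gluerc.py')
    for candidate in _default_search_order():
        print(candidate)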
|
bsipocz/glue
|
glue/config.py
|
Python
|
bsd-3-clause
| 11,375
|
import itertools
import logging
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from mypage.pages.models import Page, Widget
from mypage.rsswidgets.models import RSSWidget
from mypage.rsswidgets.forms import RSSCreationConfigForm
log = logging.getLogger('mypage.pages.forms')
class AddOrRemoveWidgetForm(forms.Form):
COMMAND_CHOICES = [
('add-widget', _(u'add widget')),
('remove-widget', _(u'remove widget')),
]
widget = forms.ModelChoiceField(queryset=None, empty_label=None)
command = forms.ChoiceField(choices=COMMAND_CHOICES)
def __init__(self, page, data=None, files=None, *args, **kwargs):
if data is not None and data.get('command', None) not in ('add-widget', 'remove-widget'):
data = files = None
super(AddOrRemoveWidgetForm, self).__init__(data, files, *args, **kwargs)
self.page = page
commercial_ids = [id for id, x in getattr(settings, 'COMMERCIAL_EQUIVALENTS', [])]
widget_ids = [id for id in itertools.chain(*getattr(settings, 'AVAILABLE_WIDGETS', {}).values())
if id not in commercial_ids]
self.fields["widget"].queryset = Widget.objects.filter(pk__in=widget_ids)
def get_available_widgets(self):
"""
Returns available widgets grouped by category to be used in a template
"""
active_ids = self.page.widgets.values_list('pk', flat=True)
choices = dict(self.fields['widget'].choices)
return [(group, [{'id':id, 'title': choices[id], 'active': id in active_ids}
for id in ids if id in choices])
for group, ids in getattr(settings, 'AVAILABLE_WIDGETS', {}).items()]
def save(self):
"""
Adds or removes a widget to/from a page
"""
widget = self.cleaned_data['widget']
if self.cleaned_data['command'] == 'add-widget':
try:
self.page.add_widget(widget)
except ValueError as err:
log.error("Widget %s not added: %s" % (str(widget), str(err)))
for commercial_id, normal_id in getattr(settings, 'COMMERCIAL_EQUIVALENTS', []):
if widget.pk == normal_id:
self.page.remove_widget(Widget.objects.get(pk=commercial_id))
if self.cleaned_data['command'] == 'remove-widget':
self.page.remove_widget(widget)
class RemoveCustomWidgetForm(forms.Form):
widget = forms.ModelChoiceField(queryset=None, empty_label=None)
def __init__(self, page, data=None, files=None, *args, **kwargs):
if data is not None and data.get('command', None) != 'remove-custom-widget':
data = files = None
super(RemoveCustomWidgetForm, self).__init__(data, files, *args, **kwargs)
self.page = page
global_widgets_ids = list(itertools.chain(*getattr(settings, 'AVAILABLE_WIDGETS', {}).values()))
queryset = page.widgets.filter(content_type=14).exclude(pk__in=global_widgets_ids)
self.fields["widget"].queryset = queryset
def get_custom_widgets(self):
"""
Returns a list of custom widgets to be used in a template
"""
choices = dict(self.fields['widget'].choices)
return [{'id':id, 'title': choices[id]} for id in choices]
def save(self):
"""
Removes a widget from a page
"""
self.page.remove_widget(self.cleaned_data["widget"])
class AddCustomWidgetForm(RSSCreationConfigForm):
def __init__(self, page, data=None, files=None, *args, **kwargs):
if data is not None and data.get('command', None) != 'add-custom-widget':
data = files = None
super(AddCustomWidgetForm, self).__init__(data, files, *args, **kwargs)
self.page = page
def save(self):
"""
Creates a rss widget and adds it to a page
"""
url, title = self.cleaned_data['feed']
widget, was_created = RSSWidget.objects.get_or_create(
feed_url=url, defaults={'title': title, 'slug': ''})
self.page.add_widget(widget)
class MyRadioSelect(forms.RadioSelect):
def render(self, name, value, attrs=None, choices=()):
return self.get_renderer(name, value, attrs, choices)
class TemplateForm(forms.Form):
template = Page._meta.get_field('template').formfield(widget=MyRadioSelect)
def __init__(self, page, *args, **kwargs):
super(TemplateForm, self).__init__(*args, **kwargs)
self.page = page
self.initial = dict(template=page.template)
def save(self):
self.page.update_template(self.cleaned_data['template'])
self.page.save()
class TemplateConfigForm(forms.Form):
def __init__(self, page, *args, **kwargs):
super(TemplateConfigForm, self).__init__(*args, **kwargs)
self.page = page
self.initial = page.layout.template_config
self.fill_fields()
def fill_fields(self):
options = getattr(settings, 'PAGE_TEMPLATE_OPTIONS', {})
for option, value in options.items():
self.fields[option] = forms.ChoiceField(widget=MyRadioSelect, choices=value[0], initial=value[1], required=False)
def save(self):
self.page.layout.template_config.update(self.cleaned_data)
self.page.layout.template_config.save()
self.page.save()
class ChromeConfigForm(TemplateConfigForm):
"""
Setup template and template_config together
"""
template = Page._meta.get_field('template').formfield(widget=MyRadioSelect)
def __init__(self, page, *args, **kwargs):
super(ChromeConfigForm, self).__init__(*args, **kwargs)
self.initial.update(dict(template=page.template))
def save(self):
self.page.update_template(self.cleaned_data['template'])
super(ChromeConfigForm, self).save()
|
ella/mypage
|
mypage/pages/forms.py
|
Python
|
bsd-3-clause
| 6,005
|
from typing import (
IO,
Any,
BinaryIO,
Iterable,
Optional,
TextIO,
Union,
Type,
cast,
overload,
Generator,
Tuple,
)
import logging
from warnings import warn
import random
from rdflib.namespace import Namespace, RDF
from rdflib import plugin, exceptions, query, namespace
import rdflib.term
from rdflib.term import BNode, IdentifiedNode, Node, URIRef, Literal, Genid
from rdflib.paths import Path
from rdflib.store import Store
from rdflib.serializer import Serializer
from rdflib.parser import InputSource, Parser, create_input_source
from rdflib.namespace import NamespaceManager
from rdflib.resource import Resource
from rdflib.collection import Collection
import rdflib.util # avoid circular dependency
from rdflib.exceptions import ParserError
import os
import shutil
import tempfile
import pathlib
from io import BytesIO
from urllib.parse import urlparse
from urllib.request import url2pathname
assert Literal # avoid warning
assert Namespace # avoid warning
logger = logging.getLogger(__name__)
__doc__ = """\
RDFLib defines the following kinds of Graphs:
* :class:`~rdflib.graph.Graph`
* :class:`~rdflib.graph.QuotedGraph`
* :class:`~rdflib.graph.ConjunctiveGraph`
* :class:`~rdflib.graph.Dataset`
Graph
-----
An RDF graph is a set of RDF triples. Graphs support the python ``in``
operator, as well as iteration and some operations like union,
difference and intersection.
see :class:`~rdflib.graph.Graph`
Conjunctive Graph
-----------------
A Conjunctive Graph is the most relevant collection of graphs that are
considered to be the boundary for closed world assumptions. This
boundary is equivalent to that of the store instance (which is itself
uniquely identified and distinct from other instances of
:class:`Store` that signify other Conjunctive Graphs). It is
equivalent to all the named graphs within it and associated with a
``_default_`` graph which is automatically assigned a :class:`BNode`
for an identifier - if one isn't given.
see :class:`~rdflib.graph.ConjunctiveGraph`
Quoted graph
------------
The notion of an RDF graph [14] is extended to include the concept of
a formula node. A formula node may occur wherever any other kind of
node can appear. Associated with a formula node is an RDF graph that
is completely disjoint from all other graphs; i.e. has no nodes in
common with any other graph. (It may contain the same labels as other
RDF graphs; because this is, by definition, a separate graph,
considerations of tidiness do not apply between the graph at a formula
node and any other graph.)
This is intended to map the idea of "{ N3-expression }" that is used
by N3 into an RDF graph upon which RDF semantics is defined.
see :class:`~rdflib.graph.QuotedGraph`
Dataset
-------
The RDF 1.1 Dataset, a small extension to the Conjunctive Graph. The
primary term is "graphs in the datasets" and not "contexts with quads"
so there is a separate method to set/retrieve a graph in a dataset and
to operate with dataset graphs. As a consequence of this approach,
dataset graphs cannot be identified with blank nodes, a name is always
required (RDFLib will automatically add a name if one is not provided
at creation time). This implementation includes a convenience method
to directly add a single quad to a dataset graph.
see :class:`~rdflib.graph.Dataset`
Working with graphs
===================
Instantiating Graphs with default store (Memory) and default identifier
(a BNode):
>>> g = Graph()
>>> g.store.__class__
<class 'rdflib.plugins.stores.memory.Memory'>
>>> g.identifier.__class__
<class 'rdflib.term.BNode'>
Instantiating Graphs with a Memory store and an identifier -
<http://rdflib.net>:
>>> g = Graph('Memory', URIRef("http://rdflib.net"))
>>> g.identifier
rdflib.term.URIRef('http://rdflib.net')
>>> str(g) # doctest: +NORMALIZE_WHITESPACE
"<http://rdflib.net> a rdfg:Graph;rdflib:storage
[a rdflib:Store;rdfs:label 'Memory']."
Creating a ConjunctiveGraph - The top level container for all named Graphs
in a "database":
>>> g = ConjunctiveGraph()
>>> str(g.default_context)
"[a rdfg:Graph;rdflib:storage [a rdflib:Store;rdfs:label 'Memory']]."
Adding / removing reified triples to Graph and iterating over it directly or
via triple pattern:
>>> g = Graph()
>>> statementId = BNode()
>>> print(len(g))
0
>>> g.add((statementId, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g.add((statementId, RDF.subject,
... URIRef("http://rdflib.net/store/ConjunctiveGraph"))) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g.add((statementId, RDF.predicate, namespace.RDFS.label)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g.add((statementId, RDF.object, Literal("Conjunctive Graph"))) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> print(len(g))
4
>>> for s, p, o in g:
... print(type(s))
...
<class 'rdflib.term.BNode'>
<class 'rdflib.term.BNode'>
<class 'rdflib.term.BNode'>
<class 'rdflib.term.BNode'>
>>> for s, p, o in g.triples((None, RDF.object, None)):
... print(o)
...
Conjunctive Graph
>>> g.remove((statementId, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> print(len(g))
3
``None`` terms in calls to :meth:`~rdflib.graph.Graph.triples` can be
thought of as "open variables".
Graphs support set-theoretic operators: you can add/subtract graphs, as
well as intersection (with multiplication operator g1*g2) and xor (g1
^ g2).
Note that BNode IDs are kept when doing set-theoretic operations, this
may or may not be what you want. Two named graphs within the same
application probably want to share BNode IDs, two graphs with data from
different sources probably not. If your BNode IDs are all generated
by RDFLib they are UUIDs and unique.
>>> g1 = Graph()
>>> g2 = Graph()
>>> u = URIRef("http://example.com/foo")
>>> g1.add([u, namespace.RDFS.label, Literal("foo")]) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g1.add([u, namespace.RDFS.label, Literal("bar")]) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g2.add([u, namespace.RDFS.label, Literal("foo")]) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g2.add([u, namespace.RDFS.label, Literal("bing")]) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> len(g1 + g2) # adds bing as label
3
>>> len(g1 - g2) # removes foo
1
>>> len(g1 * g2) # only foo
1
>>> g1 += g2 # now g1 contains everything
Graph Aggregation - ConjunctiveGraphs and ReadOnlyGraphAggregate within
the same store:
>>> store = plugin.get("Memory", Store)()
>>> g1 = Graph(store)
>>> g2 = Graph(store)
>>> g3 = Graph(store)
>>> stmt1 = BNode()
>>> stmt2 = BNode()
>>> stmt3 = BNode()
>>> g1.add((stmt1, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g1.add((stmt1, RDF.subject,
... URIRef('http://rdflib.net/store/ConjunctiveGraph'))) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g1.add((stmt1, RDF.predicate, namespace.RDFS.label)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g1.add((stmt1, RDF.object, Literal('Conjunctive Graph'))) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g2.add((stmt2, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g2.add((stmt2, RDF.subject,
... URIRef('http://rdflib.net/store/ConjunctiveGraph'))) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g2.add((stmt2, RDF.predicate, RDF.type)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g2.add((stmt2, RDF.object, namespace.RDFS.Class)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g3.add((stmt3, RDF.type, RDF.Statement)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g3.add((stmt3, RDF.subject,
... URIRef('http://rdflib.net/store/ConjunctiveGraph'))) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g3.add((stmt3, RDF.predicate, namespace.RDFS.comment)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g3.add((stmt3, RDF.object, Literal(
... 'The top-level aggregate graph - The sum ' +
... 'of all named graphs within a Store'))) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> len(list(ConjunctiveGraph(store).subjects(RDF.type, RDF.Statement)))
3
>>> len(list(ReadOnlyGraphAggregate([g1,g2]).subjects(
... RDF.type, RDF.Statement)))
2
ConjunctiveGraphs have a :meth:`~rdflib.graph.ConjunctiveGraph.quads` method
which returns quads instead of triples, where the fourth item is the Graph
(or subclass thereof) instance in which the triple was asserted:
>>> uniqueGraphNames = set(
... [graph.identifier for s, p, o, graph in ConjunctiveGraph(store
... ).quads((None, RDF.predicate, None))])
>>> len(uniqueGraphNames)
3
>>> unionGraph = ReadOnlyGraphAggregate([g1, g2])
>>> uniqueGraphNames = set(
... [graph.identifier for s, p, o, graph in unionGraph.quads(
... (None, RDF.predicate, None))])
>>> len(uniqueGraphNames)
2
Parsing N3 from a string
>>> g2 = Graph()
>>> src = '''
... @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
... @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
... [ a rdf:Statement ;
... rdf:subject <http://rdflib.net/store#ConjunctiveGraph>;
... rdf:predicate rdfs:label;
... rdf:object "Conjunctive Graph" ] .
... '''
>>> g2 = g2.parse(data=src, format="n3")
>>> print(len(g2))
4
Using Namespace class:
>>> RDFLib = Namespace("http://rdflib.net/")
>>> RDFLib.ConjunctiveGraph
rdflib.term.URIRef('http://rdflib.net/ConjunctiveGraph')
>>> RDFLib["Graph"]
rdflib.term.URIRef('http://rdflib.net/Graph')
"""
__all__ = [
"Graph",
"ConjunctiveGraph",
"QuotedGraph",
"Seq",
"ModificationException",
"Dataset",
"UnSupportedAggregateOperation",
"ReadOnlyGraphAggregate",
"BatchAddGraph",
]
class Graph(Node):
"""An RDF Graph
The constructor accepts one argument, the "store"
that will be used to store the graph data (see the "store"
package for stores currently shipped with rdflib).
Stores can be context-aware or unaware. Unaware stores take up
(some) less space but cannot support features that require
context, such as true merging/demerging of sub-graphs and
provenance.
Even if used with a context-aware store, Graph will only expose the quads which
belong to the default graph. To access the rest of the data, `ConjunctiveGraph` or
`Dataset` classes can be used instead.
The Graph constructor can take an identifier which identifies the Graph
by name. If none is given, the graph is assigned a BNode for its
identifier.
For more on named graphs, see: http://www.w3.org/2004/03/trix/
"""
def __init__(
self,
store: Union[Store, str] = "default",
identifier: Optional[Union[IdentifiedNode, str]] = None,
namespace_manager: Optional[NamespaceManager] = None,
base: Optional[str] = None,
):
super(Graph, self).__init__()
self.base = base
self.__identifier: Node
self.__identifier = identifier or BNode() # type: ignore[assignment]
if not isinstance(self.__identifier, Node):
self.__identifier = URIRef(self.__identifier) # type: ignore[unreachable]
self.__store: Store
if not isinstance(store, Store):
# TODO: error handling
self.__store = store = plugin.get(store, Store)()
else:
self.__store = store
self.__namespace_manager = namespace_manager
self.context_aware = False
self.formula_aware = False
self.default_union = False
@property
def store(self):
return self.__store
@property
def identifier(self):
return self.__identifier
@property
def namespace_manager(self):
"""
this graph's namespace-manager
"""
if self.__namespace_manager is None:
self.__namespace_manager = NamespaceManager(self)
return self.__namespace_manager
@namespace_manager.setter
def namespace_manager(self, nm):
self.__namespace_manager = nm
def __repr__(self):
return "<Graph identifier=%s (%s)>" % (self.identifier, type(self))
def __str__(self):
if isinstance(self.identifier, URIRef):
return (
"%s a rdfg:Graph;rdflib:storage " + "[a rdflib:Store;rdfs:label '%s']."
) % (self.identifier.n3(), self.store.__class__.__name__)
else:
return (
"[a rdfg:Graph;rdflib:storage " + "[a rdflib:Store;rdfs:label '%s']]."
) % self.store.__class__.__name__
def toPython(self):
return self
def destroy(self, configuration):
"""Destroy the store identified by `configuration` if supported"""
self.__store.destroy(configuration)
return self
# Transactional interfaces (optional)
def commit(self):
"""Commits active transactions"""
self.__store.commit()
return self
def rollback(self):
"""Rollback active transactions"""
self.__store.rollback()
return self
def open(self, configuration, create=False):
"""Open the graph store
Might be necessary for stores that require opening a connection to a
database or acquiring some resource.
"""
return self.__store.open(configuration, create)
def close(self, commit_pending_transaction=False):
"""Close the graph store
Might be necessary for stores that require closing a connection to a
database or releasing some resource.
"""
return self.__store.close(commit_pending_transaction=commit_pending_transaction)
def add(self, triple: Tuple[Node, Node, Node]):
"""Add a triple with self as context"""
s, p, o = triple
assert isinstance(s, Node), "Subject %s must be an rdflib term" % (s,)
assert isinstance(p, Node), "Predicate %s must be an rdflib term" % (p,)
assert isinstance(o, Node), "Object %s must be an rdflib term" % (o,)
self.__store.add((s, p, o), self, quoted=False)
return self
def addN(self, quads: Iterable[Tuple[Node, Node, Node, Any]]):
"""Add a sequence of triple with context"""
self.__store.addN(
(s, p, o, c)
for s, p, o, c in quads
if isinstance(c, Graph)
and c.identifier is self.identifier
and _assertnode(s, p, o)
)
return self
def remove(self, triple):
"""Remove a triple from the graph
If the triple does not provide a context attribute, removes the triple
from all contexts.
"""
self.__store.remove(triple, context=self)
return self
@overload
def triples(
self,
triple: Tuple[
Optional[IdentifiedNode], Optional[IdentifiedNode], Optional[Node]
],
) -> Iterable[Tuple[IdentifiedNode, IdentifiedNode, Node]]:
...
@overload
def triples(
self,
triple: Tuple[Optional[IdentifiedNode], Path, Optional[Node]],
) -> Iterable[Tuple[IdentifiedNode, Path, Node]]:
...
@overload
def triples(
self,
triple: Tuple[
Optional[IdentifiedNode], Union[None, Path, IdentifiedNode], Optional[Node]
],
) -> Iterable[Tuple[IdentifiedNode, Union[IdentifiedNode, Path], Node]]:
...
def triples(
self,
triple: Tuple[
Optional[IdentifiedNode], Union[None, Path, IdentifiedNode], Optional[Node]
],
) -> Iterable[Tuple[IdentifiedNode, Union[IdentifiedNode, Path], Node]]:
"""Generator over the triple store
Returns triples that match the given triple pattern. If triple pattern
does not provide a context, all contexts will be searched.
"""
s, p, o = triple
if isinstance(p, Path):
for _s, _o in p.eval(self, s, o):
yield _s, p, _o
else:
for (_s, _p, _o), cg in self.__store.triples((s, p, o), context=self):
yield _s, _p, _o
def __getitem__(self, item):
"""
A graph can be "sliced" as a shortcut for the triples method
The python slice syntax is (ab)used for specifying triples.
A generator over matches is returned,
the returned tuples include only the parts not given
>>> import rdflib
>>> g = rdflib.Graph()
>>> g.add((rdflib.URIRef("urn:bob"), namespace.RDFS.label, rdflib.Literal("Bob"))) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> list(g[rdflib.URIRef("urn:bob")]) # all triples about bob
[(rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label'), rdflib.term.Literal('Bob'))]
>>> list(g[:namespace.RDFS.label]) # all label triples
[(rdflib.term.URIRef('urn:bob'), rdflib.term.Literal('Bob'))]
>>> list(g[::rdflib.Literal("Bob")]) # all triples with bob as object
[(rdflib.term.URIRef('urn:bob'), rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label'))]
Combined with SPARQL paths, more complex queries can be
written concisely:
Name of all Bobs friends:
g[bob : FOAF.knows/FOAF.name ]
Some label for Bob:
g[bob : DC.title|FOAF.name|RDFS.label]
All friends and friends of friends of Bob
g[bob : FOAF.knows * "+"]
etc.
.. versionadded:: 4.0
"""
if isinstance(item, slice):
s, p, o = item.start, item.stop, item.step
if s is None and p is None and o is None:
return self.triples((s, p, o))
elif s is None and p is None:
return self.subject_predicates(o)
elif s is None and o is None:
return self.subject_objects(p)
elif p is None and o is None:
return self.predicate_objects(s)
elif s is None:
return self.subjects(p, o)
elif p is None:
return self.predicates(s, o)
elif o is None:
return self.objects(s, p)
else:
# all given
return (s, p, o) in self
elif isinstance(item, (Path, Node)):
return self.predicate_objects(item)
else:
raise TypeError(
"You can only index a graph by a single rdflib term or path, or a slice of rdflib terms."
)
def __len__(self):
"""Returns the number of triples in the graph
If context is specified then the number of triples in the context is
returned instead.
"""
return self.__store.__len__(context=self)
def __iter__(self):
"""Iterates over all triples in the store"""
return self.triples((None, None, None))
def __contains__(self, triple):
"""Support for 'triple in graph' syntax"""
for triple in self.triples(triple):
return True
return False
def __hash__(self):
return hash(self.identifier)
def __cmp__(self, other):
if other is None:
return -1
elif isinstance(other, Graph):
return (self.identifier > other.identifier) - (
self.identifier < other.identifier
)
else:
# Note if None is considered equivalent to owl:Nothing
# Then perhaps a graph with length 0 should be considered
# equivalent to None (if compared to it)?
return 1
def __eq__(self, other):
return isinstance(other, Graph) and self.identifier == other.identifier
def __lt__(self, other):
return (other is None) or (
isinstance(other, Graph) and self.identifier < other.identifier
)
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return (isinstance(other, Graph) and self.identifier > other.identifier) or (
other is not None
)
def __ge__(self, other):
return self > other or self == other
def __iadd__(self, other):
"""Add all triples in Graph other to Graph.
BNode IDs are not changed."""
self.addN((s, p, o, self) for s, p, o in other)
return self
def __isub__(self, other):
"""Subtract all triples in Graph other from Graph.
BNode IDs are not changed."""
for triple in other:
self.remove(triple)
return self
def __add__(self, other):
"""Set-theoretic union
BNode IDs are not changed."""
try:
retval = type(self)()
except TypeError:
retval = Graph()
for (prefix, uri) in set(list(self.namespaces()) + list(other.namespaces())):
retval.bind(prefix, uri)
for x in self:
retval.add(x)
for y in other:
retval.add(y)
return retval
def __mul__(self, other):
"""Set-theoretic intersection.
BNode IDs are not changed."""
try:
retval = type(self)()
except TypeError:
retval = Graph()
for x in other:
if x in self:
retval.add(x)
return retval
def __sub__(self, other):
"""Set-theoretic difference.
BNode IDs are not changed."""
try:
retval = type(self)()
except TypeError:
retval = Graph()
for x in self:
if x not in other:
retval.add(x)
return retval
def __xor__(self, other):
"""Set-theoretic XOR.
BNode IDs are not changed."""
return (self - other) + (other - self)
__or__ = __add__
__and__ = __mul__
# Conv. methods
def set(self, triple):
"""Convenience method to update the value of object
Remove any existing triples for subject and predicate before adding
(subject, predicate, object).
"""
(subject, predicate, object_) = triple
assert (
subject is not None
), "s can't be None in .set([s,p,o]), as it would remove (*, p, *)"
assert (
predicate is not None
), "p can't be None in .set([s,p,o]), as it would remove (s, *, *)"
self.remove((subject, predicate, None))
self.add((subject, predicate, object_))
return self
def subjects(
self,
predicate: Union[None, Path, IdentifiedNode] = None,
object: Optional[Node] = None,
unique: bool = False,
) -> Iterable[IdentifiedNode]:
"""A generator of (optionally unique) subjects with the given
predicate and object"""
if not unique:
for s, p, o in self.triples((None, predicate, object)):
yield s
else:
subs = set()
for s, p, o in self.triples((None, predicate, object)):
if s not in subs:
yield s
try:
subs.add(s)
except MemoryError as e:
logger.error(
f"{e}. Consider not setting parameter 'unique' to True"
)
raise
def predicates(
self,
subject: Optional[IdentifiedNode] = None,
object: Optional[Node] = None,
unique: bool = False,
) -> Iterable[IdentifiedNode]:
"""A generator of (optionally unique) predicates with the given
subject and object"""
if not unique:
for s, p, o in self.triples((subject, None, object)):
yield p
else:
preds = set()
for s, p, o in self.triples((subject, None, object)):
if p not in preds:
yield p
try:
preds.add(p)
except MemoryError as e:
logger.error(
f"{e}. Consider not setting parameter 'unique' to True"
)
raise
def objects(
self,
subject: Optional[IdentifiedNode] = None,
predicate: Union[None, Path, IdentifiedNode] = None,
unique: bool = False,
) -> Iterable[Node]:
"""A generator of (optionally unique) objects with the given
subject and predicate"""
if not unique:
for s, p, o in self.triples((subject, predicate, None)):
yield o
else:
objs = set()
for s, p, o in self.triples((subject, predicate, None)):
if o not in objs:
yield o
try:
objs.add(o)
except MemoryError as e:
logger.error(
f"{e}. Consider not setting parameter 'unique' to True"
)
raise
def subject_predicates(
self, object: Optional[Node] = None, unique: bool = False
) -> Generator[Tuple[IdentifiedNode, IdentifiedNode], None, None]:
"""A generator of (optionally unique) (subject, predicate) tuples
for the given object"""
if not unique:
for s, p, o in self.triples((None, None, object)):
yield s, p
else:
subj_preds = set()
for s, p, o in self.triples((None, None, object)):
if (s, p) not in subj_preds:
yield s, p
try:
subj_preds.add((s, p))
except MemoryError as e:
logger.error(
f"{e}. Consider not setting parameter 'unique' to True"
)
raise
def subject_objects(
self, predicate: Union[None, Path, IdentifiedNode] = None, unique: bool = False
) -> Generator[Tuple[IdentifiedNode, Node], None, None]:
"""A generator of (optionally unique) (subject, object) tuples
for the given predicate"""
if not unique:
for s, p, o in self.triples((None, predicate, None)):
yield s, o
else:
subj_objs = set()
for s, p, o in self.triples((None, predicate, None)):
if (s, o) not in subj_objs:
yield s, o
try:
subj_objs.add((s, o))
except MemoryError as e:
logger.error(
f"{e}. Consider not setting parameter 'unique' to True"
)
raise
def predicate_objects(
self, subject: Optional[IdentifiedNode] = None, unique: bool = False
) -> Generator[Tuple[IdentifiedNode, Node], None, None]:
"""A generator of (optionally unique) (predicate, object) tuples
for the given subject"""
if not unique:
for s, p, o in self.triples((subject, None, None)):
yield p, o
else:
pred_objs = set()
for s, p, o in self.triples((subject, None, None)):
if (p, o) not in pred_objs:
yield p, o
try:
pred_objs.add((p, o))
except MemoryError as e:
logger.error(
f"{e}. Consider not setting parameter 'unique' to True"
)
raise
def triples_choices(self, triple, context=None):
subject, predicate, object_ = triple
for (s, p, o), cg in self.store.triples_choices(
(subject, predicate, object_), context=self
):
yield s, p, o
def value(
self, subject=None, predicate=RDF.value, object=None, default=None, any=True
):
"""Get a value for a pair of two criteria
Exactly one of subject, predicate, object must be None. Useful if one
knows that there may only be one value.
It is one of those situations that occur a lot, hence this
'macro' like utility
Parameters:
subject, predicate, object -- exactly one must be None
default -- value to be returned if no values found
any -- if True, return any value in the case there is more than one,
else, raise UniquenessError
"""
retval = default
if (
(subject is None and predicate is None)
or (subject is None and object is None)
or (predicate is None and object is None)
):
return None
if object is None:
values = self.objects(subject, predicate)
if subject is None:
values = self.subjects(predicate, object)
if predicate is None:
values = self.predicates(subject, object)
try:
retval = next(values)
except StopIteration:
retval = default
else:
if any is False:
try:
next(values)
msg = (
"While trying to find a value for (%s, %s, %s) the"
" following multiple values where found:\n"
% (subject, predicate, object)
)
triples = self.store.triples((subject, predicate, object), None)
for (s, p, o), contexts in triples:
msg += "(%s, %s, %s)\n (contexts: %s)\n" % (
s,
p,
o,
list(contexts),
)
raise exceptions.UniquenessError(msg)
except StopIteration:
pass
return retval
def items(self, list):
"""Generator over all items in the resource specified by list
list is an RDF collection.
"""
chain = set([list])
while list:
item = self.value(list, RDF.first)
if item is not None:
yield item
list = self.value(list, RDF.rest)
if list in chain:
raise ValueError("List contains a recursive rdf:rest reference")
chain.add(list)
def transitiveClosure(self, func, arg, seen=None):
"""
Generates transitive closure of a user-defined
function against the graph
>>> from rdflib.collection import Collection
>>> g=Graph()
>>> a=BNode("foo")
>>> b=BNode("bar")
>>> c=BNode("baz")
>>> g.add((a,RDF.first,RDF.type)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g.add((a,RDF.rest,b)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g.add((b,RDF.first,namespace.RDFS.label)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g.add((b,RDF.rest,c)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g.add((c,RDF.first,namespace.RDFS.comment)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> g.add((c,RDF.rest,RDF.nil)) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> def topList(node,g):
... for s in g.subjects(RDF.rest, node):
... yield s
>>> def reverseList(node,g):
... for f in g.objects(node, RDF.first):
... print(f)
... for s in g.subjects(RDF.rest, node):
... yield s
>>> [rt for rt in g.transitiveClosure(
... topList,RDF.nil)] # doctest: +NORMALIZE_WHITESPACE
[rdflib.term.BNode('baz'),
rdflib.term.BNode('bar'),
rdflib.term.BNode('foo')]
>>> [rt for rt in g.transitiveClosure(
... reverseList,RDF.nil)] # doctest: +NORMALIZE_WHITESPACE
http://www.w3.org/2000/01/rdf-schema#comment
http://www.w3.org/2000/01/rdf-schema#label
http://www.w3.org/1999/02/22-rdf-syntax-ns#type
[rdflib.term.BNode('baz'),
rdflib.term.BNode('bar'),
rdflib.term.BNode('foo')]
"""
if seen is None:
seen = {}
elif arg in seen:
return
seen[arg] = 1
for rt in func(arg, self):
yield rt
for rt_2 in self.transitiveClosure(func, rt, seen):
yield rt_2
def transitive_objects(self, subject, predicate, remember=None):
"""Transitively generate objects for the ``predicate`` relationship
Generated objects belong to the depth first transitive closure of the
``predicate`` relationship starting at ``subject``.
"""
if remember is None:
remember = {}
if subject in remember:
return
remember[subject] = 1
yield subject
for object in self.objects(subject, predicate):
for o in self.transitive_objects(object, predicate, remember):
yield o
def transitive_subjects(self, predicate, object, remember=None):
"""Transitively generate subjects for the ``predicate`` relationship
Generated subjects belong to the depth first transitive closure of the
``predicate`` relationship starting at ``object``.
"""
if remember is None:
remember = {}
if object in remember:
return
remember[object] = 1
yield object
for subject in self.subjects(predicate, object):
for s in self.transitive_subjects(predicate, subject, remember):
yield s
def qname(self, uri):
return self.namespace_manager.qname(uri)
def compute_qname(self, uri, generate=True):
return self.namespace_manager.compute_qname(uri, generate)
def bind(self, prefix, namespace, override=True, replace=False) -> None:
"""Bind prefix to namespace
If override is True will bind namespace to given prefix even
if namespace was already bound to a different prefix.
if replace, replace any existing prefix with the new namespace
for example: graph.bind("foaf", "http://xmlns.com/foaf/0.1/")
"""
return self.namespace_manager.bind(
prefix, namespace, override=override, replace=replace
)
def namespaces(self):
"""Generator over all the prefix, namespace tuples"""
for prefix, namespace in self.namespace_manager.namespaces():
yield prefix, namespace
def absolutize(self, uri, defrag=1):
"""Turn uri into an absolute URI if it's not one already"""
return self.namespace_manager.absolutize(uri, defrag)
# no destination and non-None positional encoding
@overload
def serialize(
self, destination: None, format: str, base: Optional[str], encoding: str, **args
) -> bytes:
...
# no destination and non-None keyword encoding
@overload
def serialize(
self,
destination: None = ...,
format: str = ...,
base: Optional[str] = ...,
*,
encoding: str,
**args,
) -> bytes:
...
# no destination and None encoding
@overload
def serialize(
self,
destination: None = ...,
format: str = ...,
base: Optional[str] = ...,
encoding: None = ...,
**args,
) -> str:
...
# non-None destination
@overload
def serialize(
self,
destination: Union[str, pathlib.PurePath, IO[bytes]],
format: str = ...,
base: Optional[str] = ...,
encoding: Optional[str] = ...,
**args,
) -> "Graph":
...
# fallback
@overload
def serialize(
self,
destination: Optional[Union[str, pathlib.PurePath, IO[bytes]]] = ...,
format: str = ...,
base: Optional[str] = ...,
encoding: Optional[str] = ...,
**args,
) -> Union[bytes, str, "Graph"]:
...
def serialize(
self,
destination: Optional[Union[str, pathlib.PurePath, IO[bytes]]] = None,
format: str = "turtle",
base: Optional[str] = None,
encoding: Optional[str] = None,
**args: Any,
) -> Union[bytes, str, "Graph"]:
"""Serialize the Graph to destination
If destination is None serialize method returns the serialization as
bytes or string.
If encoding is None and destination is None, returns a string.
If encoding is set and destination is None, returns bytes.
Format defaults to turtle.
Format support can be extended with plugins,
but "xml", "n3", "turtle", "nt", "pretty-xml", "trix", "trig" and "nquads" are built in.
"""
# if base is not given as attribute use the base set for the graph
if base is None:
base = self.base
serializer = plugin.get(format, Serializer)(self)
stream: IO[bytes]
if destination is None:
stream = BytesIO()
if encoding is None:
serializer.serialize(stream, base=base, encoding="utf-8", **args)
return stream.getvalue().decode("utf-8")
else:
serializer.serialize(stream, base=base, encoding=encoding, **args)
return stream.getvalue()
if hasattr(destination, "write"):
stream = cast(IO[bytes], destination)
serializer.serialize(stream, base=base, encoding=encoding, **args)
else:
if isinstance(destination, pathlib.PurePath):
location = str(destination)
else:
location = cast(str, destination)
scheme, netloc, path, params, _query, fragment = urlparse(location)
if netloc != "":
raise ValueError(
f"destination {destination} is not a local file reference"
)
fd, name = tempfile.mkstemp()
stream = os.fdopen(fd, "wb")
serializer.serialize(stream, base=base, encoding=encoding, **args)
stream.close()
dest = url2pathname(path) if scheme == "file" else location
if hasattr(shutil, "move"):
shutil.move(name, dest)
else:
shutil.copy(name, dest)
os.remove(name)
return self
def print(self, format="turtle", encoding="utf-8", out=None):
print(
self.serialize(None, format=format, encoding=encoding).decode(encoding),
file=out,
flush=True,
)
def parse(
self,
source: Optional[
Union[IO[bytes], TextIO, InputSource, str, bytes, pathlib.PurePath]
] = None,
publicID: Optional[str] = None,
format: Optional[str] = None,
location: Optional[str] = None,
file: Optional[Union[BinaryIO, TextIO]] = None,
data: Optional[Union[str, bytes]] = None,
**args,
):
"""
Parse an RDF source adding the resulting triples to the Graph.
The source is specified using one of source, location, file or
data.
:Parameters:
- `source`: An InputSource, file-like object, or string. In the case
of a string the string is the location of the source.
- `location`: A string indicating the relative or absolute URL of the
source. Graph's absolutize method is used if a relative location
is specified.
- `file`: A file-like object.
- `data`: A string containing the data to be parsed.
- `format`: Used if format can not be determined from source, e.g. file
extension or Media Type. Defaults to text/turtle. Format support can
be extended with plugins, but "xml", "n3" (use for turtle), "nt" &
"trix" are built in.
- `publicID`: the logical URI to use as the document base. If None
is specified, the document location is used (at least in the case where
there is a document location).
:Returns:
- self, the graph instance.
Examples:
>>> my_data = '''
... <rdf:RDF
... xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
... xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
... >
... <rdf:Description>
... <rdfs:label>Example</rdfs:label>
... <rdfs:comment>This is really just an example.</rdfs:comment>
... </rdf:Description>
... </rdf:RDF>
... '''
>>> import tempfile
>>> fd, file_name = tempfile.mkstemp()
>>> f = os.fdopen(fd, "w")
>>> dummy = f.write(my_data) # Returns num bytes written
>>> f.close()
>>> g = Graph()
>>> result = g.parse(data=my_data, format="application/rdf+xml")
>>> len(g)
2
>>> g = Graph()
>>> result = g.parse(location=file_name, format="application/rdf+xml")
>>> len(g)
2
>>> g = Graph()
>>> with open(file_name, "r") as f:
... result = g.parse(f, format="application/rdf+xml")
>>> len(g)
2
>>> os.remove(file_name)
>>> # default turtle parsing
>>> result = g.parse(data="<http://example.com/a> <http://example.com/a> <http://example.com/a> .")
>>> len(g)
3
"""
source = create_input_source(
source=source,
publicID=publicID,
location=location,
file=file,
data=data,
format=format,
)
if format is None:
format = source.content_type
could_not_guess_format = False
if format is None:
if (
hasattr(source, "file")
and getattr(source.file, "name", None) # type: ignore[attr-defined]
and isinstance(source.file.name, str) # type: ignore[attr-defined]
):
format = rdflib.util.guess_format(source.file.name) # type: ignore[attr-defined]
if format is None:
format = "turtle"
could_not_guess_format = True
parser = plugin.get(format, Parser)()
try:
# TODO FIXME: Parser.parse should have **kwargs argument.
parser.parse(source, self, **args)
except SyntaxError as se:
if could_not_guess_format:
raise ParserError(
"Could not guess RDF format for %r from file extension so tried Turtle but failed."
"You can explicitly specify format using the format argument."
% source
)
else:
raise se
finally:
if source.auto_close:
source.close()
return self
def query(
self,
query_object,
processor: Union[str, query.Processor] = "sparql",
result: Union[str, Type[query.Result]] = "sparql",
initNs=None,
initBindings=None,
use_store_provided: bool = True,
**kwargs,
) -> query.Result:
"""
Query this graph.
A type of 'prepared queries' can be realised by providing
initial variable bindings with initBindings
Initial namespaces are used to resolve prefixes used in the query,
if none are given, the namespaces from the graph's namespace manager
are used.
:returntype: rdflib.query.Result
"""
initBindings = initBindings or {}
initNs = initNs or dict(self.namespaces())
if hasattr(self.store, "query") and use_store_provided:
try:
return self.store.query(
query_object,
initNs,
initBindings,
self.default_union and "__UNION__" or self.identifier,
**kwargs,
)
except NotImplementedError:
pass # store has no own implementation
if not isinstance(result, query.Result):
result = plugin.get(cast(str, result), query.Result)
if not isinstance(processor, query.Processor):
processor = plugin.get(processor, query.Processor)(self)
return result(processor.query(query_object, initBindings, initNs, **kwargs))
def update(
self,
update_object,
processor="sparql",
initNs=None,
initBindings=None,
use_store_provided=True,
**kwargs,
):
"""Update this graph with the given update query."""
initBindings = initBindings or {}
initNs = initNs or dict(self.namespaces())
if hasattr(self.store, "update") and use_store_provided:
try:
return self.store.update(
update_object,
initNs,
initBindings,
self.default_union and "__UNION__" or self.identifier,
**kwargs,
)
except NotImplementedError:
pass # store has no own implementation
if not isinstance(processor, query.UpdateProcessor):
processor = plugin.get(processor, query.UpdateProcessor)(self)
return processor.update(update_object, initBindings, initNs, **kwargs)
def n3(self):
"""Return an n3 identifier for the Graph"""
return "[%s]" % self.identifier.n3()
def __reduce__(self):
return (
Graph,
(
self.store,
self.identifier,
),
)
def isomorphic(self, other):
"""
does a very basic check if these graphs are the same
If no BNodes are involved, this is accurate.
See rdflib.compare for a correct implementation of isomorphism checks
"""
# TODO: this is only an approximation.
if len(self) != len(other):
return False
for s, p, o in self:
if not isinstance(s, BNode) and not isinstance(o, BNode):
if not (s, p, o) in other:
return False
for s, p, o in other:
if not isinstance(s, BNode) and not isinstance(o, BNode):
if not (s, p, o) in self:
return False
# TODO: very well could be a false positive at this point yet.
return True
def connected(self):
"""Check if the Graph is connected
The Graph is considered undirected.
Performs a search on the Graph, starting from a random node. Then
iteratively goes depth-first through the triples where the node is
subject and object. Return True if all nodes have been visited and
False if it cannot continue and there are still unvisited nodes left.
"""
all_nodes = list(self.all_nodes())
discovered = []
# take a random one, could also always take the first one, doesn't
# really matter.
if not all_nodes:
return False
visiting = [all_nodes[random.randrange(len(all_nodes))]]
while visiting:
x = visiting.pop()
if x not in discovered:
discovered.append(x)
for new_x in self.objects(subject=x):
if new_x not in discovered and new_x not in visiting:
visiting.append(new_x)
for new_x in self.subjects(object=x):
if new_x not in discovered and new_x not in visiting:
visiting.append(new_x)
# optimisation by only considering length, since no new objects can
# be introduced anywhere.
if len(all_nodes) == len(discovered):
return True
else:
return False
def all_nodes(self):
res = set(self.objects())
res.update(self.subjects())
return res
def collection(self, identifier):
"""Create a new ``Collection`` instance.
Parameters:
- ``identifier``: a URIRef or BNode instance.
Example::
>>> graph = Graph()
>>> uri = URIRef("http://example.org/resource")
>>> collection = graph.collection(uri)
>>> assert isinstance(collection, Collection)
>>> assert collection.uri is uri
>>> assert collection.graph is graph
>>> collection += [ Literal(1), Literal(2) ]
"""
return Collection(self, identifier)
def resource(self, identifier):
"""Create a new ``Resource`` instance.
Parameters:
- ``identifier``: a URIRef or BNode instance.
Example::
>>> graph = Graph()
>>> uri = URIRef("http://example.org/resource")
>>> resource = graph.resource(uri)
>>> assert isinstance(resource, Resource)
>>> assert resource.identifier is uri
>>> assert resource.graph is graph
"""
if not isinstance(identifier, Node):
identifier = URIRef(identifier)
return Resource(self, identifier)
def _process_skolem_tuples(self, target, func):
for t in self.triples((None, None, None)):
target.add(func(t))
def skolemize(self, new_graph=None, bnode=None, authority=None, basepath=None):
def do_skolemize(bnode, t):
(s, p, o) = t
if s == bnode:
s = s.skolemize(authority=authority, basepath=basepath)
if o == bnode:
o = o.skolemize(authority=authority, basepath=basepath)
return s, p, o
def do_skolemize2(t):
(s, p, o) = t
if isinstance(s, BNode):
s = s.skolemize(authority=authority, basepath=basepath)
if isinstance(o, BNode):
o = o.skolemize(authority=authority, basepath=basepath)
return s, p, o
retval = Graph() if new_graph is None else new_graph
if bnode is None:
self._process_skolem_tuples(retval, do_skolemize2)
elif isinstance(bnode, BNode):
self._process_skolem_tuples(retval, lambda t: do_skolemize(bnode, t))
return retval
def de_skolemize(self, new_graph=None, uriref=None):
def do_de_skolemize(uriref, t):
(s, p, o) = t
if s == uriref:
s = s.de_skolemize()
if o == uriref:
o = o.de_skolemize()
return s, p, o
def do_de_skolemize2(t):
(s, p, o) = t
if isinstance(s, Genid):
s = s.de_skolemize()
if isinstance(o, Genid):
o = o.de_skolemize()
return s, p, o
retval = Graph() if new_graph is None else new_graph
if uriref is None:
self._process_skolem_tuples(retval, do_de_skolemize2)
elif isinstance(uriref, Genid):
self._process_skolem_tuples(retval, lambda t: do_de_skolemize(uriref, t))
return retval
def cbd(self, resource):
"""Retrieves the Concise Bounded Description of a Resource from a Graph
Concise Bounded Description (CBD) is defined in [1] as:
Given a particular node (the starting node) in a particular RDF graph (the source graph), a subgraph of that
particular graph, taken to comprise a concise bounded description of the resource denoted by the starting node,
can be identified as follows:
1. Include in the subgraph all statements in the source graph where the subject of the statement is the
starting node;
2. Recursively, for all statements identified in the subgraph thus far having a blank node object, include
in the subgraph all statements in the source graph where the subject of the statement is the blank node
in question and which are not already included in the subgraph.
3. Recursively, for all statements included in the subgraph thus far, for all reifications of each statement
in the source graph, include the concise bounded description beginning from the rdf:Statement node of
each reification.
This results in a subgraph where the object nodes are either URI references, literals, or blank nodes not
serving as the subject of any statement in the graph.
[1] https://www.w3.org/Submission/CBD/
:param resource: a URIRef object, the Resource to be queried for
:return: a Graph, subgraph of self
"""
subgraph = Graph()
def add_to_cbd(uri):
for s, p, o in self.triples((uri, None, None)):
subgraph.add((s, p, o))
# recurse 'down' through all Blank Nodes
if type(o) == BNode and not (o, None, None) in subgraph:
add_to_cbd(o)
# for Rule 3 (reification)
# for any rdf:Statement in the graph with the given URI as the object of rdf:subject,
# get all triples with that rdf:Statement instance as subject
# find any subject s where the predicate is rdf:subject and this uri is the object
# (these subjects are of type rdf:Statement, given the domain of rdf:subject)
for s, p, o in self.triples((None, RDF.subject, uri)):
# find all triples with s as the subject and add these to the subgraph
for s2, p2, o2 in self.triples((s, None, None)):
subgraph.add((s2, p2, o2))
add_to_cbd(resource)
return subgraph
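# --- Editor's hedged sketch (not part of the original rdflib source). ---
# A small, self-contained illustration of the Concise Bounded Description
# rules documented in Graph.cbd above: statements whose subject is the
# starting node are kept, and blank-node objects are followed recursively.
# The function is defined but never called at import time; all URIs are
# hypothetical.
def _demo_cbd():
    g = Graph()
    alice = URIRef("http://example.org/alice")
    address = BNode()
    g.add((alice, URIRef("http://example.org/address"), address))
    g.add((address, URIRef("http://example.org/city"), Literal("Springfield")))
    g.add((URIRef("http://example.org/bob"), URIRef("http://example.org/age"), Literal(42)))
    described = g.cbd(alice)
    # Both triples reachable from `alice` (including the blank-node detour)
    # are included; Bob's unrelated triple is not.
    assert len(described) == 2
    return described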
class ConjunctiveGraph(Graph):
"""A ConjunctiveGraph is an (unnamed) aggregation of all the named
graphs in a store.
It has a ``default`` graph, whose name is associated with the
graph throughout its life. :meth:`__init__` can take an identifier
to use as the name of this default graph or it will assign a
BNode.
All methods that add triples work against this default graph.
All queries are carried out against the union of all graphs.
"""
def __init__(
self,
store: Union[Store, str] = "default",
identifier: Optional[Union[IdentifiedNode, str]] = None,
default_graph_base: Optional[str] = None,
):
super(ConjunctiveGraph, self).__init__(store, identifier=identifier)
assert self.store.context_aware, (
"ConjunctiveGraph must be backed by" " a context aware store."
)
self.context_aware = True
self.default_union = True # Conjunctive!
self.default_context = Graph(
store=self.store, identifier=identifier or BNode(), base=default_graph_base
)
def __str__(self):
pattern = (
"[a rdflib:ConjunctiveGraph;rdflib:storage "
"[a rdflib:Store;rdfs:label '%s']]"
)
return pattern % self.store.__class__.__name__
@overload
def _spoc(
self,
triple_or_quad: Union[
Tuple[Node, Node, Node, Optional[Any]], Tuple[Node, Node, Node]
],
default: bool = False,
) -> Tuple[Node, Node, Node, Optional[Graph]]:
...
@overload
def _spoc(
self,
triple_or_quad: None,
default: bool = False,
) -> Tuple[None, None, None, Optional[Graph]]:
...
def _spoc(
self,
triple_or_quad: Optional[
Union[Tuple[Node, Node, Node, Optional[Any]], Tuple[Node, Node, Node]]
],
default: bool = False,
) -> Tuple[Optional[Node], Optional[Node], Optional[Node], Optional[Graph]]:
"""
helper method for having methods that support
either triples or quads
"""
if triple_or_quad is None:
return (None, None, None, self.default_context if default else None)
if len(triple_or_quad) == 3:
c = self.default_context if default else None
(s, p, o) = triple_or_quad # type: ignore[misc]
elif len(triple_or_quad) == 4:
(s, p, o, c) = triple_or_quad # type: ignore[misc]
c = self._graph(c)
return s, p, o, c
def __contains__(self, triple_or_quad):
"""Support for 'triple/quad in graph' syntax"""
s, p, o, c = self._spoc(triple_or_quad)
for t in self.triples((s, p, o), context=c):
return True
return False
def add(
self,
triple_or_quad: Union[
Tuple[Node, Node, Node, Optional[Any]], Tuple[Node, Node, Node]
],
) -> "ConjunctiveGraph":
"""
Add a triple or quad to the store.
if a triple is given it is added to the default context
"""
s, p, o, c = self._spoc(triple_or_quad, default=True)
_assertnode(s, p, o)
self.store.add((s, p, o), context=c, quoted=False)
return self
@overload
def _graph(self, c: Union[Graph, Node, str]) -> Graph:
...
@overload
def _graph(self, c: None) -> None:
...
def _graph(self, c: Optional[Union[Graph, Node, str]]) -> Optional[Graph]:
if c is None:
return None
if not isinstance(c, Graph):
return self.get_context(c)
else:
return c
def addN(self, quads: Iterable[Tuple[Node, Node, Node, Any]]):
"""Add a sequence of triples with context"""
self.store.addN(
(s, p, o, self._graph(c)) for s, p, o, c in quads if _assertnode(s, p, o)
)
return self
def remove(self, triple_or_quad):
"""
Removes a triple or quad
if a triple is given it is removed from all contexts
a quad is removed from the given context only
"""
s, p, o, c = self._spoc(triple_or_quad)
self.store.remove((s, p, o), context=c)
return self
def triples(self, triple_or_quad, context=None):
"""
Iterate over all the triples in the entire conjunctive graph
For legacy reasons, this can take the context to query either
as a fourth element of the quad, or as the explicit context
keyword parameter. The kw param takes precedence.
"""
s, p, o, c = self._spoc(triple_or_quad)
context = self._graph(context or c)
if self.default_union:
if context == self.default_context:
context = None
else:
if context is None:
context = self.default_context
if isinstance(p, Path):
if context is None:
context = self
for s, o in p.eval(context, s, o):
yield s, p, o
else:
for (s, p, o), cg in self.store.triples((s, p, o), context=context):
yield s, p, o
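# The two equivalent ways of scoping the triples() call described above, shown
# as a sketch (cg is assumed to be a ConjunctiveGraph and ctx any Graph or
# graph identifier you already hold):
#
#   for s, p, o in cg.triples((None, None, None, ctx)):          # context as 4th element
#       ...
#   for s, p, o in cg.triples((None, None, None), context=ctx):  # keyword wins if both given
#       ...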
def quads(self, triple_or_quad=None):
"""Iterate over all the quads in the entire conjunctive graph"""
s, p, o, c = self._spoc(triple_or_quad)
for (s, p, o), cg in self.store.triples((s, p, o), context=c):
for ctx in cg:
yield s, p, o, ctx
def triples_choices(self, triple, context=None):
"""Iterate over all the triples in the entire conjunctive graph"""
s, p, o = triple
if context is None:
if not self.default_union:
context = self.default_context
else:
context = self._graph(context)
for (s1, p1, o1), cg in self.store.triples_choices((s, p, o), context=context):
yield s1, p1, o1
def __len__(self):
"""Number of triples in the entire conjunctive graph"""
return self.store.__len__()
def contexts(self, triple=None):
"""Iterate over all contexts in the graph
If triple is specified, iterate over all contexts the triple is in.
"""
for context in self.store.contexts(triple):
if isinstance(context, Graph):
# TODO: One of these should never happen and probably
# should raise an exception rather than smoothing over
# the weirdness - see #225
yield context
else:
yield self.get_context(context)
def get_context(
self,
identifier: Optional[Union[Node, str]],
quoted: bool = False,
base: Optional[str] = None,
) -> Graph:
"""Return a context graph for the given identifier
identifier must be a URIRef or BNode.
"""
# TODO: FIXME - why is ConjunctiveGraph passed as namespace_manager?
return Graph(
store=self.store, identifier=identifier, namespace_manager=self, base=base # type: ignore[arg-type]
)
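# Sketch of typical get_context() use (illustrative identifiers; cg is assumed
# to be a ConjunctiveGraph):
#
#   g1 = cg.get_context(URIRef("http://example.org/graph-1"))
#   g1.add((URIRef("http://example.org/s"), URIRef("http://example.org/p"), Literal("o")))
#   # the triple is now visible through g1 and through the union view on cg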
def remove_context(self, context):
"""Removes the given context from the graph"""
self.store.remove((None, None, None), context)
def context_id(self, uri, context_id=None):
"""URI#context"""
uri = uri.split("#", 1)[0]
if context_id is None:
context_id = "#context"
return URIRef(context_id, base=uri)
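# context_id() simply re-attaches a "#context" fragment to the defragmented
# URI, e.g. (illustrative input):
#
#   ConjunctiveGraph().context_id(URIRef("http://example.org/doc#section"))
#   # -> URIRef("http://example.org/doc#context")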
def parse(
self,
source: Optional[
Union[IO[bytes], TextIO, InputSource, str, bytes, pathlib.PurePath]
] = None,
publicID: Optional[str] = None,
format: Optional[str] = None,
location: Optional[str] = None,
file: Optional[Union[BinaryIO, TextIO]] = None,
data: Optional[Union[str, bytes]] = None,
**args,
):
"""
Parse source adding the resulting triples to its own context
(sub graph of this graph).
See :meth:`rdflib.graph.Graph.parse` for documentation on arguments.
:Returns:
The graph into which the source was parsed. In the case of n3
it returns the root context.
"""
source = create_input_source(
source=source,
publicID=publicID,
location=location,
file=file,
data=data,
format=format,
)
# NOTE on type hint: `xml.sax.xmlreader.InputSource.getPublicId` has no
# type annotations but given that systemId should be a string, and
# given that there is no specific mention of type for publicId, it
# seems reasonable to assume it should also be a string. Furthermore,
# create_input_source will ensure that publicId is not None, though it
# would be good if this guarantee was made more explicit i.e. by type
# hint on InputSource (TODO/FIXME).
g_id: str = publicID or source.getPublicId()
if not isinstance(g_id, Node):
g_id = URIRef(g_id)
context = Graph(store=self.store, identifier=g_id)
context.remove((None, None, None))  # start from an empty context before parsing
context.parse(source, publicID=publicID, format=format, **args)
# TODO: FIXME: This should not return context, but self.
return context
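# Parsing sketch for the method above (the data and publicID are invented).
# Each parse() call builds its own context, named after the source's public ID:
#
#   cg = ConjunctiveGraph()
#   ctx = cg.parse(
#       data="<http://example.org/s> <http://example.org/p> <http://example.org/o> .",
#       format="nt",
#       publicID="http://example.org/graph-A",
#   )
#   assert ctx.identifier == URIRef("http://example.org/graph-A")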
def __reduce__(self):
return ConjunctiveGraph, (self.store, self.identifier)
DATASET_DEFAULT_GRAPH_ID = URIRef("urn:x-rdflib:default")
class Dataset(ConjunctiveGraph):
__doc__ = """
RDF 1.1 Dataset. Small extension to the Conjunctive Graph:
- the primary term is graphs in the datasets and not contexts with quads,
so there is a separate method to set/retrieve a graph in a dataset and
operate with graphs
- graphs cannot be identified with blank nodes
- added a method to directly add a single quad
Examples of usage:
>>> # Create a new Dataset
>>> ds = Dataset()
>>> # simple triples go to the default graph
>>> ds.add((URIRef("http://example.org/a"),
... URIRef("http://www.example.org/b"),
... Literal("foo"))) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Dataset'>)>
>>>
>>> # Create a graph in the dataset, if the graph name has already been
>>> # used, the corresponding graph will be returned
>>> # (ie, the Dataset keeps track of the constituent graphs)
>>> g = ds.graph(URIRef("http://www.example.com/gr"))
>>>
>>> # add triples to the new graph as usual
>>> g.add(
... (URIRef("http://example.org/x"),
... URIRef("http://example.org/y"),
... Literal("bar")) ) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Graph'>)>
>>> # alternatively: add a quad to the dataset -> goes to the graph
>>> ds.add(
... (URIRef("http://example.org/x"),
... URIRef("http://example.org/z"),
... Literal("foo-bar"),g) ) # doctest: +ELLIPSIS
<Graph identifier=... (<class 'rdflib.graph.Dataset'>)>
>>>
>>> # querying triples returns them all regardless of the graph
>>> for t in ds.triples((None,None,None)): # doctest: +SKIP
... print(t) # doctest: +NORMALIZE_WHITESPACE
(rdflib.term.URIRef("http://example.org/a"),
rdflib.term.URIRef("http://www.example.org/b"),
rdflib.term.Literal("foo"))
(rdflib.term.URIRef("http://example.org/x"),
rdflib.term.URIRef("http://example.org/z"),
rdflib.term.Literal("foo-bar"))
(rdflib.term.URIRef("http://example.org/x"),
rdflib.term.URIRef("http://example.org/y"),
rdflib.term.Literal("bar"))
>>>
>>> # querying quads() returns quads; the fourth argument can be unrestricted
>>> # (None) or restricted to a graph
>>> for q in ds.quads((None, None, None, None)): # doctest: +SKIP
... print(q) # doctest: +NORMALIZE_WHITESPACE
(rdflib.term.URIRef("http://example.org/a"),
rdflib.term.URIRef("http://www.example.org/b"),
rdflib.term.Literal("foo"),
None)
(rdflib.term.URIRef("http://example.org/x"),
rdflib.term.URIRef("http://example.org/y"),
rdflib.term.Literal("bar"),
rdflib.term.URIRef("http://www.example.com/gr"))
(rdflib.term.URIRef("http://example.org/x"),
rdflib.term.URIRef("http://example.org/z"),
rdflib.term.Literal("foo-bar"),
rdflib.term.URIRef("http://www.example.com/gr"))
>>>
>>> # unrestricted looping is equivalent to iterating over the entire Dataset
>>> for q in ds: # doctest: +SKIP
... print(q) # doctest: +NORMALIZE_WHITESPACE
(rdflib.term.URIRef("http://example.org/a"),
rdflib.term.URIRef("http://www.example.org/b"),
rdflib.term.Literal("foo"),
None)
(rdflib.term.URIRef("http://example.org/x"),
rdflib.term.URIRef("http://example.org/y"),
rdflib.term.Literal("bar"),
rdflib.term.URIRef("http://www.example.com/gr"))
(rdflib.term.URIRef("http://example.org/x"),
rdflib.term.URIRef("http://example.org/z"),
rdflib.term.Literal("foo-bar"),
rdflib.term.URIRef("http://www.example.com/gr"))
>>>
>>> # restricting iteration to a graph:
>>> for q in ds.quads((None, None, None, g)): # doctest: +SKIP
... print(q) # doctest: +NORMALIZE_WHITESPACE
(rdflib.term.URIRef("http://example.org/x"),
rdflib.term.URIRef("http://example.org/y"),
rdflib.term.Literal("bar"),
rdflib.term.URIRef("http://www.example.com/gr"))
(rdflib.term.URIRef("http://example.org/x"),
rdflib.term.URIRef("http://example.org/z"),
rdflib.term.Literal("foo-bar"),
rdflib.term.URIRef("http://www.example.com/gr"))
>>> # Note that in the call above -
>>> # ds.quads((None,None,None,"http://www.example.com/gr"))
>>> # would have been accepted, too
>>>
>>> # graph names in the dataset can be queried:
>>> for c in ds.graphs(): # doctest: +SKIP
... print(c) # doctest:
DEFAULT
http://www.example.com/gr
>>> # A graph can be created without specifying a name; a skolemized genid
>>> # is created on the fly
>>> h = ds.graph()
>>> for c in ds.graphs(): # doctest: +SKIP
... print(c) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
DEFAULT
http://rdlib.net/.well-known/genid/rdflib/N...
http://www.example.com/gr
>>> # Note that the Dataset.graphs() call returns names of empty graphs,
>>> # too. This can be restricted:
>>> for c in ds.graphs(empty=False): # doctest: +SKIP
... print(c) # doctest: +NORMALIZE_WHITESPACE
DEFAULT
http://www.example.com/gr
>>>
>>> # a graph can also be removed from a dataset via ds.remove_graph(g)
.. versionadded:: 4.0
"""
def __init__(self, store="default", default_union=False, default_graph_base=None):
super(Dataset, self).__init__(store=store, identifier=None)
if not self.store.graph_aware:
raise Exception("DataSet must be backed by a graph-aware store!")
self.default_context = Graph(
store=self.store,
identifier=DATASET_DEFAULT_GRAPH_ID,
base=default_graph_base,
)
self.default_union = default_union
def __str__(self):
pattern = (
"[a rdflib:Dataset;rdflib:storage " "[a rdflib:Store;rdfs:label '%s']]"
)
return pattern % self.store.__class__.__name__
def __reduce__(self):
return (type(self), (self.store, self.default_union))
def __getstate__(self):
return self.store, self.identifier, self.default_context, self.default_union
def __setstate__(self, state):
self.store, self.identifier, self.default_context, self.default_union = state
def graph(self, identifier=None, base=None):
if identifier is None:
from rdflib.term import rdflib_skolem_genid
self.bind(
"genid", "http://rdflib.net" + rdflib_skolem_genid, override=False
)
identifier = BNode().skolemize()
g = self._graph(identifier)
g.base = base
self.store.add_graph(g)
return g
def parse(
self,
source=None,
publicID=None,
format=None,
location=None,
file=None,
data=None,
**args,
):
c = ConjunctiveGraph.parse(
self, source, publicID, format, location, file, data, **args
)
self.graph(c)
return c
def add_graph(self, g):
"""alias of graph for consistency"""
return self.graph(g)
def remove_graph(self, g):
if not isinstance(g, Graph):
g = self.get_context(g)
self.store.remove_graph(g)
if g is None or g == self.default_context:
# default graph cannot be removed
# only triples deleted, so add it back in
self.store.add_graph(self.default_context)
return self
def contexts(self, triple=None):
default = False
for c in super(Dataset, self).contexts(triple):
default |= c.identifier == DATASET_DEFAULT_GRAPH_ID
yield c
if not default:
yield self.graph(DATASET_DEFAULT_GRAPH_ID)
graphs = contexts
def quads(self, quad):
for s, p, o, c in super(Dataset, self).quads(quad):
if c.identifier == self.default_context.identifier:
yield s, p, o, None
else:
yield s, p, o, c.identifier
def __iter__(
self,
) -> Generator[Tuple[Node, URIRef, Node, Optional[IdentifiedNode]], None, None]:
"""Iterates over all quads in the store"""
return self.quads((None, None, None, None))
class QuotedGraph(Graph):
"""
Quoted Graphs are intended to implement Notation 3 formulae. They are
associated with a required identifier that the N3 parser *must* provide
in order to maintain consistent formulae identification for scenarios
such as implication and other such processing.
"""
def __init__(self, store, identifier):
super(QuotedGraph, self).__init__(store, identifier)
def add(self, triple: Tuple[Node, Node, Node]):
"""Add a triple with self as context"""
s, p, o = triple
assert isinstance(s, Node), "Subject %s must be an rdflib term" % (s,)
assert isinstance(p, Node), "Predicate %s must be an rdflib term" % (p,)
assert isinstance(o, Node), "Object %s must be an rdflib term" % (o,)
self.store.add((s, p, o), self, quoted=True)
return self
def addN(self, quads: Iterable[Tuple[Node, Node, Node, Any]]) -> "QuotedGraph":  # type: ignore[override]
"""Add a sequence of triple with context"""
self.store.addN(
(s, p, o, c)
for s, p, o, c in quads
if isinstance(c, QuotedGraph)
and c.identifier is self.identifier
and _assertnode(s, p, o)
)
return self
def n3(self):
"""Return an n3 identifier for the Graph"""
return "{%s}" % self.identifier.n3()
def __str__(self):
identifier = self.identifier.n3()
label = self.store.__class__.__name__
pattern = (
"{this rdflib.identifier %s;rdflib:storage "
"[a rdflib:Store;rdfs:label '%s']}"
)
return pattern % (identifier, label)
def __reduce__(self):
return QuotedGraph, (self.store, self.identifier)
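# Rough illustration of where QuotedGraph instances come from (assumes the
# bundled "n3" parser and a formula-aware store, which the default in-memory
# store is; the prefix and :implies predicate are invented):
#
#   from rdflib import Graph
#   g = Graph()
#   g.parse(data="@prefix : <http://example.org/> ."
#                " { :a :b :c } :implies { :d :e :f } .",
#           format="n3")
#   # both { ... } formulae are parsed into QuotedGraph terms that appear as
#   # the subject and object of the :implies statement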
# Make sure QuotedGraph is ordered correctly
# wrt other Terms.
# this must be done here, as the QuotedGraph cannot be
# circularly imported in term.py
rdflib.term._ORDERING[QuotedGraph] = 11
class Seq(object):
"""Wrapper around an RDF Seq resource
It implements a container type in Python with the order of the items
returned corresponding to the Seq content. It is based on the natural
ordering of the predicate names _1, _2, _3, etc, which is the
'implementation' of a sequence in RDF terms.
"""
def __init__(self, graph, subject):
"""Parameters:
- graph:
the graph containing the Seq
- subject:
the subject of a Seq. Note that the init does not
check whether this is a Seq, this is done in whoever
creates this instance!
"""
_list = self._list = list()
LI_INDEX = URIRef(str(RDF) + "_")
for (p, o) in graph.predicate_objects(subject):
if p.startswith(LI_INDEX): # != RDF.Seq: #
i = int(p.replace(LI_INDEX, ""))
_list.append((i, o))
# here is the trick: the predicates are _1, _2, _3, etc. Ie,
# by sorting the keys (by integer) we have what we want!
_list.sort()
def toPython(self):
return self
def __iter__(self):
"""Generator over the items in the Seq"""
for _, item in self._list:
yield item
def __len__(self):
"""Length of the Seq"""
return len(self._list)
def __getitem__(self, index):
"""Item given by index from the Seq"""
index, item = self._list.__getitem__(index)
return item
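# Usage sketch for Seq (the data is invented). Given an rdf:Seq resource in a
# graph, Seq gives ordered, list-like access to its members:
#
#   from rdflib import Graph, URIRef, Literal
#   from rdflib.graph import Seq
#   g = Graph()
#   g.parse(data="""
#       @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
#       @prefix ex:  <http://example.org/> .
#       ex:list a rdf:Seq ; rdf:_1 "first" ; rdf:_2 "second" .
#   """, format="turtle")
#   items = Seq(g, URIRef("http://example.org/list"))
#   assert list(items) == [Literal("first"), Literal("second")]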
class ModificationException(Exception):
def __init__(self):
pass
def __str__(self):
return (
"Modifications and transactional operations not allowed on "
"ReadOnlyGraphAggregate instances"
)
class UnSupportedAggregateOperation(Exception):
def __init__(self):
pass
def __str__(self):
return "This operation is not supported by ReadOnlyGraphAggregate " "instances"
class ReadOnlyGraphAggregate(ConjunctiveGraph):
"""Utility class for treating a set of graphs as a single graph
Only read operations are supported (hence the name). Essentially a
ConjunctiveGraph over an explicit subset of the entire store.
"""
def __init__(self, graphs, store="default"):
if store is not None:
super(ReadOnlyGraphAggregate, self).__init__(store)
Graph.__init__(self, store)
self.__namespace_manager = None
assert (
isinstance(graphs, list)
and graphs
and [g for g in graphs if isinstance(g, Graph)]
), "graphs argument must be a list of Graphs!!"
self.graphs = graphs
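# Sketch of the intended use (illustrative graphs and data): wrap several
# already-built graphs and query them through one read-only view:
#
#   g1, g2 = Graph(), Graph()
#   g1.add((URIRef("http://example.org/a"), URIRef("http://example.org/p"), Literal("1")))
#   g2.add((URIRef("http://example.org/b"), URIRef("http://example.org/p"), Literal("2")))
#   agg = ReadOnlyGraphAggregate([g1, g2])
#   assert len(agg) == 2
#   # any write, e.g. agg.add(...), raises ModificationException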
def __repr__(self):
return "<ReadOnlyGraphAggregate: %s graphs>" % len(self.graphs)
def destroy(self, configuration):
raise ModificationException()
# Transactional interfaces (optional)
def commit(self):
raise ModificationException()
def rollback(self):
raise ModificationException()
def open(self, configuration, create=False):
# TODO: is there a use case for this method?
for graph in self.graphs:
graph.open(configuration, create)
def close(self):
for graph in self.graphs:
graph.close()
def add(self, triple):
raise ModificationException()
def addN(self, quads):
raise ModificationException()
def remove(self, triple):
raise ModificationException()
def triples(self, triple):
s, p, o = triple
for graph in self.graphs:
if isinstance(p, Path):
for s, o in p.eval(self, s, o):
yield s, p, o
else:
for s1, p1, o1 in graph.triples((s, p, o)):
yield s1, p1, o1
def __contains__(self, triple_or_quad):
context = None
if len(triple_or_quad) == 4:
context = triple_or_quad[3]
for graph in self.graphs:
if context is None or graph.identifier == context.identifier:
if triple_or_quad[:3] in graph:
return True
return False
def quads(self, triple_or_quad):
"""Iterate over all the quads in the entire aggregate graph"""
c = None
if len(triple_or_quad) == 4:
s, p, o, c = triple_or_quad
else:
s, p, o = triple_or_quad
if c is not None:
for graph in [g for g in self.graphs if g == c]:
for s1, p1, o1 in graph.triples((s, p, o)):
yield s1, p1, o1, graph
else:
for graph in self.graphs:
for s1, p1, o1 in graph.triples((s, p, o)):
yield s1, p1, o1, graph
def __len__(self):
return sum(len(g) for g in self.graphs)
def __hash__(self):
raise UnSupportedAggregateOperation()
def __cmp__(self, other):
if other is None:
return -1
elif isinstance(other, Graph):
return -1
elif isinstance(other, ReadOnlyGraphAggregate):
return (self.graphs > other.graphs) - (self.graphs < other.graphs)
else:
return -1
def __iadd__(self, other):
raise ModificationException()
def __isub__(self, other):
raise ModificationException()
# Convenience methods
def triples_choices(self, triple, context=None):
subject, predicate, object_ = triple
for graph in self.graphs:
choices = graph.triples_choices((subject, predicate, object_))
for (s, p, o) in choices:
yield s, p, o
def qname(self, uri):
if hasattr(self, "namespace_manager") and self.namespace_manager:
return self.namespace_manager.qname(uri)
raise UnSupportedAggregateOperation()
def compute_qname(self, uri, generate=True):
if hasattr(self, "namespace_manager") and self.namespace_manager:
return self.namespace_manager.compute_qname(uri, generate)
raise UnSupportedAggregateOperation()
def bind(self, prefix, namespace, override=True):
raise UnSupportedAggregateOperation()
def namespaces(self):
if hasattr(self, "namespace_manager"):
for prefix, namespace in self.namespace_manager.namespaces():
yield prefix, namespace
else:
for graph in self.graphs:
for prefix, namespace in graph.namespaces():
yield prefix, namespace
def absolutize(self, uri, defrag=1):
raise UnSupportedAggregateOperation()
def parse(self, source, publicID=None, format=None, **args):
raise ModificationException()
def n3(self):
raise UnSupportedAggregateOperation()
def __reduce__(self):
raise UnSupportedAggregateOperation()
def _assertnode(*terms):
for t in terms:
assert isinstance(t, Node), "Term %s must be an rdflib term" % (t,)
return True
class BatchAddGraph(object):
"""
Wrapper around a graph that turns batches of calls to Graph's add
(and optionally, addN) into batched calls to Graph's addN.
:Parameters:
- graph: The graph to wrap
- batch_size: The maximum number of triples to buffer before passing to
Graph's addN
- batch_addn: If True, then even calls to `addN` will be batched according to
batch_size
graph: The wrapped graph
count: The number of triples buffered since initialization or the last call to reset
batch: The current buffer of triples
"""
def __init__(self, graph: Graph, batch_size: int = 1000, batch_addn: bool = False):
if not batch_size or batch_size < 2:
raise ValueError("batch_size must be a positive number")
self.graph = graph
self.__graph_tuple = (graph,)
self.__batch_size = batch_size
self.__batch_addn = batch_addn
self.reset()
def reset(self):
"""
Manually clear the buffered triples and reset the count to zero
"""
self.batch = []
self.count = 0
return self
def add(
self,
triple_or_quad: Union[Tuple[Node, Node, Node], Tuple[Node, Node, Node, Any]],
) -> "BatchAddGraph":
"""
Add a triple to the buffer
:param triple_or_quad: The triple (or quad) to add
"""
if len(self.batch) >= self.__batch_size:
self.graph.addN(self.batch)
self.batch = []
self.count += 1
if len(triple_or_quad) == 3:
self.batch.append(triple_or_quad + self.__graph_tuple)
else:
self.batch.append(triple_or_quad)
return self
def addN(self, quads: Iterable[Tuple[Node, Node, Node, Any]]):
if self.__batch_addn:
for q in quads:
self.add(q)
else:
self.graph.addN(quads)
return self
def __enter__(self):
self.reset()
return self
def __exit__(self, *exc):
if exc[0] is None:
self.graph.addN(self.batch)
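# Usage sketch for BatchAddGraph (the sizes and data are invented). Used as a
# context manager it flushes any remaining buffered triples on a clean exit:
#
#   g = Graph()
#   with BatchAddGraph(g, batch_size=500) as batch:
#       for i in range(1200):
#           batch.add((URIRef("http://example.org/s%d" % i),
#                      URIRef("http://example.org/p"),
#                      Literal(i)))
#   assert len(g) == 1200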
def test():
import doctest
doctest.testmod()
if __name__ == "__main__":
test()
|
RDFLib/rdflib
|
rdflib/graph.py
|
Python
|
bsd-3-clause
| 84,117
|