| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
import datetime, re
from mod_helper import *
debug = True
def sesamstrasseShow():
mediaList = ObjectContainer(no_cache=True)
if debug == True: Log("Running sesamstrasseShow()...")
try:
urlMain = "http://www.sesamstrasse.de"
content = getURL(urlMain+"/home/homepage1077.html")
spl = content.split('<div class="thumb">')
for i in range(1, len(spl), 1):
entry = spl[i]
match = re.compile('title="(.+?)"', re.DOTALL).findall(entry)
title = match[0]
match = re.compile('href="(.+?)"', re.DOTALL).findall(entry)
url = urlMain+match[0]
match = re.compile('src="(.+?)"', re.DOTALL).findall(entry)
thumb = urlMain+match[0]
thumb = thumb[:thumb.find("_")]+"_v-original.jpg"
match = re.compile('<div class="subline">(.+?) \\| (.+?):', re.DOTALL).findall(entry)
date = ""
duration = ""
if match:
date = match[0][0]
date = date[:date.rfind('.')].strip()
duration = match[0][1]
title = date+" - "+title
item = {
'title':title,
'url':url,
'thumb':thumb,
'duration':int(duration)*60000
}
if debug == True: Log("Adding: " + title)
vo = sesamstrasseCreateVideoObject(item)
mediaList.add(vo)
return mediaList
except Exception as e:
if debug == True: Log("ERROR: " + str(e))
def sesamstrasseCreateVideoObject(item, container = False):
if debug == True: Log("Running sesamstrasseCreateVideoObject()...")
if debug == True: Log("Creating VideoObject: " + str(item))
try:
vo = VideoClipObject(
key = Callback(sesamstrasseCreateVideoObject, item = item, container = True),
title = item['title'],
thumb = item['thumb'],
duration = item['duration'],
rating_key = item['url'],
items = []
)
# Lookup URL and create MediaObject.
mo = MediaObject(parts = [PartObject(key = Callback(sesamstrasseGetStreamingUrl, url = item['url']))])
# Append mediaobject to clipobject.
vo.items.append(mo)
if container:
return ObjectContainer(objects = [vo])
else:
return vo
except Exception as e:
if debug == True: Log("ERROR: " + str(e))
def sesamstrasseGetStreamingUrl(url):
if debug == True: Log("Running sesamstrasseGetStreamingUrl()...")
try:
quality = 'hd'
if ',sesamstrasse' in url:
            regex_suffix_id = r',sesamstrasse(.+?)\.html'
            try:
                suffix_id = re.findall(regex_suffix_id, url)[0]
            except IndexError:
                suffix_id = '3000'
        else:
            suffix_id = '3000'
content = getURL(url)
json_uuid = re.findall('player_image-(.+?)_', content)[0]
json_url = 'http://www.sesamstrasse.de/sendungsinfos/sesamstrasse%s-ppjson_image-%s.json' % (suffix_id, json_uuid)
json = getURL(json_url)
        regex_qualities = r'\.,(.+?),\.'
qualities = re.findall(regex_qualities, json)[-1].split(',')
if not (quality in qualities): quality = qualities[-1]
regex_url = '"src": "http://(.+?)"'
urls = re.findall(regex_url, json)
stream_url = ''
        for u in urls:
            if u.endswith('.mp4'):
                stream_url = 'http://' + u[:-6] + quality + '.mp4'
break
if not stream_url: return
if debug == True: Log("Playing video URL: " + stream_url)
return Redirect(stream_url)
except Exception as e:
if debug == True: Log("ERROR: " + str(e))
| realriot/KinderThek.bundle | Contents/Code/mod_sesamstrasse.py | Python | bsd-3-clause | 3,151 |
from code import InteractiveConsole
import sys
try:
import websocket
except ImportError:
print('Please install websocket_client')
raise
class InteractiveRemoteConsole(InteractiveConsole):
def __init__(self, uri=None):
if uri is None:
uri = 'ws://localhost:44445/'
InteractiveConsole.__init__(self, None, "<remoteconsole>")
self.websocket = websocket.create_connection(uri)
self.websocket.settimeout(5)
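    # Protocol note (inferred from recv() below): the server echoes
    # interpreter output and terminates each response with a '>>> ' or
    # '... ' prompt; recv() reads until one of those prompts appears.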
def interact(self, banner=None, exitmsg=None):
if banner is None:
self.write("Remote Python to Minecraft\n")
elif banner:
self.write("%s\n" % str(banner))
self.recv()
while 1:
try:
try:
line = self.raw_input()
except EOFError:
self.write("\n")
break
else:
self.push(line)
self.recv()
except KeyboardInterrupt:
self.write("\nKeyboardInterrupt\n")
break
self.websocket.close()
if exitmsg is None:
self.write('now exiting %s...\n' % self.__class__.__name__)
elif exitmsg != '':
self.write('%s\n' % exitmsg)
    def recv(self, suppress_prompt=False):
        result = None
        while result is None or (not result.endswith('>>> ') and not result.endswith('... ')):
            result = self.websocket.recv()
            if not suppress_prompt or (not result.endswith('>>> ') and not result.endswith('... ')):
                print(result, end='')
def push(self, line):
self.websocket.send(line)
def interact(uri=None, readfunc=None, banner=None, exitmsg=None):
console = InteractiveRemoteConsole(uri)
if readfunc is not None:
console.raw_input = readfunc
else:
try:
import readline
except ImportError:
pass
console.interact(banner, exitmsg)
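# Example invocations (sketch, inferred from the __main__ block below):
#   python remote-client.py                            # interactive REPL
#   python remote-client.py script.py                  # replay a script
#   python remote-client.py script.py ws://host:44445/ # custom endpoint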
if __name__ == '__main__':
if len(sys.argv) > 1:
uri = None if len(sys.argv) <= 2 else sys.argv[2]
source = sys.argv[1]
console = InteractiveRemoteConsole(uri)
with open(source, 'r') as sourcefile:
# Wait for initial prompt
            console.recv(suppress_prompt=True)
# Send lines
for line in sourcefile:
line = line.rstrip()
console.push(line)
                console.recv(suppress_prompt=True)
# Add final new lines
console.push("")
            console.recv(suppress_prompt=True)
console.push("")
            console.recv(suppress_prompt=True)
else:
interact()
| Macuyiko/jycraft-legacy | remote-client.py | Python | bsd-3-clause | 2,707 |
#---------------------------------
#Joseph Boyd - joseph.boyd@epfl.ch
#---------------------------------
from bs4 import BeautifulSoup
from urllib2 import urlopen
import csv
BASE_URL = 'http://www.tutiempo.net'
PAGE_1 = '/en/Climate/India/IN.html'
PAGE_2 = '/en/Climate/India/IN_2.html'
headings = ['Location', 'Year', 'Month', 'T', 'TM', 'Tm', 'SLP', 'H', 'PP', 'VV', 'V', 'VM', 'VG', 'RA', 'SN', 'TS', 'FG']
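# Apparent meanings of the tutiempo.net column codes (unverified against the
# site's legend): T avg temp, TM max temp, Tm min temp, SLP sea-level
# pressure, H humidity, PP precipitation, VV visibility, V avg wind speed,
# VM max sustained wind, VG max gust, RA/SN/TS/FG rain/snow/storm/fog
# indicators.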
MAX_ROWS = 100000
FIRST_YEAR = 1999
def get_links(url):
html = urlopen(url).read()
soup = BeautifulSoup(html, 'lxml')
    listing = soup.find('div', id='ListadosV4')
    location_links = [BASE_URL + li.a['href'] for li in listing.findAll('li')]
    return location_links
def write_log(message):
f_log = open("log.txt", 'a')
f_log.write(message)
f_log.close()
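# Running this module directly crawls tutiempo.net and appends rows to
# climate_data_1.csv, rolling over to a fresh climate_data_<n>.csv file
# every MAX_ROWS rows.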
def main():
links = get_links(BASE_URL + PAGE_1)
links.extend(get_links(BASE_URL + PAGE_2))
csvfile = open('climate_data_1.csv', 'wb')
csv_writer = csv.writer(csvfile)
csv_writer.writerow(headings)
    num_rows = 0
    num_files = 1
for link in links:
print ('Retrieving data from %s ...\n'%(link))
html = urlopen(link).read()
soup = BeautifulSoup(html, 'lxml')
year_list = soup.find('div', id='SelectYear')
title = link.split('/')[-2]
print ('Location: %s\n'%(title))
if year_list is None:
continue
for li in year_list.findAll('li'):
year = int(','.join(li.findAll(text=True)))
print (str(year) + '\n')
if year >= FIRST_YEAR:
html = urlopen(BASE_URL + li.a['href']).read()
soup = BeautifulSoup(html, 'lxml')
month_list = soup.find('div', id='SelectMes')
if month_list is None:
month_list = soup.find('div','ListasLeft')
if month_list is None:
continue
for month in month_list.findAll('li'):
month_name = ','.join(month.findAll(text=True))
if month_name[0:10] == 'Historical':
month_name = month_name.split(" ")[1]
print (month_name + '\n')
html = urlopen(BASE_URL + month.a['href']).read()
soup = BeautifulSoup(html, 'lxml')
climate_table = soup.find('table', 'TablaClima')
if climate_table is None:
continue
climate_rows = climate_table.findAll('tr')
for row in climate_rows[1:-2]:
data = row.findAll('td')
print_line = [title, year, month_name]
for datum in data:
a = ','.join(datum.findAll(text=True))
print_line.append(a.encode('utf8'))
csv_writer.writerow(print_line)
num_rows += 1
if num_rows == MAX_ROWS:
csvfile.close()
num_files += 1
csvfile = open('climate_data_%s.csv'%(num_files), 'wb')
csv_writer = csv.writer(csvfile)
csv_writer.writerow(headings)
num_rows = 0
csvfile.close()
if __name__ == '__main__':
main()
| FAB4D/humanitas | data_collection/ts/climate/get_climate_data.py | Python | bsd-3-clause | 3,498 |
import csv, datetime
from cStringIO import StringIO
# this must match formdata.db_worker_settings : FIELDDIR_LOCATION
# the variable is set by the formdata/management/commands/generate_field_name_list.py
# the assumption here is that this is being executed from this directory
from field_reference import fields
# need a settings place
TRANSACTION_MAX_ROWS = 1000
class CSV_dumper(object):
""" Helper class to aggregate electronic filing data rows, which can then be loaded w/ raw postgres 'copy...' in a single transaction block. Because we're using cStringIO we can't both read and write--once we get the value from the StringIO we're done. """
def _get_writer(self, stringio, fields):
writer = csv.DictWriter(stringio, fields, restval="", extrasaction='ignore', lineterminator='\n', delimiter="|", quoting=csv.QUOTE_NONE, quotechar='', escapechar='')
return writer
def __init__(self, connection):
# do we want to use the filing number to leave breadcrumbs ? Probably not, but...
now = datetime.datetime.now()
formatted_time = now.strftime("%Y%m%d_%H%M")
connection.set_isolation_level(0)
self.cursor = connection.cursor()
self.fields = fields
self.writers = {}
self.counter = {}
for sked in ['A', 'B', 'E', 'O']:
self.writers[sked] = {}
self.writers[sked]['stringio'] = StringIO()
# hack to make csv use pipes as delimiters and not escape quote chars. We need to use quote chars to create the hstores for postgres, so...
self.writers[sked]['writer'] = self._get_writer(self.writers[sked]['stringio'], self.fields[sked])
self.counter[sked] = 0
def _get_db_name(self, sked):
# just for testing -- don't actually use this in normal operation #
db_name = "formdata_otherline"
if sked in (['A', 'B', 'E']):
db_name = "formdata_sked%s" % (sked.lower())
return db_name
def _commit_rows(self, sked):
print "\nCommitting sked %s with length %s" % (sked, self.counter[sked])
# mark the end of the data
self.writers[sked]['stringio'].write("\\.\n")
## commit here
length = self.writers[sked]['stringio'].tell()
self.writers[sked]['stringio'].seek(0)
dbname = self._get_db_name(sked)
self.cursor.copy_from(self.writers[sked]['stringio'], dbname, sep='|', size=length, columns=self.fields[sked], null="")
print "Commit completed."
## We're done, now clear the var
self.writers[sked]['stringio'].close()
self.writers[sked]['stringio'] = StringIO()
self.writers[sked]['writer'] = self._get_writer(self.writers[sked]['stringio'], self.fields[sked])
def writerow(self, sked, dictrow):
self.counter[sked] = self.counter[sked] + 1
#print "Writing row %s with counter set to %s" % (sked, self.counter[sked])
#print "Row is %s" % dictrow
thiswriter = self.writers[sked]['writer']
thiswriter.writerow(dictrow)
if self.counter[sked] % TRANSACTION_MAX_ROWS == 0:
self._commit_rows(sked)
return 1
def get_counter(self):
return self.counter
def _get_sql_data(self, sked):
return self.writers[sked]['stringio'].getvalue()
def commit_all(self):
for sked in ['A', 'B', 'E', 'O']:
if self.counter[sked] > 0:
self._commit_rows(sked)
def close(self):
for sked in ['A', 'B', 'E', 'O']:
self.writers[sked]['stringio'].close()
"""
from formdata.utils.write_csv_to_db import CSV_dumper
d = CSV_dumper()
data = {'filing_number':23}
d.writerow('E', datadict)
d.get_rowdata('E')
"""
| sunlightlabs/read_FEC | fecreader/formdata/utils/write_csv_to_db.py | Python | bsd-3-clause | 3,882 |
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Roboterclub Aachen e.V.
# All rights reserved.
#
# The file is part of the xpcc library and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
from logger import Logger
class DeviceMerger:
""" DeviceMerger
Merges several devices into logical groups.
Since most Devices have the same peripherals, but differ in other
parameters, like Flash/RAM/EEPROM sizes, packaging or other minor
differences, it makes sense to group them accordingly.
    Please note that this class only makes the resulting XML files more user
friendly to manually edit, since the user does not have to apply the
changes to multiple files. Ideally - reality might differ :(.
"""
def __init__(self, devices, logger=None):
self.mergedDevices = list(devices)
        if logger is None:
self.log = Logger()
else:
self.log = logger
def mergedByPlatform(self, platform):
if platform == 'avr':
self.mergedByType()
self.mergedByName()
elif platform == 'stm32':
self.mergedBySize()
self.mergedByName()
def mergedByType(self):
self.mergedDevices = self._mergeDevicesByType(self.mergedDevices)
def mergedByName(self):
self.mergedDevices = self._mergeDevicesByName(self.mergedDevices)
def mergedBySize(self):
self.mergedDevices = self._mergeDevicesBySize(self.mergedDevices)
def _mergeDevicesByName(self, devices):
"""
This is a simple helper method to merge devices based on name.
"""
avrDevices = []
xmegaDevices = []
stm32Devices = []
result = []
for dev in devices:
if dev.ids.intersection.platform == 'avr':
if dev.ids.intersection.family == 'xmega':
xmegaDevices.append(dev)
else:
avrDevices.append(dev)
elif dev.ids.intersection.platform == 'stm32':
stm32Devices.append(dev)
else:
result.append(dev)
avrDevices = self._mergeDevicesByNameAVR(avrDevices)
xmegaDevices = self._mergeDevicesByNameXMEGA(xmegaDevices)
stm32Devices = self._mergeDevicesByNameSTM32(stm32Devices)
result.extend(avrDevices)
result.extend(xmegaDevices)
result.extend(stm32Devices)
return result
def _mergeDevicesByNameSTM32(self, devices):
"""
This checks the size-id and name of the devices, and merges the devices
based on the observation, that the size-id only influences the size of
memories, i.e. FLASH, RAM.
"""
# copy the devices, since this array will be modified
devs = list(devices)
merged = []
while len(devs) > 0:
current = devs[0]
devs.remove(current)
matches = []
name_ids = self._getCategoryNameSTM32(current)
size_ids = self._getCategorySizeSTM32(current)
self.log.info("ByName: Searching for device with names '%s' and size-ids '%s'" % (name_ids, size_ids))
for dev in devs:
if dev.ids.getAttribute('name')[0] in name_ids and \
dev.ids.getAttribute('size_id')[0] in size_ids:
matches.append(dev)
for match in matches:
devs.remove(match)
current = current.getMergedDevice(match)
if len(matches) == 0:
self.log.info("ByName: no match for device: " + current.id.string)
self.log.debug("ByName:\nResulting device:\n" + str(current))
merged.append(current)
return merged
def _getCategoryNameSTM32(self, device):
names = device.ids.getAttribute('name')
family = device.id.family
if family == 'f0':
categories = [ ['030', '050', '070'],
['031', '051', '071', '091'],
['042', '072'],
['038', '048', '058', '078', '098']]
elif family == 'f1':
categories = [ ['100'],
['101', '102'],
['103'],
['105', '107'] ]
elif family == 'f2':
categories = [ ['205', '207', '215', '217'] ]
elif family == 'f3':
categories = [ ['301'],
['302'],
['303'],
['334'],
['318', '328', '358', '378', '398'],
['373'] ]
elif family == 'f4':
categories = [ ['401', '411'],
['410'],
['412'],
['405', '415', '407', '417'],
['427', '437', '429', '439'],
['446'],
['469', '479'] ]
elif family == 'f7':
categories = [ ['745', '746', '756'],
['765', '767', '768', '769', '777', '778', '779'] ]
# make sure that only one category is used!
for cat in categories:
if names[0] in cat:
return cat
return categories[0]
def _mergeDevicesByNameXMEGA(self, devices):
"""
This checks the size-id and name of the devices, and merges the devices
based on the observation, that the size-id only influences the size of
memories, i.e. FLASH, RAM, and EEPROM.
"""
# copy the devices, since this array will be modified
devs = list(devices)
merged = []
while len(devs) > 0:
current = devs[0]
devs.remove(current)
matches = []
device_type = current.ids.getAttribute('type')[0]
            if device_type is not None:
self.log.info("ByName: Searching for device with type '%s'" % device_type)
for dev in devs:
if dev.ids.getAttribute('type')[0] == device_type:
# A3 none|b and bu|u are different enough to warrant
# a new device file
if device_type == 'a3':
if dev.ids.getAttribute('pin_id')[0] in self._getCategoryPinIdAVR(current):
matches.append(dev)
else:
matches.append(dev)
for match in matches:
devs.remove(match)
current = current.getMergedDevice(match)
if len(matches) == 0:
self.log.info("ByName: no match for device: " + current.id.string)
self.log.debug("ByName:\nResulting device:\n" + str(current))
merged.append(current)
return merged
def _mergeDevicesByNameAVR(self, devices):
"""
This checks the size-id and name of the devices, and merges the devices
based on the observation, that the size-id only influences the size of
memories, i.e. FLASH, RAM, and EEPROM.
"""
# copy the devices, since this array will be modified
devs = list(devices)
merged = []
while len(devs) > 0:
current = devs[0]
devs.remove(current)
matches = []
size_id = current.ids.getAttribute('size_id')[0]
            if size_id is not None:
name = current.ids.getAttribute('name')[0]
device_type = current.ids.getAttribute('type')[0]
family = name[len(size_id):]
if not (family == "" and device_type == None):
device_type = self._getCategoryTypeAVR(current)
self.log.info("ByName: Searching for device ending in '"
+ family + "' and '" + str(device_type) + "'")
for dev in devs:
dname = dev.ids.getAttribute('name')[0]
dsize_id = dev.ids.getAttribute('size_id')[0]
                    # if they do not have a size-id they are probably unmergeable
                    if dsize_id is not None:
dfamily = dname[len(dsize_id):]
                        # prepare for type comparison
# we should only merge when the family is the same,
# and if the type is the same
if dfamily == family and dev.ids.getAttribute('type')[0] in device_type:
matches.append(dev)
# The following code is Atmel's fault with their stupid naming schemes.
# the AT90's, ATmega's and ATtiny's have special merging rules
if current.id.family == "at90":
name = current.id.name
# Some Devices are just not in the same group
if name in ['1', '2', '3', '216', '316', '646', '647', '1286', '1287']:
# these are not the matches you are looking for *move hand*
matches = []
                    # these are not the devices you want to be matched with
for match in matches:
if match.id.name in ['1', '2', '3', '216', '316', '646', '647', '1286', '1287']:
matches.remove(match)
break
# but these are:
namesA = [ ['1', '2', '216'], ['3', '316'], ['646', '647', '1286', '1287'] ]
for names in namesA:
if name in names:
                        for dev in [d for d in devs if d.id.family == "at90"]:
for dname in dev.ids.getAttribute('name'):
if dname in names:
matches.append(dev)
if current.id.family == "atmega":
name = current.id.name
if current.ids.getAttribute('type')[0] in [None, 'none', 'p', 'a', 'pa']:
# Some Devices are just not in the same group
if name in ['8', '16', '32', '64', '128']:
# these are not the matches you are looking for *move hand*
matches = []
# these are not the devices you want to be matched with
for match in matches:
if match.id.name in ['8', '16', '32', '64', '128']:
matches.remove(match)
break
# but these are:
namesA = [ ['16', '32'], ['64', '128'] ]
for names in namesA:
if name in names:
for dev in devs:
if dev.id.family == "atmega" and dev.ids.getAttribute('type')[0] in [None, 'none', 'p', 'a', 'pa']:
for dname in dev.ids.getAttribute('name'):
if dname in names:
matches.append(dev)
if current.id.family == "attiny":
name = current.id.name
names = ['4', '5', '9', '10']
if name in names:
for dev in devs:
if dev.id.family == "attiny":
for dname in dev.ids.getAttribute('name'):
if dname in names:
matches.append(dev)
# Some Devices are just not in the same group
if name in ['28', '20', '40']:
# these are not the matches you are looking for *move hand*
matches = []
                    # these are not the devices you want to be matched with
for match in matches:
if match.id.name in ['28', '20', '40']:
matches.remove(match)
break
for match in matches:
devs.remove(match)
current = current.getMergedDevice(match)
if len(matches) == 0:
self.log.info("ByName: no match for device: " + current.id.string)
self.log.debug("ByName:\nResulting device:\n" + str(current))
merged.append(current)
return merged
def _mergeDevicesBySize(self, devices):
"""
This is a simple helper method to merge devices based on size.
"""
stm32Devices = []
result = []
for dev in devices:
if dev.id.platform == 'stm32':
stm32Devices.append(dev)
else:
result.append(dev)
stm32Devices = self._mergeDevicesBySizeSTM32(stm32Devices)
result.extend(stm32Devices)
return result
def _mergeDevicesBySizeSTM32(self, devices):
"""
This checks the size-id and name of the devices, and merges the devices
based on the observation, that the size-id only influences the size of
memories, i.e. FLASH, RAM.
"""
# copy the devices, since this array will be modified
devs = list(devices)
merged = []
while len(devs) > 0:
current = devs[0]
devs.remove(current)
matches = []
size_ids = self._getCategorySizeSTM32(current)
name = current.ids.getAttribute('name')[0]
self.log.info("BySize: Searching for device with size-id '%s'" % size_ids)
for dev in devs:
if dev.ids.getAttribute('name')[0] == name and \
dev.ids.getAttribute('size_id')[0] in size_ids:
matches.append(dev)
matches.sort(key=lambda k : int(k.getProperty('pin-count').values[0].value), reverse=True)
for match in matches:
devs.remove(match)
current = current.getMergedDevice(match)
if len(matches) == 0:
self.log.info("BySize: no match for device: " + current.id.string)
self.log.debug("BySize:\nResulting device:\n" + str(current))
merged.append(current)
return merged
def _getCategorySizeSTM32(self, device):
size_ids = device.ids.getAttribute('size_id')
family = device.id.family
name = device.ids.getAttribute('name')[0]
# these categories are dependent on name
# these are the categories of mergeable size-ids
if family == 'f0':
categories = [ ['4', '6'],
['8'],
['b', 'c'] ]
if name in ['072', '042']:
categories = [['4', '6'], ['8', 'b']]
elif family == 'f1':
categories = [ ['4', '6'], # low density
['8', 'b'], # medium density
['c', 'd', 'e'], # high density
['f', 'g'] ] # super high density
if name in ['105', '107']:
categories = [ ['8', 'b', 'c'] ] # medium and high density
elif family == 'f2':
categories = [ ['b', 'c', 'd', 'e', 'f', 'g'] ] # high density
elif family == 'f3':
categories = [ ['4', '6', '8'], ['b', 'c', 'd', 'e'] ]
if name in ['373']:
categories = [['8', 'b', 'c']]
elif family == 'f4':
categories = [ ['8', 'b', 'c', 'd'],
['e', 'g', 'i'] ]
if name in ['401']:
categories = [ ['b', 'c', 'd', 'e'] ]
if name in ['411', '412', '446']:
categories = [['c', 'e', 'g']]
elif family == 'f7':
categories = [['e', 'g', 'i']]
# make sure that only one category is used!
for cat in categories:
if size_ids[0] in cat:
return cat
return categories[0]
def _mergeDevicesByType(self, devices):
"""
This is a simple helper method to merge devices based on type.
"""
avrDevices = []
result = []
for dev in devices:
if dev.id.platform == 'avr' and dev.id.family != 'xmega':
avrDevices.append(dev)
else:
result.append(dev)
avrDevices = self._mergeDevicesByTypeAVR(avrDevices)
result.extend(avrDevices)
return result
def _mergeDevicesByTypeAVR(self, devices):
"""
This checks the name suffix (for example 'P', 'A', 'PA') of the
devices and merges them based on the observation, that the suffix
does not have anything to do with the mapping of peripherals.
"""
devs = list(devices)
merged = []
while len(devs) > 0:
current = devs[0]
devs.remove(current)
props = current.id
            if not props.valid:
continue
matches = []
suffix = self._getCategoryTypeAVR(current)
self.log.info("ByType: Searching for device ending in " + str(suffix))
for dev in devs:
if dev.id.name == props.name and dev.id.type in suffix:
matches.append(dev)
for match in matches:
devs.remove(match)
current = current.getMergedDevice(match)
if len(matches) == 0:
self.log.info("ByType: No match for device: " + current.id.string)
self.log.debug("ByType:\nResulting device:\n" + str(current))
merged.append(current)
return merged
def _getCategoryPinIdAVR(self, device):
device_type = device.ids.getAttribute('pin_id')[0]
        # these are the categories of mergeable types
categories = [ # Xmega devices
[None, 'none', 'b'],
['bu', 'u'],
]
# make sure that only one category is used!
for cat in categories:
if device_type in cat:
return cat
return categories[0]
def _getCategoryTypeAVR(self, device):
device_type = device.ids.getAttribute('type')[0]
        # these are the categories of mergeable types
categories = [ # ATmega devices
[None, 'none', 'p', 'a', 'pa'],
['rfa1', 'rfa2', 'rfr1', 'rfr2'],
['hvb', 'hvbrevb'],
['hve2'],
['hva'],
['u2'],
['u4', 'u6'],
['m1', 'c1'],
# AT90 devices
['can'],
['pwm'],
['usb'],
]
# make sure that only one category is used!
for cat in categories:
if device_type in cat:
return cat
return categories[0]
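# Usage sketch (hypothetical; the `devices` list would come from the device
# file reader elsewhere in the generator):
#
#   merger = DeviceMerger(devices, logger=Logger())
#   merger.mergedByPlatform('stm32')
#   merged = merger.mergedDevices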
| dergraaf/xpcc | tools/device_file_generator/dfg/merger.py | Python | bsd-3-clause | 14,982 |
__author__ = "Martin Jakomin, Mateja Rojko"
"""
Classes for boolean operators:
- Var
- Neg
- Or
- And
- Const
Functions:
- nnf
- simplify
- cnf
- solve
- simplify_cnf
"""
import itertools
# functions
def nnf(f):
""" Returns negation normal form """
return f.nnf()
def simplify(f):
""" Simplifies the expression """
return nnf(f).simplify()
def cnf(f):
""" Returns conjunctive normal form """
return nnf(f).cnf().simplify()
def solve(f, v):
""" Solves the expression using the variable values v """
return f.solve(v)
def simplify_cnf(f, v):
""" Simplifies the cnf form using the variable values v """
return cnf(f).simplify_cnf(v).simplify()
# classes
class Var():
"""
Variable
"""
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def solve(self, v):
return v[self.name]
def simplify_cnf(self, v):
if self.name in v:
return Const(v[self.name])
else:
return self
def nnf(self):
return self
def simplify(self):
return self
def cnf(self):
return self
def length(self):
return 1
class Neg():
"""
Negation operator
"""
def __init__(self,v):
self.value = v
def __str__(self):
return "~" + str(self.value.__str__())
def solve(self, v):
return not(self.value.solve(v))
def simplify_cnf(self, v):
if self.value.name in v:
return Const(not(v[self.value.name]))
else:
return self
def nnf(self):
v = self.value
if isinstance(v, Var):
return Neg(v)
elif isinstance(v, Neg):
return v.value.nnf()
elif isinstance(v, And):
return Or([Neg(x) for x in v.value]).nnf()
elif isinstance(v, Or):
return And([Neg(x) for x in v.value]).nnf()
elif isinstance(v, Const):
return v.negate()
def simplify(self):
return self
def cnf(self):
return self
def length(self):
return self.value.length()
class And():
"""
And operator
"""
def __init__(self,lst):
self.value = lst
    def __str__(self):
        return "(" + " & ".join(str(i) for i in self.value) + ")"
def solve(self, v):
for l in self.value:
if l.solve(v) is False:
return False
return True
def simplify_cnf(self, v):
return And([x.simplify_cnf(v) for x in self.value])
def nnf(self):
return And([x.nnf() for x in self.value])
def simplify(self):
s = [x.simplify() for x in self.value]
# And list flatten
ns = []
for x in s:
if isinstance(x, And):
ns.extend(x.value)
else:
ns.append(x)
s = ns
snames = [x.simplify().__str__() for x in s]
s2 = []
for i, x in enumerate(s):
if Neg(x).nnf().__str__() in snames[i+1:]:
return Const(False)
elif isinstance(x, Const):
if x.value is False:
return Const(False)
elif snames[i] not in snames[i+1:]:
s2.append(x)
if len(s2) < 1:
return Const(True)
        elif len(s2) == 1:
return s2[0]
return And(s2)
def cnf(self):
return And([x.cnf().simplify() for x in self.value])
def length(self):
return sum([x.length() for x in self.value])
class Or():
"""
Or operator
"""
def __init__(self, lst):
self.value = lst
    def __str__(self):
        return "(" + " | ".join(str(i) for i in self.value) + ")"
def solve(self, v):
for l in self.value:
if l.solve(v) is True:
return True
return False
def simplify_cnf(self, v):
return Or([x.simplify_cnf(v) for x in self.value])
def nnf(self):
return Or([x.nnf() for x in self.value])
def simplify(self):
s = [x.simplify() for x in self.value]
# Or list flatten
ns = []
for x in s:
if isinstance(x,Or):
ns.extend(x.value)
else:
ns.append(x)
s = ns
snames = [x.simplify().__str__() for x in s]
s2 = []
for i, x in enumerate(s):
if Neg(x).nnf().__str__() in snames[i+1:]:
return Const(True)
elif isinstance(x, Const):
if x.value is True:
return Const(True)
elif snames[i] not in snames[i+1:]:
s2.append(x)
if len(s2) < 1:
return Const(False)
        elif len(s2) == 1:
return s2[0]
return Or(s2)
def cnf(self):
s = [x.cnf().simplify() for x in self.value]
s1 = [x.value if isinstance(x, And) else [x] for x in s]
s2 = []
for e in itertools.product(*s1):
s3 = []
for x in e:
if isinstance(x,Or):
s3.extend(x.value)
else:
s3.append(x)
s2.append(Or(s3))
        if len(s2) == 1:
return s2[0]
return And(s2)
def length(self):
return sum([x.length() for x in self.value])
class Const():
"""
Constant
"""
def __init__(self, c):
self.value = c
def __str__(self):
return str(self.value)
def solve(self, v):
return self.value
def simplify_cnf(self, v):
return self
def nnf(self):
return self
def negate(self):
if self.value is True:
return Const(False)
return Const(True)
def simplify(self):
return self
def cnf(self):
return self
def length(self):
return 1
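# Short usage demo (not part of the original module; illustrative only).
if __name__ == '__main__':
    # (x & y) | ~z  ->  CNF: ((x | ~z) & (y | ~z))
    f = Or([And([Var('x'), Var('y')]), Neg(Var('z'))])
    print(cnf(f))
    print(solve(f, {'x': True, 'y': False, 'z': True}))  # -> False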
| MartinGHub/lvr-sat | SAT/bool.py | Python | bsd-3-clause | 6,053 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 7, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 12);
| antoinecarme/pyaf | tests/artificial/transf_Anscombe/trend_ConstantTrend/cycle_7/ar_12/test_artificial_32_Anscombe_ConstantTrend_7_12_100.py | Python | bsd-3-clause | 268 |
from __future__ import annotations
import pytest
import scitbx.matrix
from cctbx import crystal, sgtbx, uctbx
from cctbx.sgtbx import bravais_types
from dxtbx.model import Crystal
from dials.algorithms.indexing import symmetry
@pytest.mark.parametrize("space_group_symbol", bravais_types.acentric)
def test_SymmetryHandler(space_group_symbol):
sgi = sgtbx.space_group_info(symbol=space_group_symbol)
sg = sgi.group()
cs = sgi.any_compatible_crystal_symmetry(volume=10000)
uc = cs.unit_cell()
handler = symmetry.SymmetryHandler(unit_cell=uc, space_group=sg)
assert (
handler.target_symmetry_primitive.space_group()
== sg.build_derived_patterson_group().info().primitive_setting().group()
)
assert (
handler.target_symmetry_reference_setting.space_group()
== sg.build_derived_patterson_group().info().reference_setting().group()
)
# test apply_symmetry on the primitive setting
cs_primitive = cs.primitive_setting()
B = scitbx.matrix.sqr(
cs_primitive.unit_cell().fractionalization_matrix()
).transpose()
crystal = Crystal(B, sgtbx.space_group())
crystal_new, cb_op = handler.apply_symmetry(crystal)
crystal_new.get_crystal_symmetry(assert_is_compatible_unit_cell=True)
# test apply_symmetry on the minimum cell setting
cs_min_cell = cs.minimum_cell()
B = scitbx.matrix.sqr(
cs_min_cell.unit_cell().fractionalization_matrix()
).transpose()
crystal = Crystal(B, sgtbx.space_group())
crystal_new, cb_op = handler.apply_symmetry(crystal)
crystal_new.get_crystal_symmetry(assert_is_compatible_unit_cell=True)
handler = symmetry.SymmetryHandler(space_group=sg)
assert handler.target_symmetry_primitive.unit_cell() is None
assert (
handler.target_symmetry_primitive.space_group()
== sg.build_derived_patterson_group().info().primitive_setting().group()
)
assert handler.target_symmetry_reference_setting.unit_cell() is None
assert (
handler.target_symmetry_reference_setting.space_group()
== sg.build_derived_patterson_group().info().reference_setting().group()
)
# test apply_symmetry on the primitive setting
cs_primitive = cs.primitive_setting()
B = scitbx.matrix.sqr(
cs_primitive.unit_cell().fractionalization_matrix()
).transpose()
crystal = Crystal(B, sgtbx.space_group())
crystal_new, cb_op = handler.apply_symmetry(crystal)
crystal_new.get_crystal_symmetry(assert_is_compatible_unit_cell=True)
handler = symmetry.SymmetryHandler(
unit_cell=cs_min_cell.unit_cell(),
space_group=sgtbx.space_group(),
)
assert handler.target_symmetry_primitive.unit_cell().volume() == pytest.approx(
cs_min_cell.unit_cell().volume()
)
assert handler.target_symmetry_primitive.space_group() == sgtbx.space_group("P-1")
assert (
handler.target_symmetry_reference_setting.unit_cell().volume()
== pytest.approx(cs_min_cell.unit_cell().volume())
)
assert handler.target_symmetry_reference_setting.space_group() == sgtbx.space_group(
"P-1"
)
# https://github.com/dials/dials/issues/1254
def test_SymmetryHandler_no_match():
sgi = sgtbx.space_group_info(symbol="P422")
cs = sgi.any_compatible_crystal_symmetry(volume=10000)
B = scitbx.matrix.sqr(cs.unit_cell().fractionalization_matrix()).transpose()
crystal = Crystal(B, sgtbx.space_group())
handler = symmetry.SymmetryHandler(
unit_cell=None, space_group=sgtbx.space_group_info("I23").group()
)
assert handler.apply_symmetry(crystal) == (None, None)
# https://github.com/dials/dials/issues/1217
@pytest.mark.parametrize(
"crystal_symmetry",
[
crystal.symmetry(
unit_cell=(
44.66208171,
53.12629403,
62.53397661,
64.86329707,
78.27343894,
90,
),
space_group_symbol="C 1 2/m 1 (z,x+y,-2*x)",
),
crystal.symmetry(
unit_cell=(44.3761, 52.5042, 61.88555952, 115.1002877, 101.697107, 90),
space_group_symbol="C 1 2/m 1 (-z,x+y,2*x)",
),
],
)
def test_symmetry_handler_c2_i2(crystal_symmetry):
cs_ref = crystal_symmetry.as_reference_setting()
cs_ref = cs_ref.change_basis(
cs_ref.change_of_basis_op_to_best_cell(best_monoclinic_beta=False)
)
cs_best = cs_ref.best_cell()
# best -> ref is different to cs_ref above
cs_best_ref = cs_best.as_reference_setting()
assert not cs_ref.is_similar_symmetry(cs_best_ref)
B = scitbx.matrix.sqr(
crystal_symmetry.unit_cell().fractionalization_matrix()
).transpose()
cryst = Crystal(B, sgtbx.space_group())
for cs in (crystal_symmetry, cs_ref, cs_best):
print(cs)
handler = symmetry.SymmetryHandler(space_group=cs.space_group())
new_cryst, cb_op = handler.apply_symmetry(cryst)
assert (
new_cryst.change_basis(cb_op).get_crystal_symmetry().is_similar_symmetry(cs)
)
for cs in (crystal_symmetry, cs_ref, cs_best, cs_best_ref):
print(cs)
handler = symmetry.SymmetryHandler(
unit_cell=cs.unit_cell(), space_group=cs.space_group()
)
new_cryst, cb_op = handler.apply_symmetry(cryst)
assert (
new_cryst.change_basis(cb_op).get_crystal_symmetry().is_similar_symmetry(cs)
)
crystal_symmetries = []
cs = crystal.symmetry(
unit_cell=uctbx.unit_cell("76, 115, 134, 90, 99.07, 90"),
space_group_info=sgtbx.space_group_info(symbol="I2"),
)
crystal_symmetries.append(
crystal.symmetry(
unit_cell=cs.minimum_cell().unit_cell(), space_group=sgtbx.space_group()
)
)
cs = crystal.symmetry(
unit_cell=uctbx.unit_cell("42,42,40,90,90,90"),
space_group_info=sgtbx.space_group_info(symbol="P41212"),
)
crystal_symmetries.append(cs.change_basis(sgtbx.change_of_basis_op("c,a,b")))
for symbol in bravais_types.acentric:
sgi = sgtbx.space_group_info(symbol=symbol)
cs = crystal.symmetry(
unit_cell=sgi.any_compatible_unit_cell(volume=1000), space_group_info=sgi
)
cs = cs.niggli_cell().as_reference_setting().primitive_setting()
crystal_symmetries.append(cs)
@pytest.mark.parametrize("crystal_symmetry", crystal_symmetries)
def test_find_matching_symmetry(crystal_symmetry):
cs = crystal_symmetry
cs.show_summary()
for op in ("x,y,z", "z,x,y", "y,z,x", "-x,z,y", "y,x,-z", "z,-y,x")[:]:
cb_op = sgtbx.change_of_basis_op(op)
uc_inp = cs.unit_cell().change_basis(cb_op)
for ref_uc, ref_sg in [
(cs.unit_cell(), cs.space_group()),
(None, cs.space_group()),
][:]:
best_subgroup = symmetry.find_matching_symmetry(
uc_inp, target_space_group=ref_sg
)
cb_op_inp_best = best_subgroup["cb_op_inp_best"]
assert uc_inp.change_basis(cb_op_inp_best).is_similar_to(
cs.as_reference_setting().best_cell().unit_cell()
)
| dials/dials | tests/algorithms/indexing/test_symmetry.py | Python | bsd-3-clause | 7,130 |
"""
Tests for the following offsets:
- BQuarterBegin
- BQuarterEnd
"""
from __future__ import annotations
from datetime import datetime
import pytest
from pandas._libs.tslibs.offsets import QuarterOffset
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tseries.offsets import (
BQuarterBegin,
BQuarterEnd,
)
def test_quarterly_dont_normalize():
date = datetime(2012, 3, 31, 5, 30)
offsets = (BQuarterEnd, BQuarterBegin)
for klass in offsets:
result = date + klass()
assert result.time() == date.time()
@pytest.mark.parametrize("offset", [BQuarterBegin(), BQuarterEnd()])
def test_on_offset(offset):
dates = [
datetime(2016, m, d)
for m in [10, 11, 12]
for d in [1, 2, 3, 28, 29, 30, 31]
if not (m == 11 and d == 31)
]
for date in dates:
res = offset.is_on_offset(date)
slow_version = date == (date + offset) - offset
assert res == slow_version
class TestBQuarterBegin(Base):
_offset: type[QuarterOffset] = BQuarterBegin
def test_repr(self):
expected = "<BusinessQuarterBegin: startingMonth=3>"
assert repr(BQuarterBegin()) == expected
expected = "<BusinessQuarterBegin: startingMonth=3>"
assert repr(BQuarterBegin(startingMonth=3)) == expected
expected = "<BusinessQuarterBegin: startingMonth=1>"
assert repr(BQuarterBegin(startingMonth=1)) == expected
def test_is_anchored(self):
assert BQuarterBegin(startingMonth=1).is_anchored()
assert BQuarterBegin().is_anchored()
assert not BQuarterBegin(2, startingMonth=1).is_anchored()
def test_offset_corner_case(self):
# corner
offset = BQuarterBegin(n=-1, startingMonth=1)
assert datetime(2007, 4, 3) + offset == datetime(2007, 4, 2)
offset_cases = []
offset_cases.append(
(
BQuarterBegin(startingMonth=1),
{
datetime(2008, 1, 1): datetime(2008, 4, 1),
datetime(2008, 1, 31): datetime(2008, 4, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2007, 3, 15): datetime(2007, 4, 2),
datetime(2007, 2, 28): datetime(2007, 4, 2),
datetime(2007, 1, 1): datetime(2007, 4, 2),
datetime(2007, 4, 15): datetime(2007, 7, 2),
datetime(2007, 7, 1): datetime(2007, 7, 2),
datetime(2007, 4, 1): datetime(2007, 4, 2),
datetime(2007, 4, 2): datetime(2007, 7, 2),
datetime(2008, 4, 30): datetime(2008, 7, 1),
},
)
)
offset_cases.append(
(
BQuarterBegin(startingMonth=2),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 2, 29): datetime(2008, 5, 1),
datetime(2008, 3, 15): datetime(2008, 5, 1),
datetime(2008, 3, 31): datetime(2008, 5, 1),
datetime(2008, 4, 15): datetime(2008, 5, 1),
datetime(2008, 8, 15): datetime(2008, 11, 3),
datetime(2008, 9, 15): datetime(2008, 11, 3),
datetime(2008, 11, 1): datetime(2008, 11, 3),
datetime(2008, 4, 30): datetime(2008, 5, 1),
},
)
)
offset_cases.append(
(
BQuarterBegin(startingMonth=1, n=0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2007, 12, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 1, 15): datetime(2008, 4, 1),
datetime(2008, 2, 27): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2007, 4, 1): datetime(2007, 4, 2),
datetime(2007, 4, 2): datetime(2007, 4, 2),
datetime(2007, 7, 1): datetime(2007, 7, 2),
datetime(2007, 4, 15): datetime(2007, 7, 2),
datetime(2007, 7, 2): datetime(2007, 7, 2),
},
)
)
offset_cases.append(
(
BQuarterBegin(startingMonth=1, n=-1),
{
datetime(2008, 1, 1): datetime(2007, 10, 1),
datetime(2008, 1, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 1, 1),
datetime(2008, 2, 29): datetime(2008, 1, 1),
datetime(2008, 3, 15): datetime(2008, 1, 1),
datetime(2008, 3, 31): datetime(2008, 1, 1),
datetime(2008, 4, 15): datetime(2008, 4, 1),
datetime(2007, 7, 3): datetime(2007, 7, 2),
datetime(2007, 4, 3): datetime(2007, 4, 2),
datetime(2007, 7, 2): datetime(2007, 4, 2),
datetime(2008, 4, 1): datetime(2008, 1, 1),
},
)
)
offset_cases.append(
(
BQuarterBegin(startingMonth=1, n=2),
{
datetime(2008, 1, 1): datetime(2008, 7, 1),
datetime(2008, 1, 15): datetime(2008, 7, 1),
datetime(2008, 2, 29): datetime(2008, 7, 1),
datetime(2008, 3, 15): datetime(2008, 7, 1),
datetime(2007, 3, 31): datetime(2007, 7, 2),
datetime(2007, 4, 15): datetime(2007, 10, 1),
datetime(2008, 4, 30): datetime(2008, 10, 1),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
class TestBQuarterEnd(Base):
_offset: type[QuarterOffset] = BQuarterEnd
def test_repr(self):
expected = "<BusinessQuarterEnd: startingMonth=3>"
assert repr(BQuarterEnd()) == expected
expected = "<BusinessQuarterEnd: startingMonth=3>"
assert repr(BQuarterEnd(startingMonth=3)) == expected
expected = "<BusinessQuarterEnd: startingMonth=1>"
assert repr(BQuarterEnd(startingMonth=1)) == expected
def test_is_anchored(self):
assert BQuarterEnd(startingMonth=1).is_anchored()
assert BQuarterEnd().is_anchored()
assert not BQuarterEnd(2, startingMonth=1).is_anchored()
def test_offset_corner_case(self):
# corner
offset = BQuarterEnd(n=-1, startingMonth=1)
assert datetime(2010, 1, 31) + offset == datetime(2010, 1, 29)
offset_cases = []
offset_cases.append(
(
BQuarterEnd(startingMonth=1),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31),
},
)
)
offset_cases.append(
(
BQuarterEnd(startingMonth=2),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 30),
datetime(2008, 3, 15): datetime(2008, 5, 30),
datetime(2008, 3, 31): datetime(2008, 5, 30),
datetime(2008, 4, 15): datetime(2008, 5, 30),
datetime(2008, 4, 30): datetime(2008, 5, 30),
},
)
)
offset_cases.append(
(
BQuarterEnd(startingMonth=1, n=0),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30),
},
)
)
offset_cases.append(
(
BQuarterEnd(startingMonth=1, n=-1),
{
datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31),
},
)
)
offset_cases.append(
(
BQuarterEnd(startingMonth=1, n=2),
{
datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
| pandas-dev/pandas | pandas/tests/tseries/offsets/test_business_quarter.py | Python | bsd-3-clause | 12,465 |
#!/usr/bin/env python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Library for manipulating naclports packages in python.
This library can be used to build tools for working with naclports
packages. For example, it is used by 'update_mirror.py' to iterate
through all packages and mirror them on commondatastorage.
"""
import optparse
import os
import urlparse
import shlex
import shutil
import subprocess
import sys
import tempfile
import sha1check
MIRROR_URL = 'http://commondatastorage.googleapis.com/nativeclient-mirror/nacl'
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
NACLPORTS_ROOT = os.path.dirname(SCRIPT_DIR)
OUT_DIR = os.path.join(NACLPORTS_ROOT, 'out')
ARCH = os.environ.get('NACL_ARCH', 'i686')
BUILD_ROOT = os.path.join(OUT_DIR, 'repository')
ARCHIVE_ROOT = os.path.join(OUT_DIR, 'tarballs')
NACL_SDK_ROOT = os.environ.get('NACL_SDK_ROOT')
# TODO(sbc): use this code to replace the bash logic in build_tools/common.sh
class Error(Exception):
pass
class Package(object):
"""Representation of a single naclports package.
Package objects correspond to folders on disk which
contain a 'pkg_info' file.
"""
def __init__(self, pkg_root):
self.root = os.path.abspath(pkg_root)
info = os.path.join(pkg_root, 'pkg_info')
keys = []
self.URL_FILENAME = None
self.URL = None
self.LICENSE = None
if not os.path.exists(info):
raise Error('Invalid package folder: %s' % pkg_root)
with open(info) as f:
for i, line in enumerate(f):
if line[0] == '#':
continue
if '=' not in line:
raise Error('Invalid pkg_info line %d: %s' % (i + 1, pkg_root))
key, value = line.split('=', 1)
key = key.strip()
value = shlex.split(value.strip())[0]
keys.append(key)
setattr(self, key, value)
assert 'PACKAGE_NAME' in keys
def GetBasename(self):
basename = os.path.splitext(self.GetArchiveFilename())[0]
if basename.endswith('.tar'):
basename = os.path.splitext(basename)[0]
return basename
def __cmp__(self, other):
return cmp(self.PACKAGE_NAME, other.PACKAGE_NAME)
def GetBuildLocation(self):
package_dir = getattr(self, 'PACKAGE_DIR', self.PACKAGE_NAME)
return os.path.join(BUILD_ROOT, package_dir)
def GetArchiveFilename(self):
if self.URL_FILENAME:
return self.URL_FILENAME
elif self.URL:
return os.path.basename(urlparse.urlparse(self.URL)[2])
def DownloadLocation(self):
archive = self.GetArchiveFilename()
if not archive:
return
return os.path.join(ARCHIVE_ROOT, archive)
def Verify(self, verbose=False):
if not self.GetArchiveFilename():
print "no archive: %s" % self.PACKAGE_NAME
return True
self.Download()
olddir = os.getcwd()
sha1file = os.path.join(self.root, self.PACKAGE_NAME + '.sha1')
try:
os.chdir(ARCHIVE_ROOT)
with open(sha1file) as f:
try:
filenames = sha1check.VerifyFile(f, False)
print "verified: %s" % (filenames)
except sha1check.Error as e:
print "verification failed: %s: %s" % (sha1file, str(e))
return False
finally:
os.chdir(olddir)
return True
def Extract(self):
self.ExtractInto(BUILD_ROOT)
def ExtractInto(self, output_path):
"""Extract the package archive into the given location.
This method assumes the package has already been downloaded.
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
new_foldername = os.path.dirname(self.GetBuildLocation())
if os.path.exists(os.path.join(output_path, new_foldername)):
return
tmp_output_path = tempfile.mkdtemp(dir=OUT_DIR)
try:
archive = self.DownloadLocation()
ext = os.path.splitext(archive)[1]
if ext in ('.gz', '.tgz', '.bz2'):
cmd = ['tar', 'xf', archive, '-C', tmp_output_path]
elif ext in ('.zip',):
cmd = ['unzip', '-q', '-d', tmp_output_path, archive]
else:
raise Error('unhandled extension: %s' % ext)
print cmd
subprocess.check_call(cmd)
src = os.path.join(tmp_output_path, new_foldername)
dest = os.path.join(output_path, new_foldername)
os.rename(src, dest)
finally:
shutil.rmtree(tmp_output_path)
def GetMirrorURL(self):
return MIRROR_URL + '/' + self.GetArchiveFilename()
def Enabled(self):
if hasattr(self, 'LIBC'):
if os.environ.get('NACL_GLIBC') == '1':
if self.LIBC != 'glibc':
raise Error('Package cannot be built with glibc.')
else:
if self.LIBC != 'newlib':
raise Error('Package cannot be built with newlib.')
if hasattr(self, 'DISABLED_ARCH'):
arch = os.environ.get('NACL_ARCH', 'x86_64')
if arch == self.DISABLED_ARCH:
raise Error('Package is disabled for current arch: %s.' % arch)
if hasattr(self, 'BUILD_OS'):
sys.path.append(os.path.join(NACL_SDK_ROOT, 'tools'))
import getos
if getos.GetPlatform() != self.BUILD_OS:
raise Error('Package can only be built on: %s.' % self.BUILD_OS)
def Download(self):
filename = self.DownloadLocation()
if not filename or os.path.exists(filename):
return
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
try:
mirror = self.GetMirrorURL()
print 'Downloading: %s [%s]' % (mirror, filename)
cmd = ['wget', '-O', filename, mirror]
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
print 'Downloading: %s [%s]' % (self.URL, filename)
cmd = ['wget', '-O', filename, self.URL]
subprocess.check_call(cmd)
def PackageIterator(folders=None):
"""Iterator which yield a Package object for each
naclport package."""
if not folders:
folders = [os.path.join(NACLPORTS_ROOT, 'ports')]
for folder in folders:
for root, dirs, files in os.walk(folder):
if 'pkg_info' in files:
yield Package(root)
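# Example (sketch): verify the archive of every package in the tree.
#
#   for pkg in PackageIterator():
#       pkg.Verify()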
def main(args):
try:
parser = optparse.OptionParser()
parser.add_option('-v', '--verbose', action='store_true',
help='Output extra information.')
parser.add_option('-C', dest='dirname', default='.',
help='Change directory before executing commands.')
options, args = parser.parse_args(args)
if not args:
parser.error("You must specify a build command")
if len(args) > 1:
parser.error("More than one command specified")
command = args[0]
if not options.dirname:
options.dirname = '.'
if not NACL_SDK_ROOT:
Error("$NACL_SDK_ROOT not set")
p = Package(options.dirname)
if command == 'download':
p.Download()
elif command == 'check':
pass # simply check that the package is valid.
elif command == 'enabled':
p.Enabled()
elif command == 'verify':
p.Verify()
except Error as e:
sys.stderr.write('naclports: %s\n' % e)
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| adlr/naclports | build_tools/naclports.py | Python | bsd-3-clause | 7,198 |
# -*- coding: utf-8 -*-
from precious import db
from datetime import datetime
class Build(db.Model):
__tablename__ = 'builds'
id = db.Column(db.Integer, primary_key=True, unique=True)
project_id = db.Column(db.Integer, db.ForeignKey('projects.id'))
date = db.Column(db.DateTime)
revision = db.Column(db.LargeBinary)
stdout = db.Column(db.UnicodeText)
success = db.Column(db.Boolean)
    def __init__(self, project_id, revision, stdout=u"", success=True, date=None):
        # A datetime.now() default argument would be evaluated only once, at
        # class definition time; default to None and resolve per instance.
        self.project_id = project_id
        self.date = date if date is not None else datetime.now()
self.revision = revision
self.stdout = stdout
self.success = success
def __repr__(self):
return '<Build id:%r project_id:%r>' % (self.id, self.project_id)
| bzyx/precious | precious/models/Build.py | Python | bsd-3-clause | 763 |
""" Needed Tests
clip_to_rect() tests
--------------------
DONE *. clip_to_rect is inclusive on lower end and exclusive on upper end.
DONE *. clip_to_rect behaves intelligently under scaled ctm.
DONE *. clip_to_rect intersects input rect with the existing clipping rect.
DONE *. current rectangular clipping path is saved/restored to the stack when
save_state/restore_state are called.
DONE *. clip_to_rect clears current path.
DONE *. clip_to_rect raises NotImplementedError under a rotated ctm.
clip_to_rects() tests
---------------------
DONE *. Test that clip_to_rects raises not implemented, or whatever.
"""
import unittest
from numpy import array, transpose
import nose
from kiva.agg import GraphicsContextArray
import kiva
from test_utils import Utils
class ClipToRectTestCase(unittest.TestCase, Utils):
#------------------------------------------------------------------------
# Simple Clipping to a single rectangle.
#------------------------------------------------------------------------
def clip_to_rect_helper(self, desired, scale, clip_rects):
""" desired -- 2D array with a single channels expected byte pattern.
scale -- used in scale_ctm() to change the ctm.
clip_args -- passed in as *clip_args to clip_to_rect.
"""
shp = tuple(transpose(desired.shape))
gc = GraphicsContextArray(shp, pix_format="rgb24")
gc.scale_ctm(scale, scale)
# clear background to white values (255, 255, 255)
gc.clear((1.0, 1.0, 1.0))
if isinstance(clip_rects, tuple):
gc.clip_to_rect(*clip_rects)
else:
for rect in clip_rects:
gc.clip_to_rect(*rect)
gc.rect(0, 0, 4, 4)
# These settings allow the fastest path.
gc.set_fill_color((0.0, 0.0, 0.0)) # black
gc.fill_path()
# test a single color channel
actual = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired, actual)
def test_clip_to_rect_simple(self):
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
clip_rect = (1, 1, 2, 2)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_simple2(self):
desired = array([[255, 255, 255, 255],
[255, 255, 255, 255],
[255, 0, 255, 255],
[255, 255, 255, 255]])
clip_rect = (1, 1, 1, 1)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_negative(self):
desired = array([[255, 255, 255, 255],
[ 0, 0, 0, 255],
[ 0, 0, 0, 255],
[ 0, 0, 0, 255]])
clip_rect = (-1, -1, 4, 4)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_simple3(self):
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
clip_rect = (1, 1, 2.49, 2.49)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_simple4(self):
desired = array([[255, 0, 0, 0],
[255, 0, 0, 0],
[255, 0, 0, 0],
[255, 255, 255, 255]])
clip_rect = (1, 1, 2.5, 2.5)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_simple5(self):
# This tests clipping with a larger rectangle
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
clip_rects = [(1, 1, 2, 2), (0, 0, 4, 4)]
self.clip_to_rect_helper(desired, 1, clip_rects)
def test_empty_clip_region(self):
# This tests when the clipping region is clipped down to nothing.
desired = array([[255, 255, 255, 255],
[255, 255, 255, 255],
[255, 255, 255, 255],
[255, 255, 255, 255]])
clip_rects = [(1,1,4,4), (3,3,1,1), (1,1,1,1)]
self.clip_to_rect_helper(desired, 1, clip_rects)
def test_clip_to_rect_scaled(self):
desired = array([[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 0, 0, 0, 0, 255, 255],
[255, 255, 0, 0, 0, 0, 255, 255],
[255, 255, 0, 0, 0, 0, 255, 255],
[255, 255, 0, 0, 0, 0, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255]])
clip_rect = (1, 1, 2, 2)
self.clip_to_rect_helper(desired, 2.0, clip_rect)
def test_clip_to_rect_scaled2(self):
desired = array([[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255]])
clip_rect = (1, 1, 2.25, 2.25)
self.clip_to_rect_helper(desired, 2.0, clip_rect)
def test_save_restore_clip_state(self):
desired1 = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
desired2 = array([[255, 0, 0, 0],
[255, 0, 0, 0],
[255, 0, 0, 0],
[255, 255, 255, 255]])
gc = GraphicsContextArray((4,4), pix_format="rgb24")
gc.clear((1.0, 1.0, 1.0))
gc.set_fill_color((0.0, 0.0, 0.0))
gc.clip_to_rect(1, 1, 3, 3)
gc.save_state()
gc.clip_to_rect(1, 1, 2, 2)
gc.rect(0, 0, 4, 4)
gc.fill_path()
actual1 = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired1, actual1)
gc.restore_state()
gc.rect(0, 0, 4, 4)
gc.fill_path()
actual2 = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired2, actual2)
def test_clip_to_rect_rotated(self):
# FIXME: test skipped
# This test raises an exception currently because the
# underlying library doesn't handle clipping to a rotated
        # rectangle. For now, we catch the case with an
# exception, so that people can't screw up. In the future,
# we should actually support this functionality.
raise nose.SkipTest
gc = GraphicsContextArray((1,1), pix_format="rgb24")
gc.rotate_ctm(1.0)
self.failUnlessRaises(NotImplementedError,
gc.clip_to_rect, 0, 0, 1, 1)
#------------------------------------------------------------------------
# Successive Clipping of multiple rectangles.
#------------------------------------------------------------------------
def successive_clip_helper(self, desired, scale,
clip_rect1, clip_rect2):
""" desired -- 2D array with a single channels expected byte pattern.
scale -- used in scale_ctm() to change the ctm.
clip_rect1 -- 1st clipping path.
clip_rect2 -- 2nd clipping path.
"""
shp = tuple(transpose(desired.shape))
gc = GraphicsContextArray(shp, pix_format="rgb24")
gc.scale_ctm(scale, scale)
# clear background to white values (255, 255, 255)
gc.clear((1.0, 1.0, 1.0))
gc.clip_to_rect(*clip_rect1)
gc.clip_to_rect(*clip_rect2)
gc.rect(0, 0, 4, 4)
# These settings allow the fastest path.
        gc.set_fill_color((0.0, 0.0, 0.0)) # black
gc.fill_path()
# test a single color channel
actual = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired, actual)
def test_clip_successive_rects(self):
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
clip_rect1 = (1, 1, 20, 20)
clip_rect2 = (0, 0, 3, 3)
self.successive_clip_helper(desired, 1.0, clip_rect1, clip_rect2)
def test_clip_successive_rects2(self):
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
clip_rect1 = (1, 1, 20, 20)
clip_rect2 = (-1, -1, 4, 4)
self.successive_clip_helper(desired, 1.0, clip_rect1, clip_rect2)
#------------------------------------------------------------------------
# Save/Restore clipping path.
#------------------------------------------------------------------------
def test_save_restore_clip_path(self):
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
# this is the clipping path we hope to see.
clip_rect1 = (1, 1, 2, 2)
# this will be a second path that will push/pop that should
# never be seen.
clip_rect2 = (1, 1, 1, 1)
shp = tuple(transpose(desired.shape))
gc = GraphicsContextArray(shp, pix_format="rgb24")
# clear background to white values (255, 255, 255)
gc.clear((1.0, 1.0, 1.0))
gc.clip_to_rect(*clip_rect1)
# push and then pop a path that shouldn't affect the drawing
gc.save_state()
gc.clip_to_rect(*clip_rect2)
gc.restore_state()
gc.rect(0, 0, 4, 4)
# These settings allow the fastest path.
        gc.set_fill_color((0.0, 0.0, 0.0)) # black
gc.fill_path()
# test a single color channel
actual = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired, actual)
def test_reset_path(self):
""" clip_to_rect() should clear the current path.
This is to maintain compatibility with the version
of kiva that sits on top of Apple's Quartz engine.
"""
desired = array([[255, 255, 0, 0],
[255, 255, 0, 0],
[255, 255, 0, 0],
[255, 255, 0, 0]])
shp = tuple(transpose(desired.shape))
gc = GraphicsContextArray(shp, pix_format="rgb24")
# clear background to white values (255, 255, 255)
gc.clear((1.0, 1.0, 1.0))
gc.rect(0, 0, 2, 4)
gc.clip_to_rect(0, 0, 4, 4)
gc.rect(2, 0, 2, 4)
# These settings allow the fastest path.
        gc.set_fill_color((0.0, 0.0, 0.0)) # black
gc.fill_path()
# test a single color channel
actual = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired, actual)
class ClipToRectsTestCase(unittest.TestCase):
def test_not_implemented(self):
""" fix me: Currently not implemented, so we just ensure that
any call to it throws an exception.
"""
gc = GraphicsContextArray((1,1), pix_format="rgb24")
gc.rotate_ctm(1.0)
#self.failUnlessRaises(NotImplementedError, gc.clip_to_rects, [[0, 0, 1, 1]])
if __name__ == "__main__":
unittest.main()
|
tommy-u/enable
|
kiva/agg/tests/clip_to_rect_test_case.py
|
Python
|
bsd-3-clause
| 12,045
|
"""Testing for overlap intervals
"""
import unittest
from genda.transcripts.exon_utils import calcOverlap, collideIntervals, \
collapseIntervals
class TestOverlapFunctions(unittest.TestCase):
def setUp(self):
# Simple Overlap
self.simple = [(1,10), (6,15)]
# One interval enclosed in another
self.enclosed = [(100,200), (110,150)]
# Partial overlap
self.partial = [(150,300), (160,300), (170,330)]
# No overlap
self.no = [(150,300), (10,30)]
# Equal
self.equal = [(1,15), (1,5)]
#Complex interval list
self.full = [(7,20), (1,5), (8,11), (18,50), (100,150)]
def test_bpOverlap(self):
# Make sure overlaps are calculated correctly
self.assertEqual(calcOverlap(self.simple), 4)
self.assertEqual(calcOverlap(self.enclosed), 40)
self.assertEqual(calcOverlap(self.partial),400)
def test_collideIntervals(self):
self.assertEqual(collideIntervals(self.simple[0], self.simple[1]),
[(1,15)])
self.assertEqual(collideIntervals(self.enclosed[0], self.enclosed[1]),
[(100,200)])
self.assertEqual(collideIntervals(self.no[0], self.no[1]),self.no)
def test_collapseIntervals(self):
self.assertEqual(collapseIntervals(self.simple), [(1,15)])
print(self.partial)
self.assertEqual(collapseIntervals(self.partial), [(150,330)])
print(self.full)
self.assertEqual(collapseIntervals(self.full), [(1,5),(7,50),(100,150)])
def test_unique_bp(self):
        self.assertEqual(
            sum(map(lambda x: x[1] - x[0], collapseIntervals(self.partial)))
            - calcOverlap(self.partial), 330 - 150)
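# For reference, a minimal sketch of interval collapsing consistent with the
# expectations above (hypothetical helper; the real implementation lives in
# genda.transcripts.exon_utils):
def _collapse_sketch(intervals):
    merged = []
    for start, end in sorted(intervals):
        if merged and start <= merged[-1][1]:
            # Overlapping (or touching) intervals extend the previous one.
            merged[-1] = (merged[-1][0], max(merged[-1][1], end))
        else:
            merged.append((start, end))
    return merged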
if __name__ == '__main__':
unittest.main()
|
jeffhsu3/genda
|
tests/exon_utils_tests.py
|
Python
|
bsd-3-clause
| 1,802
|
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""//testing/scripts wrapper for the network traffic annotation auditor checks.
This script is used to run traffic_annotation_auditor_tests.py on an FYI bot to
check that traffic_annotation_auditor produces the same results when the
heuristics that help it run fast and spam-free on trybots are disabled."""
import json
import os
import sys
import tempfile
import common
SHEET_CONFIG = {
"spreadsheet_id": "1TmBr9jnf1-hrjntiVBzT9EtkINGrtoBYFMWad2MBeaY",
"annotations_sheet_name": "Annotations",
"changes_sheet_name": "Changes Stats",
"silent_change_columns": [],
"last_update_column_name": "Last Update",
}
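# This config is serialized to a temporary JSON file and handed to
# update_annotations_sheet.py via --config-file (see main_run below).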
def is_windows():
return os.name == 'nt'
def main_run(args):
annotations_file = tempfile.NamedTemporaryFile()
annotations_filename = annotations_file.name
annotations_file.close()
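  # Closing the NamedTemporaryFile deletes it (delete=True by default); only
  # the generated path is kept, for the child script to create and write.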
command_line = [
sys.executable,
os.path.join(common.SRC_DIR, 'tools', 'traffic_annotation', 'scripts',
'traffic_annotation_auditor_tests.py'),
'--build-path',
os.path.join(args.paths['checkout'], 'out', args.build_config_fs),
'--annotations-file',
annotations_filename,
]
rc = common.run_command(command_line)
# Update the Google Sheets on success, but only on the Windows trybot.
if rc == 0 and is_windows():
print("Tests succeeded. Updating annotations sheet...")
config_file = tempfile.NamedTemporaryFile(delete=False)
json.dump(SHEET_CONFIG, config_file, indent=4)
config_filename = config_file.name
config_file.close()
command_line = [
'vpython.bat',
os.path.join(common.SRC_DIR, 'tools', 'traffic_annotation', 'scripts',
'update_annotations_sheet.py'),
'--force',
'--config-file',
config_filename,
'--annotations-file',
annotations_filename,
]
rc = common.run_command(command_line)
try:
os.remove(config_filename)
except OSError:
pass
try:
os.remove(annotations_filename)
except OSError:
pass
json.dump({
'valid': True,
'failures': ['Please refer to stdout for errors.'] if rc else [],
}, args.output)
return rc
def main_compile_targets(args):
json.dump(['all'], args.output)
if __name__ == '__main__':
funcs = {
'run': main_run,
'compile_targets': main_compile_targets,
}
sys.exit(common.run_script(sys.argv[1:], funcs))
|
endlessm/chromium-browser
|
testing/scripts/test_traffic_annotation_auditor.py
|
Python
|
bsd-3-clause
| 2,554
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier définissant la classe BaseNoeud détaillée plus bas."""
class BaseNoeud:
"""Classe représentant la base d'un noeud.
Cette classe est héritée par tous les autres types de noeuds.
"""
importeur = None
def __init__(self):
"""Constructeur du noeud de base"""
self.nom = ""
self.suivant = None
def valider(self, personnage, dic_masques, commande, tester_fils=True):
"""Validation du noeud.
Cette méthode est à redéfinir dans chacune des classes-filles créée.
Chaque type de noeud a sa propre méthode de validation.
Dans tous les cas, une booléen doit être retourné :
- True si le noeud a pu être interprété ;
- False sinon.
Note : pour la plupart des noeuds, la validation est aussi fonction
des fils.
"""
raise NotImplementedError
def _get_fils(self):
"""Retourne les fils du noeud sous la forme d'une liste."""
return [self.suivant]
fils = property(_get_fils)
def afficher(self, personnage=None):
"""Retourne un affichage du masque pour les joueurs."""
return ""
|
stormi/tsunami
|
src/primaires/interpreteur/masque/noeuds/base_noeud.py
|
Python
|
bsd-3-clause
| 2,736
|
from math import isclose
import numpy as np
from pytest import fixture
from hoomd.box import Box
@fixture
def box_dict():
return dict(Lx=1, Ly=2, Lz=3, xy=1, xz=2, yz=3)
def test_base_constructor(box_dict):
box = Box(**box_dict)
for key in box_dict:
assert getattr(box, key) == box_dict[key]
@fixture
def base_box(box_dict):
return Box(**box_dict)
def test_cpp_python_correspondence(base_box):
cpp_obj = base_box._cpp_obj
cpp_L = cpp_obj.getL()
assert base_box.Lx == cpp_L.x and base_box.Ly == cpp_L.y \
and base_box.Lz == cpp_L.z
assert base_box.xy == cpp_obj.getTiltFactorXY()
assert base_box.xz == cpp_obj.getTiltFactorXZ()
assert base_box.yz == cpp_obj.getTiltFactorYZ()
def test_setting_lengths(base_box):
for attr in ['Lx', 'Ly', 'Lz']:
for L in np.linspace(1, 100, 10):
setattr(base_box, attr, L)
assert getattr(base_box, attr) == L
for L in np.linspace(1, 100, 10):
base_box.L = L
assert all(base_box.L == L)
base_box.L = [3, 2, 1]
assert all(base_box.L == [3, 2, 1])
def test_setting_tilts(base_box):
for attr in ['xy', 'xz', 'yz']:
for tilt in np.linspace(1, 100, 10):
setattr(base_box, attr, tilt)
assert getattr(base_box, attr) == tilt
for tilt in np.linspace(1, 100, 10):
base_box.tilts = tilt
assert all(base_box.tilts == tilt)
base_box.tilts = [3, 2, 1]
assert all(base_box.tilts == [3, 2, 1])
def test_is2D(base_box): # noqa: N802 - allow function name
base_box.Lz = 0
assert base_box.is2D
for L in np.linspace(1, 100, 10):
base_box.Lz = L
assert not base_box.is2D
def test_dimensions(base_box):
base_box.Lz = 0
assert base_box.dimensions == 2
for L in np.linspace(1, 100, 10):
base_box.Lz = L
assert base_box.dimensions == 3
def test_lattice_vectors(base_box):
expected_vectors = np.array([[1, 0, 0], [2, 2, 0], [6, 9, 3]],
dtype=np.float64)
assert np.allclose(base_box.lattice_vectors, expected_vectors)
box = Box.cube(4)
lattice_vectors = np.array([[4, 0, 0], [0, 4, 0], [0, 0, 4]])
assert np.allclose(box.lattice_vectors, lattice_vectors)
def get_aspect(L):
return np.array([L[0] / L[1], L[0] / L[2], L[1] / L[2]])
def test_scale(base_box):
aspect = get_aspect(base_box.L)
for s in np.linspace(0.5, 1.5, 10):
prev_vol = base_box.volume
base_box.scale(s)
assert np.allclose(aspect, get_aspect(base_box.L))
assert not isclose(prev_vol, base_box.volume)
L = base_box.L
s = np.array([1, 0.75, 0.5])
base_box.scale(s)
assert np.allclose(aspect * get_aspect(s), get_aspect(base_box.L))
assert np.allclose(base_box.L, L * s)
def test_volume(base_box):
assert isclose(base_box.volume, np.product(base_box.L))
for L in np.linspace(1, 10, 10):
box = Box.cube(L)
assert isclose(box.volume, L**3)
box = Box(L, L + 1, L + 2)
assert isclose(box.volume, L * (L + 1) * (L + 2))
def test_volume_setting(base_box):
aspect = get_aspect(base_box.L)
for v in np.linspace(1, 100, 10):
base_box.volume = v
assert np.allclose(aspect, get_aspect(base_box.L))
assert isclose(base_box.volume, v)
def test_periodic(base_box):
assert all(base_box.periodic)
@fixture
def expected_matrix(box_dict):
return np.array([
[
box_dict['Lx'], box_dict['Ly'] * box_dict['xy'],
box_dict['Lz'] * box_dict['xz']
],
[0, box_dict['Ly'], box_dict['Lz'] * box_dict['yz']],
[0, 0, box_dict['Lz']],
])
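# The fixture above encodes the upper-triangular box matrix convention
# exercised by test_matrix and test_lattice_vectors below: the columns of
# this matrix are the box lattice vectors (returned row-wise by
# Box.lattice_vectors).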
def test_matrix(base_box, expected_matrix):
assert np.allclose(base_box.matrix, expected_matrix)
base_box.xy *= 2
assert isclose(base_box.matrix[0, 1], 2 * expected_matrix[0, 1])
base_box.yz *= 0.5
assert isclose(base_box.matrix[1, 2], 0.5 * expected_matrix[1, 2])
base_box.Lx *= 3
assert isclose(base_box.matrix[0, 0], 3 * expected_matrix[0, 0])
@fixture
def new_box_matrix_dict():
Lx, Ly, Lz = 2, 4, 8
xy, xz, yz = 1, 3, 5
new_box_matrix = np.array([[Lx, Ly * xy, Lz * xz], [0, Ly, Lz * yz],
[0, 0, Lz]])
return dict(Lx=Lx, Ly=Ly, Lz=Lz, xy=xy, xz=xz, yz=yz, matrix=new_box_matrix)
def test_matrix_setting(base_box, new_box_matrix_dict):
base_box.matrix = new_box_matrix_dict['matrix']
assert np.allclose(new_box_matrix_dict['matrix'], base_box.matrix)
assert np.allclose(base_box.L, [
new_box_matrix_dict['Lx'], new_box_matrix_dict['Ly'],
new_box_matrix_dict['Lz']
])
assert np.allclose(base_box.tilts, [
new_box_matrix_dict['xy'], new_box_matrix_dict['xz'],
new_box_matrix_dict['yz']
])
def test_cube():
for L in np.linspace(1, 100, 10):
box = Box.cube(L)
assert all(box.L == L)
assert box.Lx == box.Ly == box.Lz == L
def test_square():
for L in np.linspace(1, 100, 10):
box = Box.square(L)
assert all(box.L == [L, L, 0])
assert box.Lx == box.Ly == L and box.Lz == 0
def test_from_matrix(new_box_matrix_dict):
box = Box.from_matrix(new_box_matrix_dict['matrix'])
assert np.allclose(new_box_matrix_dict['matrix'], box.matrix)
assert np.allclose(box.L, [
new_box_matrix_dict['Lx'], new_box_matrix_dict['Ly'],
new_box_matrix_dict['Lz']
])
assert np.allclose(box.tilts, [
new_box_matrix_dict['xy'], new_box_matrix_dict['xz'],
new_box_matrix_dict['yz']
])
def test_eq(base_box, box_dict):
box2 = Box(**box_dict)
assert base_box == box2
box2.Lx = 2
assert not base_box == box2
def test_neq(base_box, box_dict):
box2 = Box(**box_dict)
assert not base_box != box2
box2.Lx = 2
assert base_box != box2
|
joaander/hoomd-blue
|
hoomd/pytest/test_box.py
|
Python
|
bsd-3-clause
| 5,889
|
"""
This module provides the Koalix CRM core functionality
"""
|
tfroehlich82/koalixcrm
|
crm_core/__init__.py
|
Python
|
bsd-3-clause
| 64
|
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from jenkinsflow.flow import serial
from .framework import api_select
prefixed_jobs = """
serial flow: [
job: 'top_quick1'
serial flow: [
job: 'top_x_quick2-1'
]
serial flow: [
job: 'top_x_quick2-2'
]
serial flow: [
job: 'top_x_quick2-3'
]
job: 'top_quick3'
parallel flow: (
serial flow: [
job: 'top_y_z_quick4a'
]
serial flow: [
job: 'quick4b'
]
job: 'top_y_quick5'
)
]
"""
def test_prefix(api_type, capsys):
with api_select.api(__file__, api_type) as api:
def job(name):
api.job(name, exec_time=0.5, max_fails=0, expect_invocations=0, expect_order=None, params=None)
api.flow_job()
job('quick1')
index = 0
for index in 1, 2, 3:
job('x_quick2-' + str(index))
job('quick3')
job('y_z_quick4')
job('y_quick5')
with serial(api, timeout=70, report_interval=3, job_name_prefix='top_', just_dump=True) as ctrl1:
ctrl1.invoke('quick1')
for index in 1, 2, 3:
with ctrl1.serial(timeout=20, report_interval=3, job_name_prefix='x_') as ctrl2:
ctrl2.invoke('quick2-' + str(index))
ctrl1.invoke('quick3')
with ctrl1.parallel(timeout=40, report_interval=3, job_name_prefix='y_') as ctrl2:
with ctrl2.serial(timeout=40, report_interval=3, job_name_prefix='z_') as ctrl3a:
ctrl3a.invoke('quick4a')
# Reset prefix
with ctrl2.serial(timeout=40, report_interval=3, job_name_prefix=None) as ctrl3b:
ctrl3b.invoke('quick4b')
ctrl2.invoke('quick5')
sout, _ = capsys.readouterr()
assert prefixed_jobs.strip() in sout
|
lechat/jenkinsflow
|
test/prefix_test.py
|
Python
|
bsd-3-clause
| 1,965
|
# -*- coding: utf-8 -*-
from django import template
from django_users.forms import CreateUserForm
#from django.utils.translation import ugettext as _
register = template.Library()
@register.inclusion_tag('users/templatetags/registration.html', takes_context=True)
def registration_form(context, form=None, *args, **kwargs):
    if not form:
        # Instantiate the form class so the template can render it.
        form = CreateUserForm()
return {
'form': form,
}
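# Illustrative template usage (assuming the library loads under its module
# name): {% load users %} then {% registration_form %} renders
# users/templatetags/registration.html with 'form' in its context.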
|
AdrianRibao/django-users
|
django_users/templatetags/users.py
|
Python
|
bsd-3-clause
| 422
|
from hiveplotter import HivePlot
import networkx as nx
import random
from unittest import TestCase
SEED = 1
NTYPES = ['A', 'B', 'C']
class SimpleCase(TestCase):
def make_graph(self):
G = nx.fast_gnp_random_graph(30, 0.2, seed=SEED)
for node, data in G.nodes_iter(data=True):
data['ntype'] = random.choice(NTYPES)
for src, tgt, data in G.edges_iter(data=True):
data['weight'] = random.random()
return G
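    # nodes_iter()/edges_iter() above are the networkx 1.x iteration API;
    # networkx 2.x replaced them with G.nodes(data=True) / G.edges(data=True).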
def test_simple(self):
G = self.make_graph()
H = HivePlot(G, node_class_attribute='ntype')
H.draw()
H.save_plot('./output/main.pdf')
def test_dump_cfg(self):
G = self.make_graph()
H = HivePlot(G, node_class_attribute='ntype')
H.draw()
print(H.dump_config())
if __name__ == '__main__':
tests = SimpleCase()
tests.test_simple()
|
clbarnes/hiveplotter
|
test/simple_tests.py
|
Python
|
bsd-3-clause
| 882
|
#!/usr/bin/env python
import argparse
import sys
import re
import settings
import psycopg2
import imp
########################################################################################
class UpdSource():
def __init__(self,in_source,last_update):
self.upd_dict=dict()
self.source=in_source
self.src_handler=None
self.last_update=last_update
if self.source=='file':
self._parse_file()
def _parse_file(self):
try:
src=open(settings.repo_file_name,'r')
except IOError as e:
print "ERROR! Cannot open file"
sys.exit(1)
try:
repo=imp.load_source('repo','.',src)
except Exception as e:
print "Error happened: {0}".format(e)
sys.exit(1)
for var in dir(repo):
if re.match('^upd_\d',var):
self.upd_dict[int(var[4:])]=eval('repo.'+str(var))
# lines=[]
# for line in src:
# if re.match('^upd_\d',line):
# lines.append(line.strip())
# elif len(line.strip()) == 0:
# continue
# else:
# lines[-1] += ' '+line.strip()
#
# for upd in lines:
# a=upd.split('=',1)
# if re.match('^upd_\d$',a[0]) and int(a[0][4:]) > self.last_update:
# self.upd_dict[int(a[0][4:])]=a[1][1:-1]
def get_upd_dict(self):
return self.upd_dict
def get_total_updates(self):
return len(self.upd_dict)
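# The updates source file (settings.repo_file_name) consumed by UpdSource
# above is expected to define numbered string variables, for example:
#   upd_1 = "CREATE TABLE t (id integer)"
#   upd_2 = "ALTER TABLE t ADD COLUMN name text"
# (illustrative statements; _parse_file() imports the file as a module and
# collects every variable matching upd_<number>).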
class DbState():
def __init__(self,in_db_conn):
self.db_conn=in_db_conn
self.last_applied=None
self.installed=-1
# self.check_installed()
def __del__(self):
if not self.db_conn.closed:
self.db_conn.close()
def get_last_applied(self):
if not self.last_applied:
cur=self.db_conn.cursor()
try:
cur.execute(settings.get_last_applied_stmt)
except Exception as e:
print "Error! Cannot get last applied update! {0}".format(e.pgerror)
return -1
self.last_applied=cur.fetchone()[0]
cur.close()
return self.last_applied
def _check_installed(self):
cur=self.db_conn.cursor()
try:
cur.execute(settings.get_install_check_stmt)
except Exception as e:
print "ERROR! Cannot determine installed! {0}".format(e.pgerror)
return False
self.installed=cur.fetchone()[0]
return True
def install(self):
cur=self.db_conn.cursor()
try:
cur.execute(settings.install_stmt)
except Exception as e:
print "ERROR! Cannot create db_update table!{0}".format(e.pgerror)
return False
else:
self.db_conn.commit()
print "Application successfully installed"
return True
def get_installed(self):
if self.installed == -1:
self._check_installed()
return self.installed
class Apply():
def __init__(self,in_db_conn):
self.db_conn=in_db_conn
self.num_applied=0
def __del__(self):
if not self.db_conn.closed:
self.db_conn.close()
def _apply_one_update(self,number,stmt,dry_run):
cur=self.db_conn.cursor()
try:
cur.mogrify(stmt)
except Exception as e:
print "ERROR! Mistake in update {0}{1}".format(number,e.pgerror)
return False
if dry_run:
print "upd_{0} => {1}".format(number,stmt)
else:
try:
cur.execute(stmt)
cur.execute(settings.confirm_stmt,(number,stmt))
except Exception as e:
print "ERROR! Cannot run update {0}\n{1}".format(number,e.pgerror)
# print "\n"+stmt+"\n"
return False
return True
def iterate_over(self,in_upd_dict,dry_run):
        for num,stmt in sorted(in_upd_dict.iteritems()):
res=self._apply_one_update(num,stmt,dry_run)
if res and not dry_run:
self.db_conn.commit()
self.num_applied += 1
if args.verbose:
print "Update number {0} applied successfully"
elif not res:
break
def get_num_applied(self):
return self.num_applied
#########################################################################
parser = argparse.ArgumentParser(description='Database changes and updates tracking system')
parser.add_argument('-r',action='store_true',dest='dry_run',default=False,help="Show updates and exit")
parser.add_argument('-i','--install',action='store_true',default=False,help="Install application and go to dry_run mode")
parser.add_argument('-s',nargs=1,choices=['file','SQLite'],default='file',dest='source',
help="Source for updates. SQLite is not supported currently")
parser.add_argument('-v','--verbose',action='store_true',default=False,help="Show additional info on terminal")
parser.add_argument('-l','--last',dest='last_applied',action='store_true',default=False,
help="Show last applied update number and exit")
args=parser.parse_args()
if args.install:
args.dry_run=True
try:
conn=psycopg2.connect(settings.custom_dsn('db_handler_1'))
except Exception as e:
print "ERROR! Cannot connect to database {0}".format(e)
sys.exit(1)
db_st=DbState(conn)
installed=db_st.get_installed()
#last_applied=db_st.get_last_applied()
#if last_applied == -1:
# conn.close()
# sys.exit()
if installed == 0:
install=db_st.install()
if not install:
conn.close()
sys.exit(1)
elif installed == 1:
if args.install:
print "Application already installed"
elif installed == -1:
conn.close()
sys.exit(1)
if args.install:
conn.close()
sys.exit(1)
last_applied=db_st.get_last_applied()
if args.last_applied:
if last_applied == 0:
print "No updates applied"
else:
print "Last applied update: upd_{0}".format(last_applied)
conn.close()
sys.exit()
us=UpdSource(args.source,last_applied)
upd_dict=us.get_upd_dict()
ap=Apply(conn)
ap.iterate_over(upd_dict,args.dry_run)
if not args.dry_run:
print "Applied {0} updates out of {1}".format(ap.get_num_applied(),us.get_total_updates())
|
movsesyan1970/pg_repo
|
db_update.py
|
Python
|
bsd-3-clause
| 5,556
|
from streamable_archive_tests import *
from delivery_collection_tests import *
|
vegarang/devilry-django
|
devilry/utils/tests/__init__.py
|
Python
|
bsd-3-clause
| 81
|
"""
This module contains dsolve() and different helper functions that it
uses.
dsolve() solves ordinary differential equations. See the docstring on
the various functions for their uses. Note that partial differential
equations support is in pde.py. Note that ode_hint() functions have
docstrings describing their various methods, but they are intended for
internal use. Use dsolve(ode, func, hint=hint) to solve an ode using a
specific hint. See also the docstring on dsolve().
**Functions in this module**
These are the user functions in this module:
- dsolve() - Solves ODEs.
- classify_ode() - Classifies ODEs into possible hints for dsolve().
- checkodesol() - Checks if an equation is the solution to an ODE.
- ode_order() - Returns the order (degree) of an ODE.
- homogeneous_order() - Returns the homogeneous order of an
expression.
These are the non-solver helper functions that are for internal use.
The user should use the various options to dsolve() to obtain the
functionality provided by these functions:
- odesimp() - Does all forms of ODE simplification.
- ode_sol_simplicity() - A key function for comparing solutions by
simplicity.
- constantsimp() - Simplifies arbitrary constants.
- constant_renumber() - Renumber arbitrary constants
- _handle_Integral() - Evaluate unevaluated Integrals.
- preprocess - prepare the equation and detect function to solve for
See also the docstrings of these functions.
**Currently implemented solver methods**
The following methods are implemented for solving ordinary differential
equations. See the docstrings of the various ode_hint() functions for
more information on each (run help(ode)):
- 1st order separable differential equations
- 1st order differential equations whose coefficients or dx and dy
are functions homogeneous of the same order.
- 1st order exact differential equations.
- 1st order linear differential equations
- 1st order Bernoulli differential equations.
- 2nd order Liouville differential equations.
- nth order linear homogeneous differential equation with constant
coefficients.
- nth order linear inhomogeneous differential equation with constant
coefficients using the method of undetermined coefficients.
- nth order linear inhomogeneous differential equation with constant
coefficients using the method of variation of parameters.
**Philosophy behind this module**
This module is designed to make it easy to add new ODE solving methods
without having to mess with the solving code for other methods. The
idea is that there is a classify_ode() function, which takes in an ODE
and tells you what hints, if any, will solve the ODE. It does this
without attempting to solve the ODE, so it is fast. Each solving method
is a hint, and it has its own function, named ode_hint. That function
takes in the ODE and any match expression gathered by classify_ode and
returns a solved result. If this result has any integrals in it, the
ode_hint function will return an unevaluated Integral class. dsolve(),
which is the user wrapper function around all of this, will then call
odesimp() on the result, which, among other things, will attempt to
solve the equation for the dependent variable (the function we are
solving for), simplify the arbitrary constants in the expression, and
evaluate any integrals, if the hint allows it.
**How to add new solution methods**
If you have an ODE that you want dsolve() to be able to solve, try to
avoid adding special case code here. Instead, try finding a general
method that will solve your ODE, as well as others. This way, the ode
module will become more robust, and unhindered by special case hacks.
WolframAlpha and Maple's DETools[odeadvisor] function are two resources
you can use to classify a specific ODE. It is also better for a method
to work with an nth order ODE instead of only with specific orders, if
possible.
To add a new method, there are a few things that you need to do. First,
you need a hint name for your method. Try to name your hint so that it
is unambiguous with all other methods, including ones that may not be
implemented yet. If your method uses integrals, also include a
"hint_Integral" hint. If there is more than one way to solve ODEs with
your method, include a hint for each one, as well as a "hint_best" hint.
Your ode_hint_best() function should choose the best using min with
ode_sol_simplicity as the key argument. See
ode_1st_homogeneous_coeff_best(), for example. The function that uses
your method will be called ode_hint(), so the hint must only use
characters that are allowed in a Python function name (alphanumeric
characters and the underscore '_' character). Include a function for
every hint, except for "_Integral" hints (dsolve() takes care of those
automatically). Hint names should be all lowercase, unless a word is
commonly capitalized (such as Integral or Bernoulli). If you have a hint
that you do not want to run with "all_Integral" that doesn't have an
"_Integral" counterpart (such as a best hint that would defeat the
purpose of "all_Integral"), you will need to remove it manually in the
dsolve() code. See also the classify_ode() docstring for guidelines on
writing a hint name.
Determine *in general* how the solutions returned by your method
compare with other methods that can potentially solve the same ODEs.
Then, put your hints in the allhints tuple in the order that they should
be called. The ordering of this tuple determines which hints are
default. Note that exceptions are ok, because it is easy for the user to
choose individual hints with dsolve(). In general, "_Integral" variants
should go at the end of the list, and "_best" variants should go before
the various hints they apply to. For example, the
"undetermined_coefficients" hint comes before the
"variation_of_parameters" hint because, even though variation of
parameters is more general than undetermined coefficients, undetermined
coefficients generally returns cleaner results for the ODEs that it can
solve than variation of parameters does, and it does not require
integration, so it is much faster.
Next, you need to have a match expression or a function that matches the
type of the ODE, which you should put in classify_ode() (if the match
function is more than just a few lines, like
_undetermined_coefficients_match(), it should go outside of
classify_ode()). It should match the ODE without solving for it as much
as possible, so that classify_ode() remains fast and is not hindered by
bugs in solving code. Be sure to consider corner cases. For example, if
your solution method involves dividing by something, make sure you
exclude the case where that division will be 0.
In most cases, the matching of the ODE will also give you the various
parts that you need to solve it. You should put that in a dictionary
(.match() will do this for you), and add that as matching_hints['hint']
= matchdict in the relevant part of classify_ode. classify_ode will
then send this to dsolve(), which will send it to your function as the
match argument. Your function should be named ode_hint(eq, func, order,
match). If you need to send more information, put it in the match
dictionary. For example, if you had to substitute in a dummy variable
in classify_ode to match the ODE, you will need to pass it to your
function using the match dict to access it. You can access the
independent variable using func.args[0], and the dependent variable (the
function you are trying to solve for) as func.func. If, while trying to
solve the ODE, you find that you cannot, raise NotImplementedError.
dsolve() will catch this error with the "all" meta-hint, rather than
causing the whole routine to fail.
Add a docstring to your function that describes the method employed.
Like with anything else in SymPy, you will need to add a doctest to the
docstring, in addition to real tests in test_ode.py. Try to maintain
consistency with the other hint functions' docstrings. Add your method
to the list at the top of this docstring. Also, add your method to
ode.rst in the docs/src directory, so that the Sphinx docs will pull its
docstring into the main SymPy documentation. Be sure to make the Sphinx
documentation by running "make html" from within the doc directory to
verify that the docstring formats correctly.
If your solution method involves integrating, use C.Integral() instead
of integrate(). This allows the user to bypass hard/slow integration by
using the "_Integral" variant of your hint. In most cases, calling
.doit() will integrate your solution. If this is not the case, you will
need to write special code in _handle_Integral(). Arbitrary constants
should be symbols named C1, C2, and so on. All solution methods should
return an equality instance. If you need an arbitrary number of
arbitrary constants, you can use constants =
numbered_symbols(prefix='C', cls=Symbol, start=1). If it is
possible to solve for the dependent function in a general way, do so.
Otherwise, do as best as you can, but do not call solve in your
ode_hint() function. odesimp() will attempt to solve the solution for
you, so you do not need to do that. Lastly, if your ODE has a common
simplification that can be applied to your solutions, you can add a
special case in odesimp() for it. For example, solutions returned from
the "1st_homogeneous_coeff" hints often have many log() terms, so
odesimp() calls logcombine() on them (it also helps to write the
arbitrary constant as log(C1) instead of C1 in this case). Also
consider common ways that you can rearrange your solution to have
constantsimp() take better advantage of it. It is better to put
simplification in odesimp() than in your method, because it can then be
turned off with the simplify flag in dsolve(). If you have any
extraneous simplification in your function, be sure to only run it using
"if match.get('simplify', True):", especially if it can be slow or if it
can reduce the domain of the solution.
Finally, as with every contribution to SymPy, your method will need to
be tested. Add a test for each method in test_ode.py. Follow the
conventions there, i.e., test the solver using dsolve(eq, f(x),
hint=your_hint), and also test the solution using checkodesol (you can
put these in a separate tests and skip/XFAIL if it runs too slow/doesn't
work). Be sure to call your hint specifically in dsolve, that way the
test won't be broken simply by the introduction of another matching
hint. If your method works for higher order (>1) ODEs, you will need to
run sol = constant_renumber(sol, 'C', 1, order), for each solution, where
order is the order of the ODE. This is because constant_renumber renumbers
the arbitrary constants by printing order, which is platform dependent.
Try to test every corner case of your solver, including a range of
orders if it is an nth order solver, but if your solver is slow, such as
if it involves hard integration, try to keep the test run time down.
Feel free to refactor existing hints to avoid duplicating code or
creating inconsistencies. If you can show that your method exactly
duplicates an existing method, including in the simplicity and speed of
obtaining the solutions, then you can remove the old, less general
method. The existing code is tested extensively in test_ode.py, so if
anything is broken, one of those tests will surely fail.
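As a rough, purely illustrative sketch (the hint name and match key are
hypothetical, not part of sympy), a finished method for the trivial
quadrature case y' = F(x) would have this shape:
    def ode_my_quadrature(eq, func, order, match):
        x = func.args[0]
        C1 = Symbol('C1')
        # match['F'] is assumed to be gathered by the matching code that
        # would be added to classify_ode().
        return Equality(func, C.Integral(match['F'], x) + C1)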
"""
from collections import defaultdict
from sympy.core import Add, C, S, Mul, Pow, oo
from sympy.core.compatibility import iterable, is_sequence, set_union
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.core.function import Derivative, AppliedUndef, diff, expand_mul
from sympy.core.multidimensional import vectorize
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild, Dummy
from sympy.core.sympify import sympify
from sympy.functions import cos, exp, im, log, re, sin, tan, sqrt, sign
from sympy.matrices import wronskian
from sympy.polys import Poly, RootOf, terms_gcd
from sympy.series import Order
from sympy.simplify import collect, logcombine, powsimp, separatevars, \
simplify, trigsimp, denom
from sympy.solvers import solve
from sympy.utilities import numbered_symbols, default_sort_key, sift
# This is a list of hints in the order that they should be applied. That means
# that, in general, hints earlier in the list should produce simpler results
# than those later for ODEs that fit both. This is just based on my own
# empirical observations, so if you find that *in general*, a hint later in
# the list is better than one before it, feel free to modify the list. Note
# however that you can easily override the hint used in dsolve() for a specific ODE
# (see the docstring). In general, "_Integral" hints should be grouped
# at the end of the list, unless there is a method that returns an unevaluable
# integral most of the time (which should surely go near the end of the list
# anyway).
# "default", "all", "best", and "all_Integral" meta-hints should not be
# included in this list, but "_best" and "_Integral" hints should be included.
allhints = ("separable", "1st_exact", "1st_linear", "Bernoulli", "Riccati_special_minus2",
"1st_homogeneous_coeff_best", "1st_homogeneous_coeff_subs_indep_div_dep",
"1st_homogeneous_coeff_subs_dep_div_indep", "nth_linear_constant_coeff_homogeneous",
"nth_linear_constant_coeff_undetermined_coefficients",
"nth_linear_constant_coeff_variation_of_parameters",
"Liouville", "separable_Integral", "1st_exact_Integral", "1st_linear_Integral",
"Bernoulli_Integral", "1st_homogeneous_coeff_subs_indep_div_dep_Integral",
"1st_homogeneous_coeff_subs_dep_div_indep_Integral",
"nth_linear_constant_coeff_variation_of_parameters_Integral",
"Liouville_Integral")
def preprocess(expr, func=None, hint='_Integral'):
"""Prepare expr for solving by making sure that differentiation
is done so that only func remains in unevaluated derivatives and
(if hint doesn't end with _Integral) that doit is applied to all
other derivatives. If hint is None, don't do any differentiation.
(Currently this may cause some simple differential equations to
fail.)
In case func is None, an attempt will be made to autodetect the
function to be solved for.
>>> from sympy.solvers.ode import preprocess
>>> from sympy import Derivative, Function, Integral, sin
>>> from sympy.abc import x, y, z
>>> f, g = map(Function, 'fg')
Apply doit to derivatives that contain more than the function
of interest:
>>> preprocess(Derivative(f(x) + x, x))
(Derivative(f(x), x) + 1, f(x))
Do others if the differentiation variable(s) intersect with those
of the function of interest or contain the function of interest:
>>> preprocess(Derivative(g(x), y, z), f(y))
(0, f(y))
>>> preprocess(Derivative(f(y), z), f(y))
(0, f(y))
Do others if the hint doesn't end in '_Integral' (the default
assumes that it does):
>>> preprocess(Derivative(g(x), y), f(x))
(Derivative(g(x), y), f(x))
>>> preprocess(Derivative(f(x), y), f(x), hint='')
(0, f(x))
Don't do any derivatives if hint is None:
>>> preprocess(Derivative(f(x) + 1, x) + Derivative(f(x), y), f(x), hint=None)
(Derivative(f(x) + 1, x) + Derivative(f(x), y), f(x))
If it's not clear what the function of interest is, it must be given:
>>> eq = Derivative(f(x) + g(x), x)
>>> preprocess(eq, g(x))
(Derivative(f(x), x) + Derivative(g(x), x), g(x))
>>> try: preprocess(eq)
... except ValueError: print "A ValueError was raised."
A ValueError was raised.
"""
derivs = expr.atoms(Derivative)
if not func:
funcs = set_union(*[d.atoms(AppliedUndef) for d in derivs])
if len(funcs) != 1:
raise ValueError('The function cannot be automatically detected for %s.' % expr)
func = funcs.pop()
fvars = set(func.args)
if hint is None:
return expr, func
reps = [(d, d.doit()) for d in derivs if
not hint.endswith('_Integral') or
d.has(func) or
set(d.variables) & fvars]
eq = expr.subs(reps)
return eq, func
def sub_func_doit(eq, func, new):
"""When replacing the func with something else, we usually
want the derivative evaluated, so this function helps in
making that happen.
To keep subs from having to look through all derivatives, we
mask them off with dummy variables, do the func sub, and then
replace masked off derivatives with their doit values.
Examples
========
>>> from sympy import Derivative, symbols, Function
>>> from sympy.solvers.ode import sub_func_doit
>>> x, z = symbols('x, z')
>>> y = Function('y')
>>> sub_func_doit(3*Derivative(y(x), x) - 1, y(x), x)
2
>>> sub_func_doit(x*Derivative(y(x), x) - y(x)**2 + y(x), y(x),
... 1/(x*(z + 1/x)))
    x*(-1/(x**2*(z + 1/x)) + 1/(x**3*(z + 1/x)**2)) + 1/(x*(z + 1/x)) - 1/(x**2*(z + 1/x)**2)
"""
reps = {}
repu = {}
for d in eq.atoms(Derivative):
u = C.Dummy('u')
repu[u] = d.subs(func, new).doit()
reps[d] = u
return eq.subs(reps).subs(func, new).subs(repu)
def dsolve(eq, func=None, hint="default", simplify=True, prep=True, **kwargs):
"""
Solves any (supported) kind of ordinary differential equation.
**Usage**
dsolve(eq, f(x), hint) -> Solve ordinary differential equation
eq for function f(x), using method hint.
**Details**
``eq`` can be any supported ordinary differential equation (see
the ode docstring for supported methods). This can either
be an Equality, or an expression, which is assumed to be
equal to 0.
``f(x)`` is a function of one variable whose derivatives in that
variable make up the ordinary differential equation eq. In many
cases it is not necessary to provide this; it will be autodetected
(and an error raised if it couldn't be detected).
``hint`` is the solving method that you want dsolve to use. Use
classify_ode(eq, f(x)) to get all of the possible hints for
an ODE. The default hint, 'default', will use whatever hint
is returned first by classify_ode(). See Hints below for
more options that you can use for hint.
``simplify`` enables simplification by odesimp(). See its
docstring for more information. Turn this off, for example,
to disable solving of solutions for func or simplification
of arbitrary constants. It will still integrate with this
hint. Note that the solution may contain more arbitrary
constants than the order of the ODE with this option
enabled.
``prep``, when False and when ``func`` is given, will skip the
preprocessing step where the equation is cleaned up so it
is ready for solving.
**Hints**
Aside from the various solving methods, there are also some
meta-hints that you can pass to dsolve():
"default":
This uses whatever hint is returned first by
classify_ode(). This is the default argument to
dsolve().
"all":
To make dsolve apply all relevant classification hints,
use dsolve(ODE, func, hint="all"). This will return a
dictionary of hint:solution terms. If a hint causes
dsolve to raise the NotImplementedError, value of that
hint's key will be the exception object raised. The
dictionary will also include some special keys:
- order: The order of the ODE. See also ode_order().
- best: The simplest hint; what would be returned by
"best" below.
- best_hint: The hint that would produce the solution
given by 'best'. If more than one hint produces the
best solution, the first one in the tuple returned by
classify_ode() is chosen.
- default: The solution that would be returned by
default. This is the one produced by the hint that
appears first in the tuple returned by classify_ode().
"all_Integral":
This is the same as "all", except if a hint also has a
corresponding "_Integral" hint, it only returns the
"_Integral" hint. This is useful if "all" causes
dsolve() to hang because of a difficult or impossible
integral. This meta-hint will also be much faster than
"all", because integrate() is an expensive routine.
"best":
To have dsolve() try all methods and return the simplest
one. This takes into account whether the solution is
solvable in the function, whether it contains any
Integral classes (i.e. unevaluatable integrals), and
which one is the shortest in size.
See also the classify_ode() docstring for more info on hints,
and the ode docstring for a list of all supported hints.
**Tips**
- You can declare the derivative of an unknown function this way:
>>> from sympy import Function, Derivative
>>> from sympy.abc import x # x is the independent variable
>>> f = Function("f")(x) # f is a function of x
>>> # f_ will be the derivative of f with respect to x
>>> f_ = Derivative(f, x)
- See test_ode.py for many tests, which serves also as a set of
examples for how to use dsolve().
- dsolve always returns an Equality class (except for the case
when the hint is "all" or "all_Integral"). If possible, it
solves the solution explicitly for the function being solved
for. Otherwise, it returns an implicit solution.
- Arbitrary constants are symbols named C1, C2, and so on.
- Because all solutions should be mathematically equivalent,
some hints may return the exact same result for an ODE. Often,
though, two different hints will return the same solution
formatted differently. The two should be equivalent. Also
note that sometimes the values of the arbitrary constants in
two different solutions may not be the same, because one
constant may have "absorbed" other constants into it.
        - Do help(ode.ode_hintname) to get more information on a
specific hint, where hintname is the name of a hint without
"_Integral".
Examples
========
>>> from sympy import Function, dsolve, Eq, Derivative, sin, cos
>>> from sympy.abc import x
>>> f = Function('f')
>>> dsolve(Derivative(f(x),x,x)+9*f(x), f(x))
f(x) == C1*sin(3*x) + C2*cos(3*x)
>>> dsolve(sin(x)*cos(f(x)) + cos(x)*sin(f(x))*f(x).diff(x), f(x),
... hint='separable', simplify=False)
-log(sin(f(x))**2 - 1)/2 == C1 + log(sin(x)**2 - 1)/2
>>> dsolve(sin(x)*cos(f(x)) + cos(x)*sin(f(x))*f(x).diff(x), f(x),
... hint='1st_exact')
f(x) == acos(C1/cos(x))
>>> dsolve(sin(x)*cos(f(x)) + cos(x)*sin(f(x))*f(x).diff(x), f(x),
... hint='best')
f(x) == acos(C1/cos(x))
>>> # Note that even though separable is the default, 1st_exact produces
>>> # a simpler result in this case.
"""
# TODO: Implement initial conditions
# See issue 1621. We first need a way to represent things like f'(0).
if isinstance(eq, Equality):
eq = eq.lhs - eq.rhs
# preprocess the equation and find func if not given
if prep or func is None:
eq, func = preprocess(eq, func)
prep = False
# Magic that should only be used internally. Prevents classify_ode from
# being called more than it needs to be by passing its results through
# recursive calls.
if kwargs.get('classify', True):
hints = classify_ode(eq, func, dict=True, prep=prep)
else:
# Here is what all this means:
#
# hint: The hint method given to dsolve() by the user.
# hints: The dictionary of hints that match the ODE, along with
# other information (including the internal pass-through magic).
# default: The default hint to return, the first hint from allhints
# that matches the hint. This is obtained from classify_ode().
# match: The hints dictionary contains a match dictionary for each hint
# (the parts of the ODE for solving). When going through the
# hints in "all", this holds the match string for the current
# hint.
# order: The order of the ODE, as determined by ode_order().
hints = kwargs.get('hint',
{'default': hint,
hint: kwargs['match'],
'order': kwargs['order']})
if hints['order'] == 0:
raise ValueError(str(eq) + " is not a differential equation in " + str(func))
if not hints['default']:
# classify_ode will set hints['default'] to None if no hints match.
raise NotImplementedError("dsolve: Cannot solve " + str(eq))
if hint == 'default':
return dsolve(eq, func, hint=hints['default'], simplify=simplify,
prep=prep, classify=False, order=hints['order'],
match=hints[hints['default']])
elif hint in ('all', 'all_Integral', 'best'):
retdict = {}
failedhints = {}
gethints = set(hints) - set(['order', 'default', 'ordered_hints'])
if hint == 'all_Integral':
for i in hints:
if i.endswith('_Integral'):
gethints.remove(i[:-len('_Integral')])
# special case
if "1st_homogeneous_coeff_best" in gethints:
gethints.remove("1st_homogeneous_coeff_best")
for i in gethints:
try:
sol = dsolve(eq, func, hint=i, simplify=simplify, prep=prep,
classify=False, order=hints['order'], match=hints[i])
except NotImplementedError, detail: # except NotImplementedError as detail:
failedhints[i] = detail
else:
retdict[i] = sol
retdict['best'] = min(retdict.values(), key=lambda x:
ode_sol_simplicity(x, func, trysolving=not simplify))
if hint == 'best':
return retdict['best']
for i in hints['ordered_hints']:
if retdict['best'] == retdict.get(i, None):
retdict['best_hint'] = i
break
retdict['default'] = hints['default']
retdict['order'] = sympify(hints['order'])
retdict.update(failedhints)
return retdict
elif hint not in allhints: # and hint not in ('default', 'ordered_hints'):
raise ValueError("Hint not recognized: " + hint)
elif hint not in hints:
raise ValueError("ODE " + str(eq) + " does not match hint " + hint)
elif hint.endswith('_Integral'):
solvefunc = globals()['ode_' + hint[:-len('_Integral')]]
else:
solvefunc = globals()['ode_' + hint] # convert the string into a function
# odesimp() will attempt to integrate, if necessary, apply constantsimp(),
# attempt to solve for func, and apply any other hint specific simplifications
if simplify:
rv = odesimp(solvefunc(eq, func, order=hints['order'],
match=hints[hint]), func, hints['order'], hint)
else:
# We still want to integrate (you can disable it separately with the hint)
r = hints[hint]
r['simplify'] = False # Some hints can take advantage of this option
rv = _handle_Integral(solvefunc(eq, func, order=hints['order'],
match=hints[hint]), func, hints['order'], hint)
return rv
def classify_ode(eq, func=None, dict=False, prep=True):
"""
Returns a tuple of possible dsolve() classifications for an ODE.
    The tuple is ordered so that the first item is the classification that
    dsolve() uses to solve the ODE by default. In general,
    classifications near the beginning of the list will produce
    better solutions faster than those near the end, though there are
    always exceptions. To make dsolve use a different classification,
use dsolve(ODE, func, hint=<classification>). See also the dsolve()
docstring for different meta-hints you can use.
If ``dict`` is true, classify_ode() will return a dictionary of
hint:match expression terms. This is intended for internal use by
dsolve(). Note that because dictionaries are ordered arbitrarily,
this will most likely not be in the same order as the tuple.
    Unless ``prep`` is False and ``func`` is given, the equation
    will be preprocessed to put it in standard form for classification.
You can get help on different hints by doing help(ode.ode_hintname),
where hintname is the name of the hint without "_Integral".
See sympy.ode.allhints or the sympy.ode docstring for a list of all
supported hints that can be returned from classify_ode.
Notes
=====
These are remarks on hint names.
*"_Integral"*
If a classification has "_Integral" at the end, it will return
the expression with an unevaluated Integral class in it. Note
that a hint may do this anyway if integrate() cannot do the
integral, though just using an "_Integral" will do so much
faster. Indeed, an "_Integral" hint will always be faster than
its corresponding hint without "_Integral" because integrate()
is an expensive routine. If dsolve() hangs, it is probably
because integrate() is hanging on a tough or impossible
integral. Try using an "_Integral" hint or "all_Integral" to
get it return something.
Note that some hints do not have "_Integral" counterparts. This
is because integrate() is not used in solving the ODE for those
        methods. For example, nth order linear homogeneous ODEs with
constant coefficients do not require integration to solve, so
there is no "nth_linear_homogeneous_constant_coeff_Integrate"
hint. You can easily evaluate any unevaluated Integrals in an
expression by doing expr.doit().
*Ordinals*
Some hints contain an ordinal such as "1st_linear". This is to
help differentiate them from other hints, as well as from other
methods that may not be implemented yet. If a hint has "nth" in
it, such as the "nth_linear" hints, this means that the method
        used applies to ODEs of any order.
*"indep" and "dep"*
Some hints contain the words "indep" or "dep". These reference
the independent variable and the dependent function,
respectively. For example, if an ODE is in terms of f(x), then
"indep" will refer to x and "dep" will refer to f.
*"subs"*
        If a hint has the word "subs" in it, it means that the ODE is
solved by substituting the expression given after the word
"subs" for a single dummy variable. This is usually in terms of
"indep" and "dep" as above. The substituted expression will be
written only in characters allowed for names of Python objects,
meaning operators will be spelled out. For example, indep/dep
will be written as indep_div_dep.
*"coeff"*
The word "coeff" in a hint refers to the coefficients of
something in the ODE, usually of the derivative terms. See the
docstring for the individual methods for more info (help(ode)).
        This is in contrast to "coefficients", as in
"undetermined_coefficients", which refers to the common name of
a method.
*"_best"*
Methods that have more than one fundamental way to solve will
have a hint for each sub-method and a "_best"
meta-classification. This will evaluate all hints and return the
best, using the same considerations as the normal "best"
meta-hint.
Examples
========
>>> from sympy import Function, classify_ode, Eq
>>> from sympy.abc import x
>>> f = Function('f')
>>> classify_ode(Eq(f(x).diff(x), 0), f(x))
('separable', '1st_linear', '1st_homogeneous_coeff_best',
'1st_homogeneous_coeff_subs_indep_div_dep',
'1st_homogeneous_coeff_subs_dep_div_indep',
'nth_linear_constant_coeff_homogeneous', 'separable_Integral',
'1st_linear_Integral',
'1st_homogeneous_coeff_subs_indep_div_dep_Integral',
'1st_homogeneous_coeff_subs_dep_div_indep_Integral')
>>> classify_ode(f(x).diff(x, 2) + 3*f(x).diff(x) + 2*f(x) - 4)
('nth_linear_constant_coeff_undetermined_coefficients',
'nth_linear_constant_coeff_variation_of_parameters',
'nth_linear_constant_coeff_variation_of_parameters_Integral')
"""
from sympy import expand
if func and len(func.args) != 1:
raise ValueError("dsolve() and classify_ode() only work with functions " + \
"of one variable")
if prep or func is None:
eq, func_ = preprocess(eq, func)
if func is None:
func = func_
x = func.args[0]
f = func.func
y = Dummy('y')
if isinstance(eq, Equality):
if eq.rhs != 0:
return classify_ode(eq.lhs-eq.rhs, func, prep=False)
eq = eq.lhs
order = ode_order(eq, f(x))
# hint:matchdict or hint:(tuple of matchdicts)
# Also will contain "default":<default hint> and "order":order items.
matching_hints = {"order": order}
if not order:
if dict:
matching_hints["default"] = None
return matching_hints
else:
return ()
df = f(x).diff(x)
a = Wild('a', exclude=[f(x)])
b = Wild('b', exclude=[f(x)])
c = Wild('c', exclude=[f(x)])
d = Wild('d', exclude=[df, f(x).diff(x, 2)])
e = Wild('e', exclude=[df])
k = Wild('k', exclude=[df])
n = Wild('n', exclude=[f(x)])
c1 = Wild('c1', exclude=[x])
a2 = Wild('a2', exclude=[x, f(x), df])
b2 = Wild('b2', exclude=[x, f(x), df])
c2 = Wild('c2', exclude=[x, f(x), df])
d2 = Wild('d2', exclude=[x, f(x), df])
eq = expand(eq)
    # Precondition to try to remove f(x) from highest order derivative
reduced_eq = None
if eq.is_Add:
deriv_coef = eq.coeff(f(x).diff(x, order))
if deriv_coef != 1:
r = deriv_coef.match(a*f(x)**c1)
if r and r[c1]:
den = f(x)**r[c1]
reduced_eq = Add(*[arg/den for arg in eq.args])
if not reduced_eq:
reduced_eq = eq
if order == 1:
# Linear case: a(x)*y'+b(x)*y+c(x) == 0
if eq.is_Add:
ind, dep = reduced_eq.as_independent(f)
else:
u = Dummy('u')
ind, dep = (reduced_eq + u).as_independent(f)
ind, dep = [tmp.subs(u, 0) for tmp in [ind, dep]]
r = {a: dep.coeff(df),
b: dep.coeff(f(x)),
c: ind}
        # double check r[a] since the preconditioning may have failed
if not r[a].has(f) and (r[a]*df + r[b]*f(x) + r[c]).expand() - reduced_eq == 0:
r['a'] = a
r['b'] = b
r['c'] = c
matching_hints["1st_linear"] = r
matching_hints["1st_linear_Integral"] = r
# Bernoulli case: a(x)*y'+b(x)*y+c(x)*y**n == 0
r = collect(reduced_eq, f(x), exact = True).match(a*df + b*f(x) + c*f(x)**n)
if r and r[c] != 0 and r[n] != 1: # See issue 1577
r['a'] = a
r['b'] = b
r['c'] = c
r['n'] = n
matching_hints["Bernoulli"] = r
matching_hints["Bernoulli_Integral"] = r
# Riccati special n == -2 case: a2*y'+b2*y**2+c2*y/x+d2/x**2 == 0
r = collect(reduced_eq, f(x), exact = True).match(a2*df + b2*f(x)**2 + c2*f(x)/x + d2/x**2)
if r and r[b2] != 0 and (r[c2] != 0 or r[d2] != 0):
r['a2'] = a2
r['b2'] = b2
r['c2'] = c2
r['d2'] = d2
matching_hints["Riccati_special_minus2"] = r
# Exact Differential Equation: P(x,y)+Q(x,y)*y'=0 where dP/dy == dQ/dx
# WITH NON-REDUCED FORM OF EQUATION
r = collect(eq, df, exact = True).match(d + e * df)
if r:
r['d'] = d
r['e'] = e
r['y'] = y
r[d] = r[d].subs(f(x),y)
r[e] = r[e].subs(f(x),y)
try:
if r[d] != 0 and simplify(r[d].diff(y)) == simplify(r[e].diff(x)):
matching_hints["1st_exact"] = r
matching_hints["1st_exact_Integral"] = r
except NotImplementedError:
# Differentiating the coefficients might fail because of things
# like f(2*x).diff(x). See issue 1525 and issue 1620.
pass
# This match is used for several cases below; we now collect on
# f(x) so the matching works.
r = collect(reduced_eq, df, exact = True).match(d+e*df)
if r:
r['d'] = d
r['e'] = e
r['y'] = y
r[d] = r[d].subs(f(x),y)
r[e] = r[e].subs(f(x),y)
# Separable Case: y' == P(y)*Q(x)
r[d] = separatevars(r[d])
r[e] = separatevars(r[e])
# m1[coeff]*m1[x]*m1[y] + m2[coeff]*m2[x]*m2[y]*y'
m1 = separatevars(r[d], dict=True, symbols=(x, y))
m2 = separatevars(r[e], dict=True, symbols=(x, y))
if m1 and m2:
r1 = {'m1':m1, 'm2':m2, 'y':y}
matching_hints["separable"] = r1
matching_hints["separable_Integral"] = r1
# First order equation with homogeneous coefficients:
# dy/dx == F(y/x) or dy/dx == F(x/y)
ordera = homogeneous_order(r[d], x, y)
orderb = homogeneous_order(r[e], x, y)
if ordera == orderb and ordera is not None:
# u1=y/x and u2=x/y
u1 = Dummy('u1')
u2 = Dummy('u2')
if simplify((r[d]+u1*r[e]).subs({x:1, y:u1})) != 0:
matching_hints["1st_homogeneous_coeff_subs_dep_div_indep"] = r
matching_hints["1st_homogeneous_coeff_subs_dep_div_indep_Integral"] = r
if simplify((r[e]+u2*r[d]).subs({x:u2, y:1})) != 0:
matching_hints["1st_homogeneous_coeff_subs_indep_div_dep"] = r
matching_hints["1st_homogeneous_coeff_subs_indep_div_dep_Integral"] = r
if "1st_homogeneous_coeff_subs_dep_div_indep" in matching_hints \
and "1st_homogeneous_coeff_subs_indep_div_dep" in matching_hints:
matching_hints["1st_homogeneous_coeff_best"] = r
if order == 2:
# Liouville ODE f(x).diff(x, 2) + g(f(x))*(f(x).diff(x))**2 + h(x)*f(x).diff(x)
# See Goldstein and Braun, "Advanced Methods for the Solution of
# Differential Equations", pg. 98
s = d*f(x).diff(x, 2) + e*df**2 + k*df
r = reduced_eq.match(s)
if r and r[d] != 0:
y = Dummy('y')
g = simplify(r[e]/r[d]).subs(f(x), y)
h = simplify(r[k]/r[d])
if h.has(f(x)) or g.has(x):
pass
else:
r = {'g':g, 'h':h, 'y':y}
matching_hints["Liouville"] = r
matching_hints["Liouville_Integral"] = r
if order > 0:
# nth order linear ODE
# a_n(x)y^(n) + ... + a_1(x)y' + a_0(x)y = F(x) = b
r = _nth_linear_match(reduced_eq, func, order)
# Constant coefficient case (a_i is constant for all i)
if r and not any(r[i].has(x) for i in r if i >= 0):
# Inhomogeneous case: F(x) is not identically 0
if r[-1]:
undetcoeff = _undetermined_coefficients_match(r[-1], x)
matching_hints["nth_linear_constant_coeff_variation_of_parameters"] = r
matching_hints["nth_linear_constant_coeff_variation_of_parameters" + \
"_Integral"] = r
if undetcoeff['test']:
r['trialset'] = undetcoeff['trialset']
matching_hints["nth_linear_constant_coeff_undetermined_" + \
"coefficients"] = r
# Homogeneous case: F(x) is identically 0
else:
matching_hints["nth_linear_constant_coeff_homogeneous"] = r
# Order keys based on allhints.
retlist = []
for i in allhints:
if i in matching_hints:
retlist.append(i)
if dict:
# Dictionaries are ordered arbitrarily, so we need to make note of which
# hint would come first for dsolve(). In Python 3, this should be replaced
# with an ordered dictionary.
matching_hints["default"] = None
matching_hints["ordered_hints"] = tuple(retlist)
for i in allhints:
if i in matching_hints:
matching_hints["default"] = i
break
return matching_hints
else:
return tuple(retlist)
@vectorize(0)
def odesimp(eq, func, order, hint):
r"""
Simplifies ODEs, including trying to solve for func and running
constantsimp().
It may use knowledge of the type of solution that that hint returns
to apply additional simplifications.
It also attempts to integrate any Integrals in the expression, if
the hint is not an "_Integral" hint.
This function should have no effect on expressions returned by
dsolve(), as dsolve already calls odesimp(), but the individual hint
functions do not call odesimp (because the dsolve() wrapper does).
    Therefore, this function is designed mainly for internal use.
Examples
========
>>> from sympy import sin, symbols, dsolve, pprint, Function
>>> from sympy.solvers.ode import odesimp
    >>> x, u2, C1 = symbols('x,u2,C1')
>>> f = Function('f')
>>> eq = dsolve(x*f(x).diff(x) - f(x) - x*sin(f(x)/x), f(x),
... hint='1st_homogeneous_coeff_subs_indep_div_dep_Integral',
... simplify=False)
>>> pprint(eq)
x
----
f(x)
/
|
/f(x)\ | / 1 1 \
log|----| - | |- -- - -----------| d(u2) = 0
\ C1 / | | u2 2 /1 \|
| | u2 *sin|--||
| \ \u2//
|
/
>>> pprint(odesimp(eq, f(x), 1,
... hint='1st_homogeneous_coeff_subs_indep_div_dep'
... )) #doctest: +SKIP
x
--------- = C1
/f(x)\
tan|----|
\2*x /
"""
x = func.args[0]
f = func.func
C1 = Symbol('C1')
# First, integrate if the hint allows it.
eq = _handle_Integral(eq, func, order, hint)
assert isinstance(eq, Equality)
# Second, clean up the arbitrary constants.
# Right now, nth linear hints can put as many as 2*order constants in an
# expression. If that number grows with another hint, the third argument
# here should be raised accordingly, or constantsimp() rewritten to handle
# an arbitrary number of constants.
eq = constantsimp(eq, x, 2*order)
# Lastly, now that we have cleaned up the expression, try solving for func.
# When RootOf is implemented in solve(), we will want to return a RootOf
    # every time instead of an Equality.
# Get the f(x) on the left if possible.
if eq.rhs == func and not eq.lhs.has(func):
        eq = Eq(eq.rhs, eq.lhs)
# make sure we are working with lists of solutions in simplified form.
if eq.lhs == func and not eq.rhs.has(func):
# The solution is already solved
eq = [eq]
# special simplification of the rhs
if hint.startswith("nth_linear_constant_coeff"):
# Collect terms to make the solution look nice.
# This is also necessary for constantsimp to remove unnecessary terms
# from the particular solution from variation of parameters
global collectterms
assert len(eq) == 1 and eq[0].lhs == f(x)
sol = eq[0].rhs
sol = expand_mul(sol)
for i, reroot, imroot in collectterms:
sol = collect(sol, x**i*exp(reroot*x)*sin(abs(imroot)*x))
sol = collect(sol, x**i*exp(reroot*x)*cos(imroot*x))
for i, reroot, imroot in collectterms:
sol = collect(sol, x**i*exp(reroot*x))
del collectterms
eq[0] = Eq(f(x), sol)
else:
# The solution is not solved, so try to solve it
try:
eqsol = solve(eq, func)
if not eqsol:
raise NotImplementedError
except NotImplementedError:
eq = [eq]
else:
def _expand(expr):
numer, denom = expr.as_numer_denom()
if denom.is_Add:
return expr
else:
return powsimp(expr.expand(), combine='exp', deep=True)
# XXX: the rest of odesimp() expects each ``t`` to be in a
# specific normal form: rational expression with numerator
# expanded, but with combined exponential functions (at
# least in this setup all tests pass).
eq = [Eq(f(x), _expand(t)) for t in eqsol]
# special simplification of the lhs.
if hint.startswith("1st_homogeneous_coeff"):
for j, eqi in enumerate(eq):
newi = logcombine(eqi, force=True)
if newi.lhs.is_Function and newi.lhs.func is log and newi.rhs == 0:
newi = Eq(newi.lhs.args[0]/C1, C1)
eq[j] = newi
    # We cleaned up the constants before solving to help the solve engine with
# a simpler expression, but the solved expression could have introduced
# things like -C1, so rerun constantsimp() one last time before returning.
for i, eqi in enumerate(eq):
eq[i] = constant_renumber(constantsimp(eqi, x, 2*order), 'C', 1, 2*order)
# If there is only 1 solution, return it;
# otherwise return the list of solutions.
if len(eq) == 1:
eq = eq[0]
return eq
def checkodesol(ode, sol, func=None, order='auto', solve_for_func=True):
"""
Substitutes sol into the ode and checks that the result is 0.
This only works when func is one function, like f(x). sol can be a
single solution or a list of solutions. Each solution may be an Equality
that the solution satisfies, e.g. Eq(f(x), C1), Eq(f(x) + C1, 0); or simply
an Expr, e.g. f(x) - C1. In most cases it will not be necessary to
explicitly identify the function, but if the function cannot be inferred
from the original equation it can be supplied through the 'func' argument.
If a sequence of solutions is passed, the same sort of container will be used
to return the result for each solution.
It tries the following methods, in order, until it finds zero
equivalence:
1. Substitute the solution for f in the original equation. This
only works if the ode is solved for f. It will attempt to solve
it first unless solve_for_func == False
2. Take n derivatives of the solution, where n is the order of
ode, and check to see if that is equal to the solution. This
only works on exact odes.
    3. Take the 1st, 2nd, ..., nth derivatives of the solution, each
       time solving for the derivative of f of that order (this will
       always be possible because differentiation is a linear operator).
       Then back substitute each derivative into ode in reverse order.
This function returns a tuple. The first item in the tuple is True
if the substitution results in 0, and False otherwise. The second
item in the tuple is what the substitution results in. It should
    always be 0 if the first item is True. Note that sometimes this
    function will return False, but with an expression that is identically
    equal to 0, instead of returning True. This happens when simplify()
    cannot reduce the expression to 0. If an expression returned by
this function vanishes identically, then sol really is a solution to
ode.
If this function seems to hang, it is probably because of a hard
simplification.
To use this function to test, test the first item of the tuple.
Examples
========
>>> from sympy import Eq, Function, checkodesol, symbols
>>> x, C1 = symbols('x,C1')
>>> f = Function('f')
>>> checkodesol(f(x).diff(x), Eq(f(x), C1))
(True, 0)
>>> assert checkodesol(f(x).diff(x), C1)[0]
>>> assert not checkodesol(f(x).diff(x), x)[0]
>>> checkodesol(f(x).diff(x, 2), x**2)
(False, 2)
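    A list of solutions is checked element-wise and the same kind of
    container is returned (the output below is indicative, so the
    doctest is skipped):
    >>> checkodesol(f(x).diff(x), [Eq(f(x), C1), Eq(f(x), C1 + x)]) #doctest: +SKIP
    [(True, 0), (False, 1)]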
"""
if not isinstance(ode, Equality):
ode = Eq(ode, 0)
if func is None:
try:
_, func = preprocess(ode.lhs)
except ValueError:
funcs = [s.atoms(AppliedUndef) for s in (sol if is_sequence(sol, set) else [sol])]
funcs = reduce(set.union, funcs, set())
if len(funcs) != 1:
raise ValueError('must pass func arg to checkodesol for this case.')
func = funcs.pop()
# ========== deprecation handling
# After the deprecation period this handling section becomes:
# ----------
# if not is_unfunc(func) or len(func.args) != 1:
# raise ValueError("func must be a function of one variable, not %s" % func)
# ----------
# assume, during deprecation that sol and func are reversed
if isinstance(sol, AppliedUndef) and len(sol.args) == 1:
if isinstance(func, AppliedUndef) and len(func.args) == 1:
msg = "If you really do want sol to be just %s, use Eq(%s, 0) " % \
(sol, sol) + "instead."
else:
msg = ""
SymPyDeprecationWarning(msg, feature="The order of the "
"arguments sol and func to checkodesol()",
useinstead="checkodesol(ode, sol, func)", issue=3384,
).warn()
sol, func = func, sol
elif not (isinstance(func, AppliedUndef) and len(func.args) == 1):
from sympy.utilities.misc import filldedent
raise ValueError(filldedent('''
func (or sol, during deprecation) must be a function
of one variable. Got sol = %s, func = %s''' % (sol, func)))
# ========== end of deprecation handling
if is_sequence(sol, set):
return type(sol)(map(lambda i: checkodesol(ode, i, order=order,
solve_for_func=solve_for_func), sol))
if not isinstance(sol, Equality):
sol = Eq(func, sol)
x = func.args[0]
s = True
testnum = 0
if order == 'auto':
order = ode_order(ode, func)
if solve_for_func and not (sol.lhs == func and not sol.rhs.has(func)) and not \
(sol.rhs == func and not sol.lhs.has(func)):
try:
solved = solve(sol, func)
if not solved:
raise NotImplementedError
except NotImplementedError:
pass
else:
if len(solved) == 1:
result = checkodesol(ode, Eq(func, solved[0]), \
order=order, solve_for_func=False)
else:
result = checkodesol(ode, [Eq(func, t) for t in solved],
order=order, solve_for_func=False)
return result
while s:
if testnum == 0:
# First pass, try substituting a solved solution directly into the ode
# This has the highest chance of succeeding.
ode_diff = ode.lhs - ode.rhs
if sol.lhs == func:
s = sub_func_doit(ode_diff, func, sol.rhs)
elif sol.rhs == func:
s = sub_func_doit(ode_diff, func, sol.lhs)
else:
testnum += 1
continue
ss = simplify(s)
if ss:
# with the new numer_denom in power.py, if we do a simple
# expansion then testnum == 0 verifies all solutions.
s = ss.expand()
else:
s = 0
testnum += 1
elif testnum == 1:
# Second pass. If we cannot substitute f, try seeing if the nth
            # derivative is equal; this will only work for odes that are exact,
# by definition.
s = simplify(trigsimp(diff(sol.lhs, x, order) - diff(sol.rhs, x, order)) - \
trigsimp(ode.lhs) + trigsimp(ode.rhs))
# s2 = simplify(diff(sol.lhs, x, order) - diff(sol.rhs, x, order) - \
# ode.lhs + ode.rhs)
testnum += 1
elif testnum == 2:
# Third pass. Try solving for df/dx and substituting that into the ode.
# Thanks to Chris Smith for suggesting this method. Many of the
# comments below are his too.
# The method:
# - Take each of 1..n derivatives of the solution.
# - Solve each nth derivative for d^(n)f/dx^(n)
# (the differential of that order)
# - Back substitute into the ode in decreasing order
# (i.e., n, n-1, ...)
# - Check the result for zero equivalence
if sol.lhs == func and not sol.rhs.has(func):
diffsols = {0:sol.rhs}
elif sol.rhs == func and not sol.lhs.has(func):
diffsols = {0:sol.lhs}
else:
diffsols = {}
sol = sol.lhs - sol.rhs
for i in range(1, order + 1):
# Differentiation is a linear operator, so there should always
# be 1 solution. Nonetheless, we test just to make sure.
# We only need to solve once. After that, we will automatically
# have the solution to the differential in the order we want.
if i == 1:
ds = sol.diff(x)
try:
sdf = solve(ds,func.diff(x, i))
if not sdf:
raise NotImplementedError
except NotImplementedError:
testnum += 1
break
else:
diffsols[i] = sdf[0]
else:
# This is what the solution says df/dx should be.
diffsols[i] = diffsols[i - 1].diff(x)
# Make sure the above didn't fail.
if testnum > 2:
continue
else:
# Substitute it into ode to check for self consistency.
lhs, rhs = ode.lhs, ode.rhs
for i in range(order, -1, -1):
if i == 0 and 0 not in diffsols:
# We can only substitute f(x) if the solution was
# solved for f(x).
break
lhs = sub_func_doit(lhs, func.diff(x, i), diffsols[i])
rhs = sub_func_doit(rhs, func.diff(x, i), diffsols[i])
ode_or_bool = Eq(lhs,rhs)
ode_or_bool = simplify(ode_or_bool)
if isinstance(ode_or_bool, bool):
if ode_or_bool:
lhs = rhs = S.Zero
else:
lhs = ode_or_bool.lhs
rhs = ode_or_bool.rhs
# No sense in overworking simplify--just prove the numerator goes to zero
s = simplify(trigsimp((lhs-rhs).as_numer_denom()[0]))
testnum += 1
else:
break
if not s:
return (True, s)
elif s is True: # The code above never was able to change s
raise NotImplementedError("Unable to test if " + str(sol) + \
" is a solution to " + str(ode) + ".")
else:
return (False, s)
def ode_sol_simplicity(sol, func, trysolving=True):
"""
Returns an extended integer representing how simple a solution to an
ODE is.
The following things are considered, in order from most simple to
least:
- sol is solved for func.
- sol is not solved for func, but can be if passed to solve (e.g.,
a solution returned by dsolve(ode, func, simplify=False)
- If sol is not solved for func, then base the result on the length
of sol, as computed by len(str(sol)).
- If sol has any unevaluated Integrals, this will automatically be
considered less simple than any of the above.
This function returns an integer such that if solution A is simpler
than solution B by above metric, then ode_sol_simplicity(sola, func)
< ode_sol_simplicity(solb, func).
Currently, the following are the numbers returned, but if the
heuristic is ever improved, this may change. Only the ordering is
guaranteed.
sol solved for func -2
sol not solved for func but can be -1
sol is not solved or solvable for func len(str(sol))
sol contains an Integral oo
oo here means the SymPy infinity, which should compare greater than
any integer.
If you already know solve() cannot solve sol, you can use
trysolving=False to skip that step, which is the only potentially
slow step. For example, dsolve with the simplify=False flag should
do this.
    If sol is a list of solutions, then if the worst solution in the
    list returns oo, this function returns oo as well; otherwise it
    returns len(str(sol)), that is, the length of the string
    representation of the whole list.
Examples
========
This function is designed to be passed to min as the key argument,
such as min(listofsolutions, key=lambda i: ode_sol_simplicity(i, f(x))).
>>> from sympy import symbols, Function, Eq, tan, cos, sqrt, Integral
>>> from sympy.solvers.ode import ode_sol_simplicity
>>> x, C1, C2 = symbols('x, C1, C2')
>>> f = Function('f')
>>> ode_sol_simplicity(Eq(f(x), C1*x**2), f(x))
-2
>>> ode_sol_simplicity(Eq(x**2 + f(x), C1), f(x))
-1
>>> ode_sol_simplicity(Eq(f(x), C1*Integral(2*x, x)), f(x))
oo
>>> eq1 = Eq(f(x)/tan(f(x)/(2*x)), C1)
>>> eq2 = Eq(f(x)/tan(f(x)/(2*x) + f(x)), C2)
>>> [ode_sol_simplicity(eq, f(x)) for eq in [eq1, eq2]]
[26, 33]
>>> min([eq1, eq2], key=lambda i: ode_sol_simplicity(i, f(x)))
f(x)/tan(f(x)/(2*x)) == C1
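    If solve() is known to fail on sol, passing trysolving=False skips
    straight to the string-length heuristic (the output below is
    indicative, so the doctest is skipped):
    >>> ode_sol_simplicity(Eq(x**2 + f(x), C1), f(x), trysolving=False) #doctest: +SKIP
    17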
"""
# See the docstring for the coercion rules. We check easier (faster)
# things here first, to save time.
if iterable(sol):
# See if there are Integrals
for i in sol:
if ode_sol_simplicity(i, func, trysolving=trysolving) == oo:
return oo
return len(str(sol))
if sol.has(C.Integral):
return oo
# Next, try to solve for func. This code will change slightly when RootOf
# is implemented in solve(). Probably a RootOf solution should fall somewhere
# between a normal solution and an unsolvable expression.
# First, see if they are already solved
if sol.lhs == func and not sol.rhs.has(func) or\
sol.rhs == func and not sol.lhs.has(func):
return -2
# We are not so lucky, try solving manually
if trysolving:
try:
sols = solve(sol, func)
if not sols:
raise NotImplementedError
except NotImplementedError:
pass
else:
return -1
# Finally, a naive computation based on the length of the string version
# of the expression. This may favor combined fractions because they
# will not have duplicate denominators, and may slightly favor expressions
# with fewer additions and subtractions, as those are separated by spaces
# by the printer.
# Additional ideas for simplicity heuristics are welcome, like maybe
# checking if a equation has a larger domain, or if constantsimp has
# introduced arbitrary constants numbered higher than the order of a
# given ode that sol is a solution of.
return len(str(sol))
@vectorize(0)
def constantsimp(expr, independentsymbol, endnumber, startnumber=1,
symbolname='C'):
"""
Simplifies an expression with arbitrary constants in it.
This function is written specifically to work with dsolve(), and is
not intended for general use.
    Simplification is done by "absorbing" the arbitrary constants into
other arbitrary constants, numbers, and symbols that they are not
independent of.
The symbols must all have the same name with numbers after it, for
example, C1, C2, C3. The symbolname here would be 'C', the
startnumber would be 1, and the end number would be 3. If the
arbitrary constants are independent of the variable x, then the
independent symbol would be x. There is no need to specify the
dependent function, such as f(x), because it already has the
independent symbol, x, in it.
Because terms are "absorbed" into arbitrary constants and because
constants are renumbered after simplifying, the arbitrary constants
in expr are not necessarily equal to the ones of the same name in
the returned result.
If two or more arbitrary constants are added, multiplied, or raised
to the power of each other, they are first absorbed together into a
single arbitrary constant. Then the new constant is combined into
other terms if necessary.
    Absorption is done with limited assistance: terms of Adds are collected
    to try to join constants, and powers whose exponents are Adds are
    expanded, so (C1*cos(x) + C2*cos(x))*exp(x) will simplify to
    C1*cos(x)*exp(x) and exp(C1 + x) will be simplified to C1*exp(x).
Use constant_renumber() to renumber constants after simplification or else
arbitrary numbers on constants may appear, e.g. C1 + C3*x.
In rare cases, a single constant can be "simplified" into two
constants. Every differential equation solution should have as many
arbitrary constants as the order of the differential equation. The
result here will be technically correct, but it may, for example,
have C1 and C2 in an expression, when C1 is actually equal to C2.
Use your discretion in such situations, and also take advantage of
the ability to use hints in dsolve().
Examples
========
>>> from sympy import symbols
>>> from sympy.solvers.ode import constantsimp
>>> C1, C2, C3, x, y = symbols('C1,C2,C3,x,y')
>>> constantsimp(2*C1*x, x, 3)
C1*x
>>> constantsimp(C1 + 2 + x + y, x, 3)
C1 + x
>>> constantsimp(C1*C2 + 2 + x + y + C3*x, x, 3)
C1 + C3*x
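    A constant buried in an exponent is pulled out front, as described
    above (the output below is indicative, so the doctest is skipped):
    >>> from sympy import exp
    >>> constantsimp(exp(C1 + x), x, 3) #doctest: +SKIP
    C1*exp(x)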
"""
# This function works recursively. The idea is that, for Mul,
# Add, Pow, and Function, if the class has a constant in it, then
# we can simplify it, which we do by recursing down and
# simplifying up. Otherwise, we can skip that part of the
# expression.
    constant_iter = numbered_symbols(prefix=symbolname, cls=Symbol, start=startnumber)
constantsymbols = [constant_iter.next() for t in range(startnumber, endnumber + 1)]
constantsymbols_set = set(constantsymbols)
x = independentsymbol
if isinstance(expr, Equality):
# For now, only treat the special case where one side of the equation
# is a constant
if expr.lhs in constantsymbols_set:
return Eq(expr.lhs, constantsimp(expr.rhs + expr.lhs, x, endnumber,
startnumber, symbolname) - expr.lhs)
# this could break if expr.lhs is absorbed into another constant,
# but for now, the only solutions that return Eq's with a constant
# on one side are first order. At any rate, it will still be
# technically correct. The expression will just have too many
# constants in it
elif expr.rhs in constantsymbols_set:
return Eq(constantsimp(expr.lhs + expr.rhs, x, endnumber,
startnumber, symbolname) - expr.rhs, expr.rhs)
else:
return Eq(constantsimp(expr.lhs, x, endnumber, startnumber,
symbolname), constantsimp(expr.rhs, x, endnumber,
startnumber, symbolname))
if not expr.has(*constantsymbols):
return expr
else:
# ================ pre-processing ================
# collect terms to get constants together
def _take(i):
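            # return the lowest-numbered arbitrary constant appearing in i,
            # or i itself if it contains none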
t = sorted([s for s in i.atoms(Symbol) if s in constantsymbols])
if not t:
return i
return t[0]
if not (expr.has(x) and x in expr.free_symbols):
return constantsymbols[0]
new_expr = terms_gcd(expr, clear=False, deep=True)
if new_expr.is_Mul:
# don't let C1*exp(x) + C2*exp(2*x) become exp(x)*(C1 + C2*exp(x))
infac = False
asfac = False
for m in new_expr.args:
if m.func is exp:
asfac = True
elif m.is_Add:
infac = any(fi.func is exp for t in m.args for fi in Mul.make_args(t))
if asfac and infac:
new_expr = expr
break
expr = new_expr
# don't allow a number to be factored out of an expression
# that has no denominator
if expr.is_Mul:
h, t = expr.as_coeff_Mul()
if h != 1 and (t.is_Add or denom(t) == 1):
args = list(Mul.make_args(t))
for i, a in enumerate(args):
if a.is_Add:
args[i] = h*a
expr = Mul._from_args(args)
break
# let numbers absorb into constants of an Add, perhaps
# in the base of a power, if all its terms have a constant
# symbol in them, e.g. sqrt(2)*(C1 + C2*x) -> C1 + C2*x
if expr.is_Mul:
d = sift(expr.args, lambda m: m.is_number == True)
num = d[True]
other = d[False]
con_set = set(constantsymbols)
if num:
for o in other:
b, e = o.as_base_exp()
if b.is_Add and all(a.args_cnc(cset=True, warn=False)[0] & con_set for a in b.args):
expr = sign(Mul(*num))*Mul._from_args(other)
break
if expr.is_Mul: # check again that it's still a Mul
i, d = expr.as_independent(x, strict=True)
newi = _take(i)
if newi != i:
expr = newi*d
elif expr.is_Add:
i, d = expr.as_independent(x, strict=True)
expr = _take(i) + d
if expr.is_Add:
terms = {}
for ai in expr.args:
i, d = ai.as_independent(x, strict=True, as_Add=False)
terms.setdefault(d, []).append(i)
expr = Add(*[k*Add(*v) for k, v in terms.items()])
# handle powers like exp(C0 + g(x)) -> C0*exp(g(x))
pows = [p for p in expr.atoms(C.Function, C.Pow) if
(p.is_Pow or p.func is exp) and
p.exp.is_Add and
p.exp.as_independent(x, strict=True)[1]]
if pows:
reps = []
for p in pows:
b, e = p.as_base_exp()
ei, ed = e.as_independent(x, strict=True)
e = _take(ei)
if e != ei or e in constantsymbols:
reps.append((p, e*b**ed))
expr = expr.subs(reps)
# a C1*C2 may have been introduced and the code below won't
# handle that so handle it now: once to handle the C1*C2
# and once to handle any C0*f(x) + C0*f(x)
for _ in range(2):
muls = [m for m in expr.atoms(Mul) if m.has(*constantsymbols)]
reps = []
for m in muls:
i, d = m.as_independent(x, strict=True)
newi = _take(i)
if newi != i:
reps.append((m, _take(i)*d))
expr = expr.subs(reps)
# ================ end of pre-processing ================
newargs = []
hasconst = False
isPowExp = False
reeval = False
for i in expr.args:
if i not in constantsymbols:
newargs.append(i)
else:
newconst = i
hasconst = True
if expr.is_Pow and i == expr.exp:
isPowExp = True
for i in range(len(newargs)):
isimp = constantsimp(newargs[i], x, endnumber, startnumber,
symbolname)
if isimp in constantsymbols:
reeval = True
hasconst = True
newconst = isimp
if expr.is_Pow and i == 1:
isPowExp = True
newargs[i] = isimp
if hasconst:
newargs = [i for i in newargs if i.has(x)]
if isPowExp:
newargs = newargs + [newconst] # Order matters in this case
else:
newargs = [newconst] + newargs
if expr.is_Pow and len(newargs) == 1:
newargs.append(S.One)
if expr.is_Function:
if (len(newargs) == 0 or hasconst and len(newargs) == 1):
return newconst
else:
newfuncargs = [constantsimp(t, x, endnumber, startnumber,
symbolname) for t in expr.args]
return expr.func(*newfuncargs)
else:
newexpr = expr.func(*newargs)
if reeval:
return constantsimp(newexpr, x, endnumber, startnumber,
symbolname)
else:
return newexpr
def constant_renumber(expr, symbolname, startnumber, endnumber):
"""
Renumber arbitrary constants in expr to have numbers 1 through N
where N is ``endnumber`` - ``startnumber`` + 1 at most.
This is a simple function that goes through and renumbers any Symbol
with a name in the form symbolname + num where num is in the range
from startnumber to endnumber.
Symbols are renumbered based on ``.sort_key()``, so they should be
numbered roughly in the order that they appear in the final, printed
expression. Note that this ordering is based in part on hashes, so
it can produce different results on different machines.
The structure of this function is very similar to that of
constantsimp().
Examples
========
>>> from sympy import symbols, Eq, pprint
>>> from sympy.solvers.ode import constant_renumber
>>> x, C0, C1, C2, C3, C4 = symbols('x,C:5')
Only constants in the given range (inclusive) are renumbered;
the renumbering always starts from 1:
>>> constant_renumber(C1 + C3 + C4, 'C', 1, 3)
C1 + C2 + C4
>>> constant_renumber(C0 + C1 + C3 + C4, 'C', 2, 4)
C0 + 2*C1 + C2
>>> constant_renumber(C0 + 2*C1 + C2, 'C', 0, 1)
C1 + 3*C2
>>> pprint(C2 + C1*x + C3*x**2)
2
C1*x + C2 + C3*x
>>> pprint(constant_renumber(C2 + C1*x + C3*x**2, 'C', 1, 3))
2
C1 + C2*x + C3*x
"""
if type(expr) in (set, list, tuple):
return type(expr)(map(lambda i: constant_renumber(i, symbolname=symbolname,
startnumber=startnumber, endnumber=endnumber), expr))
global newstartnumber
newstartnumber = 1
def _constant_renumber(expr, symbolname, startnumber, endnumber):
"""
We need to have an internal recursive function so that
newstartnumber maintains its values throughout recursive calls.
"""
constantsymbols = [Symbol(symbolname+"%d" % t) for t in range(startnumber,
endnumber + 1)]
global newstartnumber
if isinstance(expr, Equality):
return Eq(_constant_renumber(expr.lhs, symbolname, startnumber, endnumber),
_constant_renumber(expr.rhs, symbolname, startnumber, endnumber))
if type(expr) not in (Mul, Add, Pow) and not expr.is_Function and\
not expr.has(*constantsymbols):
# Base case, as above. We better hope there aren't constants inside
# of some other class, because they won't be renumbered.
return expr
elif expr in constantsymbols:
# Renumbering happens here
newconst = Symbol(symbolname + str(newstartnumber))
newstartnumber += 1
return newconst
else:
if expr.is_Function or expr.is_Pow:
return expr.func(*[_constant_renumber(x, symbolname, startnumber,
endnumber) for x in expr.args])
else:
sortedargs = list(expr.args)
# make a mapping to send all constantsymbols to S.One and use
# that to make sure that term ordering is not dependent on
# the indexed value of C
C_1 = [(ci, S.One) for ci in constantsymbols]
sortedargs.sort(key=lambda arg: default_sort_key(arg.subs(C_1)))
return expr.func(*[_constant_renumber(x, symbolname, startnumber,
endnumber) for x in sortedargs])
return _constant_renumber(expr, symbolname, startnumber, endnumber)
def _handle_Integral(expr, func, order, hint):
"""
Converts a solution with Integrals in it into an actual solution.
For most hints, this simply runs expr.doit()
"""
x = func.args[0]
f = func.func
if hint == "1st_exact":
global exactvars
x0 = exactvars['x0']
y0 = exactvars['y0']
y = exactvars['y']
tmpsol = expr.lhs.doit()
sol = 0
assert tmpsol.is_Add
for i in tmpsol.args:
if not i.has(x0) and not i.has(y0):
sol += i
assert sol != 0
sol = Eq(sol.subs(y, f(x)),expr.rhs) # expr.rhs == C1
del exactvars
elif hint == "1st_exact_Integral":
# FIXME: We still need to back substitute y
# y = exactvars['y']
# sol = expr.subs(y, f(x))
# For now, we are going to have to return an expression with f(x) replaced
# with y. Substituting results in the y's in the second integral
# becoming f(x), which prevents the integral from being evaluatable.
# For example, Integral(cos(f(x)), (x, x0, x)). If there were a way to
# do inert substitution, that could maybe be used here instead.
del exactvars
sol = expr
elif hint == "nth_linear_constant_coeff_homogeneous":
sol = expr
elif not hint.endswith("_Integral"):
sol = expr.doit()
else:
sol = expr
return sol
def ode_order(expr, func):
"""
Returns the order of a given ODE with respect to func.
This function is implemented recursively.
Examples
========
>>> from sympy import Function, ode_order
>>> from sympy.abc import x
>>> f, g = map(Function, ['f', 'g'])
>>> ode_order(f(x).diff(x, 2) + f(x).diff(x)**2 +
... f(x).diff(x), f(x))
2
>>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), f(x))
2
>>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), g(x))
3
"""
a = Wild('a', exclude=[func])
if expr.match(a):
return 0
if isinstance(expr, Derivative):
if expr.args[0] == func:
return len(expr.variables)
else:
order = 0
for arg in expr.args[0].args:
order = max(order, ode_order(arg, func) + len(expr.variables))
return order
else:
order = 0
for arg in expr.args:
order = max(order, ode_order(arg, func))
return order
# FIXME: replace the general solution in the docstring with
# dsolve(equation, hint='1st_exact_Integral'). You will need to be able
# to have assumptions on P and Q that dP/dy = dQ/dx.
def ode_1st_exact(eq, func, order, match):
r"""
Solves 1st order exact ordinary differential equations.
A 1st order differential equation is called exact if it is the total
differential of a function. That is, the differential equation
P(x, y)dx + Q(x, y)dy = 0 is exact if there is some function F(x, y)
such that P(x, y) = dF/dx and Q(x, y) = dF/dy (d here refers to the
partial derivative). It can be shown that a necessary and
sufficient condition for a first order ODE to be exact is that
dP/dy = dQ/dx. Then, the solution will be as given below::
>>> from sympy import Function, Eq, Integral, symbols, pprint
>>> x, y, t, x0, y0, C1= symbols('x,y,t,x0,y0,C1')
>>> P, Q, F= map(Function, ['P', 'Q', 'F'])
>>> pprint(Eq(Eq(F(x, y), Integral(P(t, y), (t, x0, x)) +
... Integral(Q(x0, t), (t, y0, y))), C1))
x y
/ /
| |
F(x, y) = | P(t, y) dt + | Q(x0, t) dt = C1
| |
/ /
x0 y0
Where the first partials of P and Q exist and are continuous in a
simply connected region.
A note: SymPy currently has no way to represent inert substitution on
an expression, so the hint '1st_exact_Integral' will return an integral
with dy. This is supposed to represent the function that you are
solving for.
Examples
========
>>> from sympy import Function, dsolve, cos, sin
>>> from sympy.abc import x
>>> f = Function('f')
>>> dsolve(cos(f(x)) - (x*sin(f(x)) - f(x)**2)*f(x).diff(x),
... f(x), hint='1st_exact')
x*cos(f(x)) + f(x)**3/3 == C1
References
==========
- http://en.wikipedia.org/wiki/Exact_differential_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 73
# indirect doctest
"""
x = func.args[0]
f = func.func
r = match # d+e*diff(f(x),x)
C1 = Symbol('C1')
x0 = Dummy('x0')
y0 = Dummy('y0')
global exactvars # This is the only way to pass these dummy variables to
# _handle_Integral
exactvars = {'y0':y0, 'x0':x0, 'y':r['y']}
# If we ever get a Constant class, x0 and y0 should be constants, I think
sol = C.Integral(r[r['e']].subs(x,x0),(r['y'],y0,f(x)))+C.Integral(r[r['d']],(x,x0,x))
return Eq(sol, C1)
def ode_1st_homogeneous_coeff_best(eq, func, order, match):
r"""
Returns the best solution to an ODE from the two hints
'1st_homogeneous_coeff_subs_dep_div_indep' and
'1st_homogeneous_coeff_subs_indep_div_dep'.
This is as determined by ode_sol_simplicity().
See the ode_1st_homogeneous_coeff_subs_indep_div_dep() and
ode_1st_homogeneous_coeff_subs_dep_div_indep() docstrings for more
information on these hints. Note that there is no
'1st_homogeneous_coeff_best_Integral' hint.
Examples
========
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
... hint='1st_homogeneous_coeff_best', simplify=False))
/ 2 \
| 3*x |
log|----- + 1|
| 2 |
/f(x)\ \f (x) /
log|----| + -------------- = 0
\ C1 / 3
References
==========
- http://en.wikipedia.org/wiki/Homogeneous_differential_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 59
# indirect doctest
"""
# There are two substitutions that solve the equation, u1=y/x and u2=x/y
# They produce different integrals, so try them both and see which
# one is easier.
sol1 = ode_1st_homogeneous_coeff_subs_indep_div_dep(eq,
func, order, match)
sol2 = ode_1st_homogeneous_coeff_subs_dep_div_indep(eq,
func, order, match)
simplify = match.get('simplify', True)
if simplify:
sol1 = odesimp(sol1, func, order, "1st_homogeneous_coeff_subs_indep_div_dep")
sol2 = odesimp(sol2, func, order, "1st_homogeneous_coeff_subs_dep_div_indep")
return min([sol1, sol2], key=lambda x: ode_sol_simplicity(x, func,
trysolving=not simplify))
def ode_1st_homogeneous_coeff_subs_dep_div_indep(eq, func, order, match):
r"""
Solves a 1st order differential equation with homogeneous coefficients
using the substitution
u1 = <dependent variable>/<independent variable>.
    This is a differential equation P(x, y) + Q(x, y)dy/dx = 0 such that
    P and Q are homogeneous functions of the same order.
homogeneous of order n if F(xt, yt) = t**n*F(x, y). Equivalently,
F(x, y) can be rewritten as G(y/x) or H(x/y). See also the
docstring of homogeneous_order().
If the coefficients P and Q in the differential equation above are
homogeneous functions of the same order, then it can be shown that
the substitution y = u1*x (u1 = y/x) will turn the differential
equation into an equation separable in the variables x and u. If
h(u1) is the function that results from making the substitution
    u1 = f(x)/x on P(x, f(x)) and g(u1) is the function that results
from the substitution on Q(x, f(x)) in the differential equation
P(x, f(x)) + Q(x, f(x))*diff(f(x), x) = 0, then the general solution
is::
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x
>>> f, g, h = map(Function, ['f', 'g', 'h'])
>>> genform = g(f(x)/x) + h(f(x)/x)*f(x).diff(x)
>>> pprint(genform)
/f(x)\ /f(x)\ d
g|----| + h|----|*--(f(x))
\ x / \ x / dx
>>> pprint(dsolve(genform, f(x),
... hint='1st_homogeneous_coeff_subs_dep_div_indep_Integral'))
f(x)
----
x
/
|
| -h(u1)
log(C1*x) - | ---------------- d(u1) = 0
| u1*h(u1) + g(u1)
|
/
Where u1*h(u1) + g(u1) != 0 and x != 0.
See also the docstrings of ode_1st_homogeneous_coeff_best() and
ode_1st_homogeneous_coeff_subs_indep_div_dep().
Examples
========
>>> from sympy import Function, dsolve
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
... hint='1st_homogeneous_coeff_subs_dep_div_indep', simplify=False))
/ 2 \
| f (x)|
/f(x)\ log|3 + -----|
log|----| | 2 |
/x \ \ x / \ x /
log|--| + --------- + -------------- = 0
\C1/ 3 3
References
==========
- http://en.wikipedia.org/wiki/Homogeneous_differential_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 59
# indirect doctest
"""
x = func.args[0]
f = func.func
u1 = Dummy('u1') # u1 == f(x)/x
r = match # d+e*diff(f(x),x)
C1 = Symbol('C1')
int = C.Integral((-r[r['e']]/(r[r['d']]+u1*r[r['e']])).subs({x:1, r['y']:u1}),
(u1, None, f(x)/x))
sol = logcombine(Eq(log(x), int + log(C1)), force=True)
return sol
def ode_1st_homogeneous_coeff_subs_indep_div_dep(eq, func, order, match):
r"""
Solves a 1st order differential equation with homogeneous coefficients
using the substitution
u2 = <independent variable>/<dependent variable>.
    This is a differential equation P(x, y) + Q(x, y)dy/dx = 0 such that
    P and Q are homogeneous functions of the same order.
homogeneous of order n if F(xt, yt) = t**n*F(x, y). Equivalently,
F(x, y) can be rewritten as G(y/x) or H(x/y). See also the
docstring of homogeneous_order().
If the coefficients P and Q in the differential equation above are
homogeneous functions of the same order, then it can be shown that
the substitution x = u2*y (u2 = x/y) will turn the differential
equation into an equation separable in the variables y and u2. If
h(u2) is the function that results from making the substitution
u2 = x/f(x) on P(x, f(x)) and g(u2) is the function that results
from the substitution on Q(x, f(x)) in the differential equation
P(x, f(x)) + Q(x, f(x))*diff(f(x), x) = 0, then the general solution
is:
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x
>>> f, g, h = map(Function, ['f', 'g', 'h'])
>>> genform = g(x/f(x)) + h(x/f(x))*f(x).diff(x)
>>> pprint(genform)
/ x \ / x \ d
g|----| + h|----|*--(f(x))
\f(x)/ \f(x)/ dx
>>> pprint(dsolve(genform, f(x),
... hint='1st_homogeneous_coeff_subs_indep_div_dep_Integral'))
x
----
f(x)
/
|
| g(u2)
| ----------------- d(u2)
| -u2*g(u2) - h(u2)
|
/
<BLANKLINE>
f(x) = C1*e
Where u2*g(u2) + h(u2) != 0 and f(x) != 0.
See also the docstrings of ode_1st_homogeneous_coeff_best() and
ode_1st_homogeneous_coeff_subs_dep_div_indep().
Examples
========
>>> from sympy import Function, pprint
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
... hint='1st_homogeneous_coeff_subs_indep_div_dep'))
___________
/ 2
/ 3*x
/ ----- + 1 *f(x) = C1
3 / 2
\/ f (x)
References
==========
- http://en.wikipedia.org/wiki/Homogeneous_differential_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 59
# indirect doctest
"""
x = func.args[0]
f = func.func
u2 = Dummy('u2') # u2 == x/f(x)
r = match # d+e*diff(f(x),x)
C1 = Symbol('C1')
int = C.Integral(simplify((-r[r['d']]/(r[r['e']]+u2*r[r['d']])).subs({x:u2, r['y']:1})),
(u2, None, x/f(x)))
sol = logcombine(Eq(log(f(x)), int + log(C1)), force=True)
return sol
# XXX: Should this function maybe go somewhere else?
def homogeneous_order(eq, *symbols):
"""
    Returns the order n if eq is homogeneous and None if it is not
    homogeneous.
Determines if a function is homogeneous and if so of what order.
A function f(x,y,...) is homogeneous of order n if
f(t*x,t*y,t*...) == t**n*f(x,y,...).
    If the function is of two variables, F(x, y), then F being
homogeneous of any order is equivalent to being able to rewrite
F(x, y) as G(x/y) or H(y/x). This fact is used to solve 1st order
ordinary differential equations whose coefficients are homogeneous
of the same order (see the docstrings of
    ode.ode_1st_homogeneous_coeff_subs_indep_div_dep() and
    ode.ode_1st_homogeneous_coeff_subs_dep_div_indep()).
Symbols can be functions, but every argument of the function must be
a symbol, and the arguments of the function that appear in the
expression must match those given in the list of symbols. If a
declared function appears with different arguments than given in the
list of symbols, None is returned.
Examples
========
>>> from sympy import Function, homogeneous_order, sqrt
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> homogeneous_order(f(x), f(x)) is None
True
>>> homogeneous_order(f(x,y), f(y, x), x, y) is None
True
>>> homogeneous_order(f(x), f(x), x)
1
>>> homogeneous_order(x**2*f(x)/sqrt(x**2+f(x)**2), x, f(x))
2
>>> homogeneous_order(x**2+f(x), x, f(x)) is None
True
"""
from sympy.simplify.simplify import separatevars
if not symbols:
raise ValueError("homogeneous_order: no symbols were given.")
symset = set(symbols)
eq = sympify(eq)
# The following are not supported
if eq.has(Order, Derivative):
return None
# These are all constants
if (eq.is_Number or
eq.is_NumberSymbol or
eq.is_number
):
return S.Zero
# Replace all functions with dummy variables
dum = numbered_symbols(prefix='d', cls=Dummy)
newsyms = set()
for i in [j for j in symset if getattr(j, 'is_Function')]:
iargs = set(i.args)
if iargs.difference(symset):
return None
else:
dummyvar = dum.next()
eq = eq.subs(i, dummyvar)
symset.remove(i)
newsyms.add(dummyvar)
symset.update(newsyms)
if not eq.free_symbols & symset:
return None
# make the replacement of x with x*t and see if t can be factored out
t = Dummy('t', positive=True) # It is sufficient that t > 0
eqs = separatevars(eq.subs([(i, t*i) for i in symset]), [t], dict=True)[t]
if eqs is S.One:
return S.Zero # there was no term with only t
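    # eqs is the factor of eq that carries all of the t dependence; if it
    # reduces to a pure power t**e, then eq scales as t**e and is
    # homogeneous of order e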
i, d = eqs.as_independent(t, as_Add=False)
b, e = d.as_base_exp()
if b == t:
return e
def ode_1st_linear(eq, func, order, match):
r"""
Solves 1st order linear differential equations.
    These are differential equations of the form dy/dx + P(x)*y = Q(x).
These kinds of differential equations can be solved in a general
way. The integrating factor exp(Integral(P(x), x)) will turn the
equation into a separable equation. The general solution is::
>>> from sympy import Function, dsolve, Eq, pprint, diff, sin
>>> from sympy.abc import x
>>> f, P, Q = map(Function, ['f', 'P', 'Q'])
>>> genform = Eq(f(x).diff(x) + P(x)*f(x), Q(x))
>>> pprint(genform)
d
P(x)*f(x) + --(f(x)) = Q(x)
dx
>>> pprint(dsolve(genform, f(x), hint='1st_linear_Integral'))
/ / \
| | |
| | / | /
| | | | |
| | | P(x) dx | - | P(x) dx
| | | | |
| | / | /
f(x) = |C1 + | Q(x)*e dx|*e
| | |
\ / /
Examples
========
>>> f = Function('f')
>>> pprint(dsolve(Eq(x*diff(f(x), x) - f(x), x**2*sin(x)),
... f(x), '1st_linear'))
f(x) = x*(C1 - cos(x))
References
==========
- http://en.wikipedia.org/wiki/Linear_differential_equation#First_order_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 92
# indirect doctest
"""
x = func.args[0]
f = func.func
r = match # a*diff(f(x),x) + b*f(x) + c
C1 = Symbol('C1')
t = exp(C.Integral(r[r['b']]/r[r['a']], x))
tt = C.Integral(t*(-r[r['c']]/r[r['a']]), x)
return Eq(f(x),(tt + C1)/t)
def ode_Bernoulli(eq, func, order, match):
r"""
Solves Bernoulli differential equations.
These are equations of the form dy/dx + P(x)*y = Q(x)*y**n, n != 1.
The substitution w = 1/y**(1-n) will transform an equation of this
form into one that is linear (see the docstring of
ode_1st_linear()). The general solution is::
>>> from sympy import Function, dsolve, Eq, pprint
>>> from sympy.abc import x, n
>>> f, P, Q = map(Function, ['f', 'P', 'Q'])
>>> genform = Eq(f(x).diff(x) + P(x)*f(x), Q(x)*f(x)**n)
>>> pprint(genform)
d n
P(x)*f(x) + --(f(x)) = Q(x)*f (x)
dx
>>> pprint(dsolve(genform, f(x), hint='Bernoulli_Integral')) #doctest: +SKIP
1
----
1 - n
// / \ \
|| | | |
|| | / | / |
|| | | | | |
|| | (1 - n)* | P(x) dx | (-1 + n)* | P(x) dx|
|| | | | | |
|| | / | / |
f(x) = ||C1 + (-1 + n)* | -Q(x)*e dx|*e |
|| | | |
\\ / / /
Note that when n = 1, then the equation is separable (see the
docstring of ode_separable()).
>>> pprint(dsolve(Eq(f(x).diff(x) + P(x)*f(x), Q(x)*f(x)), f(x),
... hint='separable_Integral'))
f(x)
/
| /
| 1 |
| - dy = C1 + | (-P(x) + Q(x)) dx
| y |
| /
/
Examples
========
>>> from sympy import Function, dsolve, Eq, pprint, log
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(Eq(x*f(x).diff(x) + f(x), log(x)*f(x)**2),
... f(x), hint='Bernoulli'))
1
f(x) = -------------------
/ log(x) 1\
x*|C1 + ------ + -|
\ x x/
References
==========
- http://en.wikipedia.org/wiki/Bernoulli_differential_equation
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 95
# indirect doctest
"""
x = func.args[0]
f = func.func
r = match # a*diff(f(x),x) + b*f(x) + c*f(x)**n, n != 1
C1 = Symbol('C1')
t = exp((1-r[r['n']])*C.Integral(r[r['b']]/r[r['a']],x))
tt = (r[r['n']]-1)*C.Integral(t*r[r['c']]/r[r['a']],x)
return Eq(f(x),((tt + C1)/t)**(1/(1-r[r['n']])))
def ode_Riccati_special_minus2(eq, func, order, match):
r"""
The general Riccati equation has the form dy/dx = f(x)*y**2 + g(x)*y + h(x).
While it does not have a general solution [1], the "special" form,
dy/dx = a*y**2 - b*x**c, does have solutions in many cases [2]. This routine
returns a solution for a*dy/dx = b*y**2 + c*y/x + d/x**2 that is obtained by
using a suitable change of variables to reduce it to the special form and is
    valid when neither a nor b is zero and either c or d is zero.
>>> from sympy.abc import x, y, a, b, c, d
>>> from sympy.solvers.ode import dsolve, checkodesol
>>> from sympy import pprint, Function
>>> f = Function('f')
>>> y = f(x)
>>> genform = a*y.diff(x) - (b*y**2 + c*y/x + d/x**2)
>>> sol = dsolve(genform, y)
>>> pprint(sol)
/ / __________________ \\
| __________________ | / 2 ||
| / 2 | \/ 4*b*d - (a + c) *log(x)||
-|a + c - \/ 4*b*d - (a + c) *tan|C1 + ----------------------------||
\ \ 2*a //
f(x) = -----------------------------------------------------------------------
2*b*x
>>> checkodesol(genform, sol, order=1)[0]
True
References
==========
1. http://www.maplesoft.com/support/help/Maple/view.aspx?path=odeadvisor/Riccati
2. http://eqworld.ipmnet.ru/en/solutions/ode/ode0106.pdf -
http://eqworld.ipmnet.ru/en/solutions/ode/ode0123.pdf
"""
x = func.args[0]
f = func.func
r = match # a2*diff(f(x),x) + b2*f(x) + c2*f(x)/x + d2/x**2
a2, b2, c2, d2 = [r[r[s]] for s in 'a2 b2 c2 d2'.split()]
C1 = Symbol('C1')
mu = sqrt(4*d2*b2 - (a2 - c2)**2)
return Eq(f(x), (a2 - c2 - mu*tan(mu/(2*a2)*log(x)+C1))/(2*b2*x))
def ode_Liouville(eq, func, order, match):
r"""
Solves 2nd order Liouville differential equations.
The general form of a Liouville ODE is
d^2y/dx^2 + g(y)*(dy/dx)**2 + h(x)*dy/dx. The general solution is:
>>> from sympy import Function, dsolve, Eq, pprint, diff
>>> from sympy.abc import x
>>> f, g, h = map(Function, ['f', 'g', 'h'])
>>> genform = Eq(diff(f(x),x,x) + g(f(x))*diff(f(x),x)**2 +
... h(x)*diff(f(x),x), 0)
>>> pprint(genform)
2 2
d d d
g(f(x))*--(f(x)) + h(x)*--(f(x)) + ---(f(x)) = 0
dx dx 2
dx
>>> pprint(dsolve(genform, f(x), hint='Liouville_Integral'))
f(x)
/ /
| |
| / | /
| | | |
| - | h(x) dx | | g(y) dy
| | | |
| / | /
C1 + C2* | e dx + | e dy = 0
| |
/ /
Examples
========
>>> from sympy import Function, dsolve, Eq, pprint
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(diff(f(x), x, x) + diff(f(x), x)**2/f(x) +
... diff(f(x), x)/x, f(x), hint='Liouville'))
________________ ________________
[f(x) = -\/ C1 + C2*log(x) , f(x) = \/ C1 + C2*log(x) ]
References
==========
- Goldstein and Braun, "Advanced Methods for the Solution of
Differential Equations", pp. 98
- http://www.maplesoft.com/support/help/Maple/view.aspx?path=odeadvisor/Liouville
# indirect doctest
"""
    # Liouville ODE f(x).diff(x, 2) + g(f(x))*(f(x).diff(x))**2 + h(x)*f(x).diff(x)
# See Goldstein and Braun, "Advanced Methods for the Solution of
# Differential Equations", pg. 98, as well as
# http://www.maplesoft.com/support/help/view.aspx?path=odeadvisor/Liouville
x = func.args[0]
f = func.func
r = match # f(x).diff(x, 2) + g*f(x).diff(x)**2 + h*f(x).diff(x)
y = r['y']
C1 = Symbol('C1')
C2 = Symbol('C2')
int = C.Integral(exp(C.Integral(r['g'], y)), (y, None, f(x)))
sol = Eq(int + C1*C.Integral(exp(-C.Integral(r['h'], x)), x) + C2, 0)
return sol
def _nth_linear_match(eq, func, order):
"""
Matches a differential equation to the linear form:
a_n(x)y^(n) + ... + a_1(x)y' + a_0(x)y + B(x) = 0
Returns a dict of order:coeff terms, where order is the order of the
derivative on each term, and coeff is the coefficient of that
derivative. The key -1 holds the function B(x). Returns None if
the ode is not linear. This function assumes that func has already
been checked to be good.
Examples
========
>>> from sympy import Function, cos, sin
>>> from sympy.abc import x
>>> from sympy.solvers.ode import _nth_linear_match
>>> f = Function('f')
>>> _nth_linear_match(f(x).diff(x, 3) + 2*f(x).diff(x) +
... x*f(x).diff(x, 2) + cos(x)*f(x).diff(x) + x - f(x) -
... sin(x), f(x), 3)
{-1: x - sin(x), 0: -1, 1: cos(x) + 2, 2: x, 3: 1}
>>> _nth_linear_match(f(x).diff(x, 3) + 2*f(x).diff(x) +
... x*f(x).diff(x, 2) + cos(x)*f(x).diff(x) + x - f(x) -
... sin(f(x)), f(x), 3) == None
True
"""
x = func.args[0]
one_x = set([x])
terms = dict([(i, S.Zero) for i in range(-1, order+1)])
for i in Add.make_args(eq):
if not i.has(func):
terms[-1] += i
else:
c, f = i.as_independent(func)
if not ((isinstance(f, Derivative) and set(f.variables) == one_x) or\
f == func):
return None
else:
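                # for a Derivative, args[1:] are the differentiation variables,
                # so len(f.args[1:]) is the order of the term; when f is func
                # itself, the slice of f(x).args is empty, giving order 0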
terms[len(f.args[1:])] += c
return terms
def ode_nth_linear_constant_coeff_homogeneous(eq, func, order, match, returns='sol'):
r"""
Solves an nth order linear homogeneous differential equation with
constant coefficients.
This is an equation of the form a_n*f(x)^(n) + a_(n-1)*f(x)^(n-1) +
... + a1*f'(x) + a0*f(x) = 0
These equations can be solved in a general manner, by taking the
roots of the characteristic equation a_n*m**n + a_(n-1)*m**(n-1) +
... + a1*m + a0 = 0. The solution will then be the sum of
    Cn*x**i*exp(r*x) terms, where Cn is an arbitrary constant, r is a
    root of the characteristic equation and i ranges from 0 to the
    multiplicity of the root - 1 (for example, a root 3 of
    multiplicity 2 would create the terms C1*exp(3*x) + C2*x*exp(3*x)).
The exponential is usually expanded for complex roots using Euler's
equation exp(I*x) = cos(x) + I*sin(x). Complex roots always come in
    conjugate pairs in polynomials with real coefficients, so the two
roots will be represented (after simplifying the constants) as
exp(a*x)*(C1*cos(b*x) + C2*sin(b*x)).
    If SymPy cannot find exact roots to the characteristic equation, a
    RootOf instance will be returned in its stead.
>>> from sympy import Function, dsolve, Eq
>>> from sympy.abc import x
>>> f = Function('f')
>>> dsolve(f(x).diff(x, 5) + 10*f(x).diff(x) - 2*f(x), f(x),
... hint='nth_linear_constant_coeff_homogeneous')
... # doctest: +NORMALIZE_WHITESPACE
f(x) == C1*exp(x*RootOf(_x**5 + 10*_x - 2, 0)) +
C2*exp(x*RootOf(_x**5 + 10*_x - 2, 1)) +
C3*exp(x*RootOf(_x**5 + 10*_x - 2, 2)) +
C4*exp(x*RootOf(_x**5 + 10*_x - 2, 3)) +
C5*exp(x*RootOf(_x**5 + 10*_x - 2, 4))
Note that because this method does not involve integration, there is
no 'nth_linear_constant_coeff_homogeneous_Integral' hint.
The following is for internal use:
- returns = 'sol' returns the solution to the ODE.
- returns = 'list' returns a list of linearly independent
solutions, for use with non homogeneous solution methods like
variation of parameters and undetermined coefficients. Note that,
though the solutions should be linearly independent, this function
does not explicitly check that. You can do "assert
simplify(wronskian(sollist)) != 0" to check for linear independence.
Also, "assert len(sollist) == order" will need to pass.
- returns = 'both', return a dictionary {'sol':solution to ODE,
'list': list of linearly independent solutions}.
Examples
========
>>> from sympy import Function, dsolve, pprint
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(f(x).diff(x, 4) + 2*f(x).diff(x, 3) -
... 2*f(x).diff(x, 2) - 6*f(x).diff(x) + 5*f(x), f(x),
... hint='nth_linear_constant_coeff_homogeneous'))
x -2*x
f(x) = (C1 + C2*x)*e + (C3*sin(x) + C4*cos(x))*e
References
==========
- http://en.wikipedia.org/wiki/Linear_differential_equation
section: Nonhomogeneous_equation_with_constant_coefficients
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 211
# indirect doctest
"""
x = func.args[0]
f = func.func
r = match
# A generator of constants
constants = numbered_symbols(prefix='C', cls=Symbol, start=1)
# First, set up characteristic equation.
chareq, symbol = S.Zero, Dummy('x')
for i in r.keys():
if type(i) == str or i < 0:
pass
else:
chareq += r[i]*symbol**i
chareq = Poly(chareq, symbol)
chareqroots = [ RootOf(chareq, k) for k in xrange(chareq.degree()) ]
# Create a dict root: multiplicity or charroots
charroots = defaultdict(int)
for root in chareqroots:
charroots[root] += 1
gsol = S(0)
    # We need to keep track of terms so we can run collect() at the end.
# This is necessary for constantsimp to work properly.
global collectterms
collectterms = []
for root, multiplicity in charroots.items():
for i in range(multiplicity):
if isinstance(root, RootOf):
gsol += exp(root*x)*constants.next()
assert multiplicity == 1
collectterms = [(0, root, 0)] + collectterms
else:
reroot = re(root)
imroot = im(root)
gsol += x**i*exp(reroot*x)*(constants.next()*sin(abs(imroot)*x) \
+ constants.next()*cos(imroot*x))
# This ordering is important
collectterms = [(i, reroot, imroot)] + collectterms
if returns == 'sol':
return Eq(f(x), gsol)
    elif returns in ('list', 'both'):
# Create a list of (hopefully) linearly independent solutions
gensols = []
# Keep track of when to use sin or cos for nonzero imroot
for i, reroot, imroot in collectterms:
if imroot == 0:
gensols.append(x**i*exp(reroot*x))
else:
if x**i*exp(reroot*x)*sin(abs(imroot)*x) in gensols:
gensols.append(x**i*exp(reroot*x)*cos(imroot*x))
else:
gensols.append(x**i*exp(reroot*x)*sin(abs(imroot)*x))
if returns == 'list':
return gensols
else:
return {'sol':Eq(f(x), gsol), 'list':gensols}
else:
raise ValueError('Unknown value for key "returns".')
def ode_nth_linear_constant_coeff_undetermined_coefficients(eq, func, order, match):
r"""
Solves an nth order linear differential equation with constant
coefficients using the method of undetermined coefficients.
This method works on differential equations of the form a_n*f(x)^(n)
+ a_(n-1)*f(x)^(n-1) + ... + a1*f'(x) + a0*f(x) = P(x), where P(x)
is a function that has a finite number of linearly independent
derivatives.
    Functions that fit this requirement are finite sums of functions of the
form a*x**i*exp(b*x)*sin(c*x + d) or a*x**i*exp(b*x)*cos(c*x + d),
where i is a non-negative integer and a, b, c, and d are constants.
For example any polynomial in x, functions like x**2*exp(2*x),
x*sin(x), and exp(x)*cos(x) can all be used. Products of sin's and
cos's have a finite number of derivatives, because they can be
expanded into sin(a*x) and cos(b*x) terms. However, SymPy currently
cannot do that expansion, so you will need to manually rewrite the
expression in terms of the above to use this method. So, for example,
you will need to manually convert sin(x)**2 into (1 + cos(2*x))/2 to
properly apply the method of undetermined coefficients on it.
This method works by creating a trial function from the expression
    and all of its linearly independent derivatives and substituting them
    into the original ODE. Matching the coefficients of each term yields a
    system of linear equations, which is solved for and substituted,
giving the solution. If any of the trial functions are linearly
dependent on the solution to the homogeneous equation, they are
multiplied by sufficient x to make them linearly independent.
Examples
========
>>> from sympy import Function, dsolve, pprint, exp, cos
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(f(x).diff(x, 2) + 2*f(x).diff(x) + f(x) -
... 4*exp(-x)*x**2 + cos(2*x), f(x),
... hint='nth_linear_constant_coeff_undetermined_coefficients'))
/ 4\
| x | -x 4*sin(2*x) 3*cos(2*x)
f(x) = |C1 + C2*x + --|*e - ---------- + ----------
\ 3 / 25 25
References
==========
- http://en.wikipedia.org/wiki/Method_of_undetermined_coefficients
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 221
# indirect doctest
"""
gensol = ode_nth_linear_constant_coeff_homogeneous(eq, func, order, match,
returns='both')
match.update(gensol)
return _solve_undetermined_coefficients(eq, func, order, match)
def _solve_undetermined_coefficients(eq, func, order, match):
"""
Helper function for the method of undetermined coefficients.
See the ode_nth_linear_constant_coeff_undetermined_coefficients()
docstring for more information on this method.
match should be a dictionary that has the following keys:
'list' - A list of solutions to the homogeneous equation, such as
the list returned by
ode_nth_linear_constant_coeff_homogeneous(returns='list')
'sol' - The general solution, such as the solution returned by
ode_nth_linear_constant_coeff_homogeneous(returns='sol')
'trialset' - The set of trial functions as returned by
_undetermined_coefficients_match()['trialset']
"""
x = func.args[0]
f = func.func
r = match
coeffs = numbered_symbols('a', cls=Dummy)
coefflist = []
gensols = r['list']
gsol = r['sol']
trialset = r['trialset']
notneedset = set([])
newtrialset = set([])
global collectterms
if len(gensols) != order:
raise NotImplementedError("Cannot find " + str(order) + \
" solutions to the homogeneous equation nessesary to apply " + \
"undetermined coefficients to " + str(eq) + " (number of terms != order)")
usedsin = set([])
mult = 0 # The multiplicity of the root
getmult = True
for i, reroot, imroot in collectterms:
if getmult:
mult = i + 1
getmult = False
if i == 0:
getmult = True
if imroot:
# Alternate between sin and cos
if (i, reroot) in usedsin:
check = x**i*exp(reroot*x)*cos(imroot*x)
else:
check = x**i*exp(reroot*x)*sin(abs(imroot)*x)
usedsin.add((i, reroot))
else:
check = x**i*exp(reroot*x)
if check in trialset:
# If an element of the trial function is already part of the homogeneous
# solution, we need to multiply by sufficient x to make it linearly
# independent. We also don't need to bother checking for the coefficients
# on those elements, since we already know it will be 0.
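# For example, with the double root -1 in the doctest for
# ode_nth_linear_constant_coeff_undetermined_coefficients() above, the
# trial term exp(-x) must be multiplied by x**2 before its coefficient
# can be determined, which is where the x**4/3 term comes from.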
while True:
if check*x**mult in trialset:
mult += 1
else:
break
trialset.add(check*x**mult)
notneedset.add(check)
newtrialset = trialset - notneedset
trialfunc = 0
for i in newtrialset:
c = coeffs.next()
coefflist.append(c)
trialfunc += c*i
eqs = sub_func_doit(eq, f(x), trialfunc)
coeffsdict = dict(zip(trialset, [0]*len(trialset)))
eqs = expand_mul(eqs)
for i in Add.make_args(eqs):
s = separatevars(i, dict=True, symbols=[x])
coeffsdict[s[x]] += s['coeff']
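# coeffsdict now maps each x-dependent term to the sum of its
# coefficients; requiring every entry to vanish gives the linear
# system solved for the undetermined coefficients below.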
coeffvals = solve(coeffsdict.values(), coefflist)
if not coeffvals:
raise NotImplementedError("Could not solve " + str(eq) + " using the " + \
" method of undetermined coefficients (unable to solve for coefficients).")
psol = trialfunc.subs(coeffvals)
return Eq(f(x), gsol.rhs + psol)
def _undetermined_coefficients_match(expr, x):
"""
Returns a trial function match if undetermined coefficients can be
applied to expr, and None otherwise.
A trial expression can be found for use with the method of
undetermined coefficients if expr is an
additive/multiplicative combination of constants, polynomials in x
(the independent variable of expr), sin(a*x + b), cos(a*x + b), and
exp(a*x) terms (in other words, it has a finite number of linearly
independent derivatives).
Note that you may still need to multiply each term returned here by
sufficient x to make it linearly independent with the solutions to
the homogeneous equation.
This is intended for internal use by undetermined_coefficients
hints.
SymPy currently has no way to convert sin(x)**n*cos(y)**m into a sum
of only sin(a*x) and cos(b*x) terms, so these are not implemented.
So, for example, you will need to manually convert sin(x)**2 into
(1 + cos(2*x))/2 to properly apply the method of undetermined
coefficients on it.
Examples
========
>>> from sympy import log, exp
>>> from sympy.solvers.ode import _undetermined_coefficients_match
>>> from sympy.abc import x
>>> _undetermined_coefficients_match(9*x*exp(x) + exp(-x), x)
{'test': True, 'trialset': set([x*exp(x), exp(-x), exp(x)])}
>>> _undetermined_coefficients_match(log(x), x)
{'test': False}
"""
from sympy import S
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
expr = powsimp(expr, combine='exp') # exp(x)*exp(2*x + 1) => exp(3*x + 1)
retdict = {}
def _test_term(expr, x):
"""
Test if expr fits the proper form for undetermined coefficients.
"""
if expr.is_Add:
return all(_test_term(i, x) for i in expr.args)
elif expr.is_Mul:
if expr.has(sin, cos):
foundtrig = False
# Make sure that there is only one trig function in the args.
# See the docstring.
for i in expr.args:
if i.has(sin, cos):
if foundtrig:
return False
else:
foundtrig = True
return all(_test_term(i, x) for i in expr.args)
elif expr.is_Function:
if expr.func in (sin, cos, exp):
if expr.args[0].match(a*x + b):
return True
else:
return False
else:
return False
elif expr.is_Pow and expr.base.is_Symbol and expr.exp.is_Integer and \
expr.exp >= 0:
return True
elif expr.is_Pow and expr.base.is_number:
if expr.exp.match(a*x + b):
return True
else:
return False
elif expr.is_Symbol or expr.is_Number:
return True
else:
return False
def _get_trial_set(expr, x, exprs=set([])):
"""
Returns a set of trial terms for undetermined coefficients.
The idea behind undetermined coefficients is that the terms of the
expression repeat themselves after a finite number of derivatives,
except for the coefficients (the derivatives are linearly dependent
on a finite set of terms). So if we collect these, we should have the
terms of our trial function.
"""
def _remove_coefficient(expr, x):
"""
Returns the expression without a coefficient.
Similar to expr.as_independent(x)[1], except it only works
multiplicatively.
"""
# I was using the below match, but it doesn't always put all of the
# coefficient in c. c.f. 2**x*6*exp(x)*log(2)
# The below code is probably cleaner anyway.
# c = Wild('c', exclude=[x])
# t = Wild('t')
# r = expr.match(c*t)
term = S.One
if expr.is_Mul:
for i in expr.args:
if i.has(x):
term *= i
elif expr.has(x):
term = expr
return term
expr = expand_mul(expr)
if expr.is_Add:
for term in expr.args:
if _remove_coefficient(term, x) in exprs:
pass
else:
exprs.add(_remove_coefficient(term, x))
exprs = exprs.union(_get_trial_set(term, x, exprs))
else:
term = _remove_coefficient(expr, x)
tmpset = exprs.union(set([term]))
oldset = set([])
while tmpset != oldset:
# If you get stuck in this loop, then _test_term is probably broken
oldset = tmpset.copy()
expr = expr.diff(x)
term = _remove_coefficient(expr, x)
if term.is_Add:
tmpset = tmpset.union(_get_trial_set(term, x, tmpset))
else:
tmpset.add(term)
exprs = tmpset
return exprs
retdict['test'] = _test_term(expr, x)
if retdict['test']:
# Try to generate a list of trial solutions that will have the undetermined
# coefficients. Note that if any of these are not linearly independent
# with any of the solutions to the homogeneous equation, then they will
# need to be multiplied by sufficient x to make them so. This function
# DOES NOT do that (it doesn't even look at the homogeneous equation).
retdict['trialset'] = _get_trial_set(expr, x)
return retdict
def ode_nth_linear_constant_coeff_variation_of_parameters(eq, func, order, match):
r"""
Solves an nth order linear differential equation with constant
coefficients using the method of variation of parameters.
This method works on any differential equations of the form
f(x)^(n) + a_(n-1)*f(x)^(n-1) + ... + a1*f'(x) + a0*f(x) = P(x).
This method works by assuming that the particular solution takes the
form Sum(c_i(x)*y_i(x), (i, 1, n)), where y_i is the ith solution to
the homogeneous equation. The coefficients c_i(x) are then found using
Wronskians and Cramer's Rule. The particular solution is given by
Sum(Integral(W_i(x)/W(x), x)*y_i(x), (i, 1, n)), where W(x) is the
Wronskian of the fundamental system (the system of n linearly
independent solutions to the homogeneous equation), and W_i(x) is
the Wronskian of the fundamental system with the ith column replaced
with [0, 0, ..., 0, P(x)].
This method is general enough to solve any nth order inhomogeneous
linear differential equation with constant coefficients, but
sometimes SymPy cannot simplify the Wronskian well enough to
integrate it. If this method hangs, try using the
'nth_linear_constant_coeff_variation_of_parameters_Integral' hint
and simplifying the integrals manually. Also, prefer using
'nth_linear_constant_coeff_undetermined_coefficients' when it
applies, because it doesn't use integration, making it faster and
more reliable.
Warning: using simplify=False with
'nth_linear_constant_coeff_variation_of_parameters' in dsolve()
may cause it to hang, because it will not attempt to simplify
the Wronskian before integrating. It is recommended that you only
use simplify=False with
'nth_linear_constant_coeff_variation_of_parameters_Integral' for
this method, especially if the solution to the homogeneous
equation has trigonometric functions in it.
Examples
========
>>> from sympy import Function, dsolve, pprint, exp, log
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(f(x).diff(x, 3) - 3*f(x).diff(x, 2) +
... 3*f(x).diff(x) - f(x) - exp(x)*log(x), f(x),
... hint='nth_linear_constant_coeff_variation_of_parameters'))
/ 3 \
| 2 x *(6*log(x) - 11)| x
f(x) = |C1 + C2*x + C3*x + ------------------|*e
\ 36 /
References
==========
- http://en.wikipedia.org/wiki/Variation_of_parameters
- http://planetmath.org/encyclopedia/VariationOfParameters.html
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 233
# indirect doctest
"""
gensol = ode_nth_linear_constant_coeff_homogeneous(eq, func, order, match,
returns='both')
match.update(gensol)
return _solve_variation_of_parameters(eq, func, order, match)
def _solve_variation_of_parameters(eq, func, order, match):
"""
Helper function for the method of variation of parameters.
See the ode_nth_linear_constant_coeff_variation_of_parameters()
docstring for more information on this method.
match should be a dictionary that has the following keys:
'list' - A list of solutions to the homogeneous equation, such as
the list returned by
ode_nth_linear_constant_coeff_homogeneous(returns='list')
'sol' - The general solution, such as the solution returned by
ode_nth_linear_constant_coeff_homogeneous(returns='sol')
"""
x = func.args[0]
f = func.func
r = match
psol = 0
gensols = r['list']
gsol = r['sol']
wr = wronskian(gensols, x)
if r.get('simplify', True):
wr = simplify(wr) # We need much better simplification for some ODEs.
# See issue 1563, for example.
# To reduce commonly occurring sin(x)**2 + cos(x)**2 to 1
wr = trigsimp(wr, deep=True, recursive=True)
if not wr:
# The wronskian will be 0 iff the solutions are not linearly independent.
raise NotImplementedError("Cannot find " + str(order) + \
" solutions to the homogeneous equation nessesary to apply " + \
"variation of parameters to " + str(eq) + " (Wronskian == 0)")
if len(gensols) != order:
raise NotImplementedError("Cannot find " + str(order) + \
" solutions to the homogeneous equation nessesary to apply " + \
"variation of parameters to " + str(eq) + " (number of terms != order)")
negoneterm = (-1)**(order)
for i in gensols:
psol += negoneterm*C.Integral(wronskian(filter(lambda x: x != i, \
gensols), x)*r[-1]/wr, x)*i/r[order]
negoneterm *= -1
if r.get('simplify', True):
psol = simplify(psol)
psol = trigsimp(psol, deep=True)
return Eq(f(x), gsol.rhs + psol)
def ode_separable(eq, func, order, match):
r"""
Solves separable 1st order differential equations.
This is any differential equation that can be written as
P(y)*dy/dx = Q(x). The solution can then just be found by
rearranging terms and integrating:
Integral(P(y), y) = Integral(Q(x), x). This hint uses separatevars()
as its back end, so if a separable equation is not caught by this
solver, it is most likely the fault of that function. separatevars()
is smart enough to do most expansion and factoring necessary to
convert a separable equation F(x, y) into the proper form P(x)*Q(y).
The general solution is::
>>> from sympy import Function, dsolve, Eq, pprint
>>> from sympy.abc import x
>>> a, b, c, d, f = map(Function, ['a', 'b', 'c', 'd', 'f'])
>>> genform = Eq(a(x)*b(f(x))*f(x).diff(x), c(x)*d(f(x)))
>>> pprint(genform)
d
a(x)*b(f(x))*--(f(x)) = c(x)*d(f(x))
dx
>>> pprint(dsolve(genform, f(x), hint='separable_Integral'))
f(x)
/ /
| |
| b(y) | c(x)
| ---- dy = C1 + | ---- dx
| d(y) | a(x)
| |
/ /
Examples
========
>>> from sympy import Function, dsolve, Eq
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(Eq(f(x)*f(x).diff(x) + x, 3*x*f(x)**2), f(x),
... hint='separable', simplify=False))
/ 2 \ 2
log\3*f (x) - 1/ x
---------------- = C1 + --
6 2
References
==========
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 52
# indirect doctest
"""
x = func.args[0]
f = func.func
C1 = Symbol('C1')
r = match # {'m1':m1, 'm2':m2, 'y':y}
return Eq(C.Integral(r['m2']['coeff']*r['m2'][r['y']]/r['m1'][r['y']],
(r['y'], None, f(x))), C.Integral(-r['m1']['coeff']*r['m1'][x]/
r['m2'][x], x)+C1)
|
flacjacket/sympy
|
sympy/solvers/ode.py
|
Python
|
bsd-3-clause
| 125,328
|
#!/usr/bin/env python
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
import roslib; roslib.load_manifest('vigir_behavior_simple_joint_control_test')
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, Logger
from vigir_flexbe_states.check_current_control_mode_state import CheckCurrentControlModeState
from vigir_flexbe_states.change_control_mode_action_state import ChangeControlModeActionState
from vigir_flexbe_states.moveit_move_group_state import MoveitMoveGroupState
from flexbe_states.decision_state import DecisionState
from flexbe_states.calculation_state import CalculationState
from flexbe_states.wait_state import WaitState
from vigir_flexbe_states.execute_trajectory_state import ExecuteTrajectoryState
from flexbe_states.flexible_calculation_state import FlexibleCalculationState
from vigir_flexbe_states.update_dynamic_parameter_state import UpdateDynamicParameterState
from vigir_flexbe_states.read_dynamic_parameter_state import ReadDynamicParameterState
from flexbe_states.start_record_logs_state import StartRecordLogsState
from flexbe_states.stop_record_logs_state import StopRecordLogsState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
import time
import os
import rospy
# [/MANUAL_IMPORT]
'''
Created on Mon Nov 03 2014
@author: Philipp and Spyros
'''
class SimpleJointControlTestSM(Behavior):
'''
Get step response of joint controllers by varying PID gains.
'''
def __init__(self):
super(SimpleJointControlTestSM, self).__init__()
self.name = 'Simple Joint Control Test'
# parameters of this behavior
self.add_parameter('topics_to_record', '')
self.add_parameter('joint_upper_bounds', 0.6)
self.add_parameter('joint_lower_bounds', 0.4)
self.add_parameter('real_robot', True)
# references to used behaviors
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# 0-5 left arm
# 6-11 right arm
# for each: wrx, wry, elx, ely, shx, shz
# simulation
self._joint_limits_sim = [ \
[-0.44, 1.57], \
[-1.57, 1.57], \
[0.00, 2.35], \
[0.00, 3.14], \
[-1.40, 1.75], \
[-1.96, 1.96], \
\
[-1.57, 0.44], \
[-1.57, 1.57], \
[-2.35, 0.00], \
[0.00, 3.14], \
[-1.75, 1.40], \
[-1.96, 1.96] \
]
# real robot
self._joint_limits_rob = [ \
[-1.18, 1.18], \
[0.00, 3.14], \
[0.00, 2.36], \
[0.00, 3.14], \
[-1.57, 1.57], \
[-1.57, 0.79], \
\
[-1.18, 1.18], \
[0.00, 3.14], \
[-2.36, 0.00], \
[0.00, 3.14], \
[-1.57, 1.57], \
[-1.57, 0.79], \
]
self._joint_limits = []
# joint order: shz, shx, ely, elx, wry, wrx
self._joint_configs_down = []
self._joint_configs_up = []
self._traj_controllers = [ \
UpdateDynamicParameterState.LEFT_ARM_WRX, \
UpdateDynamicParameterState.LEFT_ARM_WRY, \
UpdateDynamicParameterState.LEFT_ARM_ELX, \
UpdateDynamicParameterState.LEFT_ARM_ELY, \
UpdateDynamicParameterState.LEFT_ARM_SHX, \
UpdateDynamicParameterState.LEFT_ARM_SHZ, \
\
UpdateDynamicParameterState.RIGHT_ARM_WRX, \
UpdateDynamicParameterState.RIGHT_ARM_WRY, \
UpdateDynamicParameterState.RIGHT_ARM_ELX, \
UpdateDynamicParameterState.RIGHT_ARM_ELY, \
UpdateDynamicParameterState.RIGHT_ARM_SHX, \
UpdateDynamicParameterState.RIGHT_ARM_SHZ \
]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
joint_names_left = ["l_arm_shz", "l_arm_shx", "l_arm_ely", "l_arm_elx", "l_arm_wry", "l_arm_wrx"]
joint_names_right = ["r_arm_shz", "r_arm_shx", "r_arm_ely", "r_arm_elx", "r_arm_wry", "r_arm_wrx"]
wait_time = 3.0
bagfolder = "" # calculated
gains_list = {'pid_gains': ['p', 'i', 'd'], 'bdi_gains': ['k_qd_p', 'ff_qd_d'], 'vigir_gains': ['ff_bang', 'ff_effort', 'ff_friction']}
# x:30 y:365, x:130 y:365
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
_state_machine.userdata.joints_left_up = [] # calculated
_state_machine.userdata.joints_right_up = [] # calculated
_state_machine.userdata.joint_index = 0
_state_machine.userdata.zero_time = [0.02]
_state_machine.userdata.joint_positions_up = [] # calculated
_state_machine.userdata.joint_positions_down = [] # calculated
_state_machine.userdata.none = None
_state_machine.userdata.init_time = [3.0]
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# 'Basic' configuration for SIMULATION
#_state_machine.userdata.joints_left_up = [0.00, 0.18, 1.57, 1.18, 0.00, 0.57]
#_state_machine.userdata.joints_right_up = [0.00, -0.18, 1.57, -1.18, 0.00, -0.57]
logs_folder = os.path.expanduser('~/joint_control_tests/')
if not os.path.exists(logs_folder):
os.makedirs(logs_folder)
bagfolder = os.path.join(logs_folder, "run_" + time.strftime("%Y-%m-%d-%H_%M"))
os.makedirs(bagfolder)
self._joint_limits = self._joint_limits_rob if self.real_robot else self._joint_limits_sim
# standard config
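# Note: _joint_limits is ordered wrx..shz (see the [MANUAL_INIT] comment
# above) while the MoveIt joint names are ordered shz..wrx, hence the
# reversed [5-i] indexing below; each joint starts at the middle of its
# range.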
joints_left_up = [0] * 6
for i in range(6):
joint_range = self._joint_limits[i][1] - self._joint_limits[i][0]
joints_left_up[5-i] = self._joint_limits[i][0] + joint_range * 0.5
joints_right_up = [0] * 6
for i in range(6):
joint_range = self._joint_limits[i+6][1] - self._joint_limits[i+6][0]
joints_right_up[5-i] = self._joint_limits[i+6][0] + joint_range * 0.5
_state_machine.userdata.joints_left_up = joints_left_up
_state_machine.userdata.joints_right_up = joints_right_up
rospy.loginfo('Average left joint positions: ' + ' '.join(map(str, joints_left_up)))
rospy.loginfo('Average right joint positions: ' + ' '.join(map(str, joints_right_up)))
# left
for i in range(6):
joint_config_up = list(_state_machine.userdata.joints_left_up)
joint_config_down = list(_state_machine.userdata.joints_left_up)
joint_range = self._joint_limits[i][1] - self._joint_limits[i][0]
joint_config_up[5-i] = self._joint_limits[i][0] + joint_range * self.joint_upper_bounds
joint_config_down[5-i] = self._joint_limits[i][0] + joint_range * self.joint_lower_bounds
self._joint_configs_up.append([joint_config_up])
self._joint_configs_down.append([joint_config_down])
rospy.loginfo('Left Joint Config Up: ' + ' '.join(map(str, joint_config_up)))
rospy.loginfo('Left Joint Config Dn: ' + ' '.join(map(str, joint_config_down)))
# right
for i in range(6):
joint_config_up = list(_state_machine.userdata.joints_right_up)
joint_config_down = list(_state_machine.userdata.joints_right_up)
joint_range = self._joint_limits[i+6][1] - self._joint_limits[i+6][0]
joint_config_up[5-i] = self._joint_limits[i+6][0] + joint_range * self.joint_upper_bounds
joint_config_down[5-i] = self._joint_limits[i+6][0] + joint_range * self.joint_lower_bounds
self._joint_configs_up.append([joint_config_up])
self._joint_configs_down.append([joint_config_down])
rospy.loginfo('Right Joint Config Up: ' + ' '.join(map(str, joint_config_up)))
rospy.loginfo('Right Joint Config Dn: ' + ' '.join(map(str, joint_config_down)))
# [/MANUAL_CREATE]
# x:30 y:365, x:130 y:365
_sm_move_joint_down_0 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['joint_index', 'joint_positions_down', 'zero_time', 'joints_right_up', 'joints_left_up', 'init_time'])
with _sm_move_joint_down_0:
# x:71 y:145
OperatableStateMachine.add('Move_Left_Arm_Back',
MoveitMoveGroupState(planning_group="l_arm_group", joint_names=joint_names_left),
transitions={'reached': 'Move_Right_Arm_Back', 'failed': 'failed'},
autonomy={'reached': Autonomy.Low, 'failed': Autonomy.High},
remapping={'target_joint_config': 'joints_left_up'})
# x:639 y:69
OperatableStateMachine.add('Move_Left_Joint_Down',
ExecuteTrajectoryState(controller=ExecuteTrajectoryState.CONTROLLER_LEFT_ARM, joint_names=joint_names_left),
transitions={'done': 'finished', 'failed': 'failed'},
autonomy={'done': Autonomy.Low, 'failed': Autonomy.Off},
remapping={'joint_positions': 'joint_positions_down', 'time': 'init_time'})
# x:631 y:200
OperatableStateMachine.add('Move_Right_Joint_Down',
ExecuteTrajectoryState(controller=ExecuteTrajectoryState.CONTROLLER_RIGHT_ARM, joint_names=joint_names_right),
transitions={'done': 'finished', 'failed': 'failed'},
autonomy={'done': Autonomy.High, 'failed': Autonomy.Off},
remapping={'joint_positions': 'joint_positions_down', 'time': 'init_time'})
# x:201 y:54
OperatableStateMachine.add('Move_Right_Arm_Back',
MoveitMoveGroupState(planning_group="r_arm_group", joint_names=joint_names_right),
transitions={'reached': 'Decide_Left_Or_Right', 'failed': 'failed'},
autonomy={'reached': Autonomy.Low, 'failed': Autonomy.High},
remapping={'target_joint_config': 'joints_right_up'})
# x:429 y:62
OperatableStateMachine.add('Decide_Left_Or_Right',
DecisionState(outcomes=["left", "right"], conditions=lambda it: "left" if it < 6 else "right"),
transitions={'left': 'Move_Left_Joint_Down', 'right': 'Move_Right_Joint_Down'},
autonomy={'left': Autonomy.High, 'right': Autonomy.High},
remapping={'input_value': 'joint_index'})
# x:30 y:365, x:130 y:365
_sm_perform_gain_test_right_1 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['joint_positions_up', 'joint_positions_down', 'zero_time'])
with _sm_perform_gain_test_right_1:
# x:84 y:39
OperatableStateMachine.add('Initial_Wait',
WaitState(wait_time=wait_time),
transitions={'done': 'Perform_Step_Up'},
autonomy={'done': Autonomy.Off})
# x:80 y:218
OperatableStateMachine.add('Wait_Up',
WaitState(wait_time=wait_time),
transitions={'done': 'Perform_Step_Down'},
autonomy={'done': Autonomy.Off})
# x:44 y:331
OperatableStateMachine.add('Perform_Step_Down',
ExecuteTrajectoryState(controller=ExecuteTrajectoryState.CONTROLLER_RIGHT_ARM, joint_names=joint_names_right),
transitions={'done': 'Wait_Down', 'failed': 'failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'joint_positions': 'joint_positions_down', 'time': 'zero_time'})
# x:73 y:440
OperatableStateMachine.add('Wait_Down',
WaitState(wait_time=wait_time),
transitions={'done': 'Perform_Step_Up_2'},
autonomy={'done': Autonomy.Off})
# x:414 y:401
OperatableStateMachine.add('Perform_Step_Up_2',
ExecuteTrajectoryState(controller=ExecuteTrajectoryState.CONTROLLER_RIGHT_ARM, joint_names=joint_names_right),
transitions={'done': 'Wait_Up_2', 'failed': 'failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'joint_positions': 'joint_positions_up', 'time': 'zero_time'})
# x:442 y:291
OperatableStateMachine.add('Wait_Up_2',
WaitState(wait_time=wait_time),
transitions={'done': 'Perform_Step_Down_2'},
autonomy={'done': Autonomy.Off})
# x:416 y:167
OperatableStateMachine.add('Perform_Step_Down_2',
ExecuteTrajectoryState(controller=ExecuteTrajectoryState.CONTROLLER_RIGHT_ARM, joint_names=joint_names_right),
transitions={'done': 'Wait_Down_2', 'failed': 'failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'joint_positions': 'joint_positions_down', 'time': 'zero_time'})
# x:449 y:62
OperatableStateMachine.add('Wait_Down_2',
WaitState(wait_time=wait_time),
transitions={'done': 'finished'},
autonomy={'done': Autonomy.Off})
# x:48 y:113
OperatableStateMachine.add('Perform_Step_Up',
ExecuteTrajectoryState(controller=ExecuteTrajectoryState.CONTROLLER_RIGHT_ARM, joint_names=joint_names_right),
transitions={'done': 'Wait_Up', 'failed': 'failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'joint_positions': 'joint_positions_up', 'time': 'zero_time'})
# x:30 y:365, x:130 y:365
_sm_perform_gain_test_left_2 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['joint_positions_up', 'joint_positions_down', 'zero_time'])
with _sm_perform_gain_test_left_2:
# x:84 y:39
OperatableStateMachine.add('Initial_Wait',
WaitState(wait_time=wait_time),
transitions={'done': 'Perform_Step_Up_1'},
autonomy={'done': Autonomy.Off})
# x:87 y:232
OperatableStateMachine.add('Wait_Up_1',
WaitState(wait_time=wait_time),
transitions={'done': 'Perform_Step_Down_1'},
autonomy={'done': Autonomy.Off})
# x:50 y:321
OperatableStateMachine.add('Perform_Step_Down_1',
ExecuteTrajectoryState(controller=ExecuteTrajectoryState.CONTROLLER_LEFT_ARM, joint_names=joint_names_left),
transitions={'done': 'Wait_Down_1', 'failed': 'failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'joint_positions': 'joint_positions_down', 'time': 'zero_time'})
# x:77 y:415
OperatableStateMachine.add('Wait_Down_1',
WaitState(wait_time=wait_time),
transitions={'done': 'Perform_Step_Up_2'},
autonomy={'done': Autonomy.Off})
# x:51 y:131
OperatableStateMachine.add('Perform_Step_Up_1',
ExecuteTrajectoryState(controller=ExecuteTrajectoryState.CONTROLLER_LEFT_ARM, joint_names=joint_names_left),
transitions={'done': 'Wait_Up_1', 'failed': 'failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'joint_positions': 'joint_positions_up', 'time': 'zero_time'})
# x:414 y:401
OperatableStateMachine.add('Perform_Step_Up_2',
ExecuteTrajectoryState(controller=ExecuteTrajectoryState.CONTROLLER_LEFT_ARM, joint_names=joint_names_left),
transitions={'done': 'Wait_Up_2', 'failed': 'failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'joint_positions': 'joint_positions_up', 'time': 'zero_time'})
# x:442 y:291
OperatableStateMachine.add('Wait_Up_2',
WaitState(wait_time=wait_time),
transitions={'done': 'Perform_Step_Down_2'},
autonomy={'done': Autonomy.Off})
# x:416 y:167
OperatableStateMachine.add('Perform_Step_Down_2',
ExecuteTrajectoryState(controller=ExecuteTrajectoryState.CONTROLLER_LEFT_ARM, joint_names=joint_names_left),
transitions={'done': 'Wait_Down_2', 'failed': 'failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'joint_positions': 'joint_positions_down', 'time': 'zero_time'})
# x:449 y:62
OperatableStateMachine.add('Wait_Down_2',
WaitState(wait_time=wait_time),
transitions={'done': 'finished'},
autonomy={'done': Autonomy.Off})
# x:30 y:365, x:130 y:365
_sm_test_individual_joint_3 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['joint_positions_up', 'joint_positions_down', 'joint_index', 'traj_controller', 'none', 'zero_time', 'joints_right_up', 'joints_left_up', 'init_time'])
with _sm_test_individual_joint_3:
# x:45 y:60
OperatableStateMachine.add('Initialize_Iteration',
CalculationState(calculation=lambda x: 0),
transitions={'done': 'Move_Joint_Down'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'none', 'output_value': 'iteration'})
# x:520 y:555
OperatableStateMachine.add('Perform_Gain_Test_Left',
_sm_perform_gain_test_left_2,
transitions={'finished': 'Stop_Gain_Logs', 'failed': 'Stop_Gain_Logs'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'joint_positions_up': 'joint_positions_up', 'joint_positions_down': 'joint_positions_down', 'zero_time': 'zero_time'})
# x:176 y:388
OperatableStateMachine.add('Decide_If_Tests_To_Go',
DecisionState(outcomes=["done", "continue"], conditions=lambda it: "done" if it == 5 else "continue"),
transitions={'done': 'Reset_Joint_Gains', 'continue': 'Calculate_Next_Gain_Value'},
autonomy={'done': Autonomy.Off, 'continue': Autonomy.Off},
remapping={'input_value': 'iteration'})
# x:144 y:298
OperatableStateMachine.add('Calculate_Next_Gain_Value',
FlexibleCalculationState(calculation=self.calculate_gains, input_keys=["iteration", "nominal_gain"]),
transitions={'done': 'Set_Joint_Gain'},
autonomy={'done': Autonomy.Off},
remapping={'iteration': 'iteration', 'nominal_gain': 'nominal_gains', 'output_value': 'altered_gains'})
# x:395 y:268
OperatableStateMachine.add('Set_Joint_Gain',
UpdateDynamicParameterState(param=gains_list),
transitions={'updated': 'Set_Logfile_Name', 'failed': 'failed'},
autonomy={'updated': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'traj_controller': 'traj_controller', 'parameter_value': 'altered_gains'})
# x:190 y:193
OperatableStateMachine.add('Get_Joint_Gains',
ReadDynamicParameterState(param=gains_list),
transitions={'read': 'Calculate_Next_Gain_Value', 'failed': 'failed'},
autonomy={'read': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'traj_controller': 'traj_controller', 'parameter_value': 'nominal_gains'})
# x:158 y:505
OperatableStateMachine.add('Increment_Iteration_Counter',
CalculationState(calculation=lambda it: it + 1),
transitions={'done': 'Decide_If_Tests_To_Go'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'iteration', 'output_value': 'iteration'})
# x:798 y:435
OperatableStateMachine.add('Decide_Left_Or_Right',
DecisionState(outcomes=["left", "right"], conditions=lambda it: "left" if it < 6 else "right"),
transitions={'left': 'Perform_Gain_Test_Left', 'right': 'Perform_Gain_Test_Right'},
autonomy={'left': Autonomy.High, 'right': Autonomy.High},
remapping={'input_value': 'joint_index'})
# x:811 y:624
OperatableStateMachine.add('Perform_Gain_Test_Right',
_sm_perform_gain_test_right_1,
transitions={'finished': 'Stop_Gain_Logs', 'failed': 'Stop_Gain_Logs'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'joint_positions_up': 'joint_positions_up', 'joint_positions_down': 'joint_positions_down', 'zero_time': 'zero_time'})
# x:545 y:458
OperatableStateMachine.add('Start_Gain_Logs',
StartRecordLogsState(topics_to_record=self.topics_to_record),
transitions={'logging': 'Decide_Left_Or_Right'},
autonomy={'logging': Autonomy.Off},
remapping={'bagfile_name': 'bagfile_name', 'rosbag_process': 'rosbag_process'})
# x:184 y:616
OperatableStateMachine.add('Stop_Gain_Logs',
StopRecordLogsState(),
transitions={'stopped': 'Increment_Iteration_Counter'},
autonomy={'stopped': Autonomy.Off},
remapping={'rosbag_process': 'rosbag_process'})
# x:576 y:346
OperatableStateMachine.add('Set_Logfile_Name',
FlexibleCalculationState(calculation=lambda i: bagfolder + self._traj_controllers[i[1]][1] + "_k_p_" + str(i[0][0]) + ".bag", input_keys=["gain_percentage", "joint_index"]),
transitions={'done': 'Start_Gain_Logs'},
autonomy={'done': Autonomy.Off},
remapping={'gain_percentage': 'altered_gains', 'joint_index': 'joint_index', 'output_value': 'bagfile_name'})
# x:210 y:53
OperatableStateMachine.add('Move_Joint_Down',
_sm_move_joint_down_0,
transitions={'finished': 'Get_Joint_Gains', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'joint_index': 'joint_index', 'joint_positions_down': 'joint_positions_down', 'zero_time': 'zero_time', 'joints_right_up': 'joints_right_up', 'joints_left_up': 'joints_left_up', 'init_time': 'init_time'})
# x:365 y:430
OperatableStateMachine.add('Reset_Joint_Gains',
UpdateDynamicParameterState(param=gains_list),
transitions={'updated': 'finished', 'failed': 'failed'},
autonomy={'updated': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'traj_controller': 'traj_controller', 'parameter_value': 'nominal_gains'})
# x:30 y:365, x:130 y:365
_sm_test_joint_controls_4 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['joint_index', 'none', 'zero_time', 'joints_right_up', 'joints_left_up', 'init_time'])
with _sm_test_joint_controls_4:
# x:47 y:121
OperatableStateMachine.add('Decide_Joints_To_Go',
DecisionState(outcomes=["done", "continue"], conditions=lambda idx: "done" if idx == len(self._joint_configs_down) else "continue"),
transitions={'done': 'finished', 'continue': 'Select_Next_Joint_Up'},
autonomy={'done': Autonomy.High, 'continue': Autonomy.Low},
remapping={'input_value': 'joint_index'})
# x:257 y:290
OperatableStateMachine.add('Select_Next_Joint_Up',
CalculationState(calculation=lambda idx: self._joint_configs_up[idx]),
transitions={'done': 'Select_Next_Joint_Down'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'joint_index', 'output_value': 'joint_positions_up'})
# x:571 y:68
OperatableStateMachine.add('Test_Individual_Joint',
_sm_test_individual_joint_3,
transitions={'finished': 'Increment_Joint_Index', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'joint_positions_up': 'joint_positions_up', 'joint_positions_down': 'joint_positions_down', 'joint_index': 'joint_index', 'traj_controller': 'traj_controller', 'none': 'none', 'zero_time': 'zero_time', 'joints_right_up': 'joints_right_up', 'joints_left_up': 'joints_left_up', 'init_time': 'init_time'})
# x:529 y:324
OperatableStateMachine.add('Select_Next_Joint_Down',
CalculationState(calculation=lambda idx: self._joint_configs_down[idx]),
transitions={'done': 'Set_Trajectory_Controller'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'joint_index', 'output_value': 'joint_positions_down'})
# x:222 y:51
OperatableStateMachine.add('Increment_Joint_Index',
CalculationState(calculation=lambda it: it + 1),
transitions={'done': 'Decide_Joints_To_Go'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'joint_index', 'output_value': 'joint_index'})
# x:559 y:189
OperatableStateMachine.add('Set_Trajectory_Controller',
CalculationState(calculation=lambda idx: self._traj_controllers[idx]),
transitions={'done': 'Test_Individual_Joint'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'joint_index', 'output_value': 'traj_controller'})
with _state_machine:
# x:112 y:38
OperatableStateMachine.add('Check_Initial_Stand',
CheckCurrentControlModeState(target_mode=CheckCurrentControlModeState.STAND, wait=False),
transitions={'correct': 'Switch_To_Manipulate', 'incorrect': 'Set_Initial_Stand'},
autonomy={'correct': Autonomy.Low, 'incorrect': Autonomy.Low},
remapping={'control_mode': 'control_mode'})
# x:336 y:123
OperatableStateMachine.add('Set_Initial_Stand',
ChangeControlModeActionState(target_mode=ChangeControlModeActionState.STAND),
transitions={'changed': 'Switch_To_Manipulate', 'failed': 'failed'},
autonomy={'changed': Autonomy.Low, 'failed': Autonomy.High})
# x:60 y:235
OperatableStateMachine.add('Switch_To_Manipulate',
ChangeControlModeActionState(target_mode=ChangeControlModeActionState.MANIPULATE),
transitions={'changed': 'Bring_Left_Arm_Up', 'failed': 'failed'},
autonomy={'changed': Autonomy.Low, 'failed': Autonomy.High})
# x:105 y:428
OperatableStateMachine.add('Bring_Left_Arm_Up',
MoveitMoveGroupState(planning_group="l_arm_group", joint_names=joint_names_left),
transitions={'reached': 'Bring_Right_Arm_Up', 'failed': 'failed'},
autonomy={'reached': Autonomy.Low, 'failed': Autonomy.High},
remapping={'target_joint_config': 'joints_left_up'})
# x:323 y:482
OperatableStateMachine.add('Bring_Right_Arm_Up',
MoveitMoveGroupState(planning_group="r_arm_group", joint_names=joint_names_right),
transitions={'reached': 'Test_Joint_Controls', 'failed': 'failed'},
autonomy={'reached': Autonomy.High, 'failed': Autonomy.High},
remapping={'target_joint_config': 'joints_right_up'})
# x:620 y:465
OperatableStateMachine.add('Test_Joint_Controls',
_sm_test_joint_controls_4,
transitions={'finished': 'Change_Back_To_Stand', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'joint_index': 'joint_index', 'none': 'none', 'zero_time': 'zero_time', 'joints_right_up': 'joints_right_up', 'joints_left_up': 'joints_left_up', 'init_time': 'init_time'})
# x:831 y:349
OperatableStateMachine.add('Change_Back_To_Stand',
ChangeControlModeActionState(target_mode=ChangeControlModeActionState.STAND),
transitions={'changed': 'finished', 'failed': 'failed'},
autonomy={'changed': Autonomy.Off, 'failed': Autonomy.Off})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
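# calculate_gains feeds the Calculate_Next_Gain_Value state above: as we
# read the generated state machine, iterations 0..4 scale the first
# (proportional) gain to 40%, 60%, 80%, 100% and 120% of its nominal
# value, while all remaining gains are zeroed for the step-response
# recording.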
def calculate_gains(self, input_values):
iteration = input_values[0]
nominal_gains = input_values[1]
gain_percentage = nominal_gains[0] * (0.4 + 0.2 * iteration)
altered_gains = [gain_percentage]
for gain in nominal_gains[1:]:
altered_gains.append(0)
return altered_gains
# [/MANUAL_FUNC]
|
team-vigir/vigir_behaviors
|
behaviors/vigir_behavior_simple_joint_control_test/src/vigir_behavior_simple_joint_control_test/simple_joint_control_test_sm.py
|
Python
|
bsd-3-clause
| 26,416
|
import pytest
from httpie import ExitStatus
from httpie.output.formatters.colors import get_lexer
from utils import TestEnvironment, http, HTTP_OK, COLOR, CRLF
class TestVerboseFlag:
def test_verbose(self, httpbin):
r = http('--verbose',
'GET', httpbin.url + '/get', 'test-header:__test__')
assert HTTP_OK in r
assert r.count('__test__') == 2
def test_verbose_form(self, httpbin):
# https://github.com/jakubroztocil/httpie/issues/53
r = http('--verbose', '--form', 'POST', httpbin.url + '/post',
'A=B', 'C=D')
assert HTTP_OK in r
assert 'A=B&C=D' in r
def test_verbose_json(self, httpbin):
r = http('--verbose',
'POST', httpbin.url + '/post', 'foo=bar', 'baz=bar')
assert HTTP_OK in r
assert '"baz": "bar"' in r
class TestColors:
@pytest.mark.parametrize('mime', [
'application/json',
'application/json+foo',
'application/foo+json',
'foo/json',
'foo/json+bar',
'foo/bar+json',
])
def test_get_lexer(self, mime):
lexer = get_lexer(mime)
assert lexer is not None
assert lexer.name == 'JSON'
def test_get_lexer_not_found(self):
assert get_lexer('xxx/yyy') is None
class TestPrettyOptions:
"""Test the --pretty flag handling."""
def test_pretty_enabled_by_default(self, httpbin):
env = TestEnvironment(colors=256)
r = http('GET', httpbin.url + '/get', env=env)
assert COLOR in r
def test_pretty_enabled_by_default_unless_stdout_redirected(self, httpbin):
r = http('GET', httpbin.url + '/get')
assert COLOR not in r
def test_force_pretty(self, httpbin):
env = TestEnvironment(stdout_isatty=False, colors=256)
r = http('--pretty=all', 'GET', httpbin.url + '/get', env=env, )
assert COLOR in r
def test_force_ugly(self, httpbin):
r = http('--pretty=none', 'GET', httpbin.url + '/get')
assert COLOR not in r
def test_subtype_based_pygments_lexer_match(self, httpbin):
"""Test that media subtype is used if type/subtype doesn't
match any lexer.
"""
env = TestEnvironment(colors=256)
r = http('--print=B', '--pretty=all', httpbin.url + '/post',
'Content-Type:text/foo+json', 'a=b', env=env)
assert COLOR in r
def test_colors_option(self, httpbin):
env = TestEnvironment(colors=256)
r = http('--print=B', '--pretty=colors',
'GET', httpbin.url + '/get', 'a=b',
env=env)
# Tests that the JSON data isn't formatted.
assert not r.strip().count('\n')
assert COLOR in r
def test_format_option(self, httpbin):
env = TestEnvironment(colors=256)
r = http('--print=B', '--pretty=format',
'GET', httpbin.url + '/get', 'a=b',
env=env)
# Tests that the JSON data is formatted.
assert r.strip().count('\n') == 2
assert COLOR not in r
class TestLineEndings:
"""
Test that CRLF is properly used in headers
and as the headers/body separator.
"""
def _validate_crlf(self, msg):
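# Consume the header lines first: every header line must end with
# CRLF, and a bare CRLF marks the end of the header section; the
# remaining lines form the body, which must contain no CRLF.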
lines = iter(msg.splitlines(True))
for header in lines:
if header == CRLF:
break
assert header.endswith(CRLF), repr(header)
else:
assert 0, 'CRLF between headers and body not found in %r' % msg
body = ''.join(lines)
assert CRLF not in body
return body
def test_CRLF_headers_only(self, httpbin):
r = http('--headers', 'GET', httpbin.url + '/get')
body = self._validate_crlf(r)
assert not body, 'Garbage after headers: %r' % r
def test_CRLF_ugly_response(self, httpbin):
r = http('--pretty=none', 'GET', httpbin.url + '/get')
self._validate_crlf(r)
def test_CRLF_formatted_response(self, httpbin):
r = http('--pretty=format', 'GET', httpbin.url + '/get')
assert r.exit_status == ExitStatus.OK
self._validate_crlf(r)
def test_CRLF_ugly_request(self, httpbin):
r = http('--pretty=none', '--print=HB', 'GET', httpbin.url + '/get')
self._validate_crlf(r)
def test_CRLF_formatted_request(self, httpbin):
r = http('--pretty=format', '--print=HB', 'GET', httpbin.url + '/get')
self._validate_crlf(r)
|
Irdroid/httpie
|
tests/test_output.py
|
Python
|
bsd-3-clause
| 4,467
|
from django.views.generic import TemplateView
class IndexView(TemplateView):
template_name = 'index.html'
|
fiee/fiee-temporale
|
demo/views.py
|
Python
|
bsd-3-clause
| 121
|
##########################################################################
#
# Copyright 2010 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios),
# its affiliates and/or its licensors.
#
# Copyright (c) 2010-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
# require HOM
import hou
# require IECore
import IECore
# our c++ module components
from _IECoreHoudini import *
# function sets
from FnParameterisedHolder import FnParameterisedHolder
from FnOpHolder import FnOpHolder
from FnProceduralHolder import FnProceduralHolder
# misc utility methods
from TestCase import TestCase
from TestProgram import TestProgram
import ParmTemplates
import Utils
from ActiveTake import ActiveTake
from TemporaryParameterValues import TemporaryParameterValues
from UpdateMode import UpdateMode
## \todo: remove this hack if SideFx provides a swig-free method for sending a HOM_Node* to python
HoudiniScene.node = lambda x : hou.node( x._getNodePath() )
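# Usage sketch (our illustration): given an IECoreHoudini HoudiniScene
# instance `scene`, scene.node() now returns the hou.Node wrapping the
# scene's underlying Houdini node, looked up by its node path.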
|
davidsminor/cortex
|
python/IECoreHoudini/__init__.py
|
Python
|
bsd-3-clause
| 2,596
|
import numpy as np
import warnings
from scipy._lib._util import check_random_state
def rvs_ratio_uniforms(pdf, umax, vmin, vmax, size=1, c=0, random_state=None):
"""
Generate random samples from a probability density function using the
ratio-of-uniforms method.
Parameters
----------
pdf : callable
A function with signature `pdf(x)` that is the probability
density function of the distribution.
umax : float
The upper bound of the bounding rectangle in the u-direction.
vmin : float
The lower bound of the bounding rectangle in the v-direction.
vmax : float
The upper bound of the bounding rectangle in the v-direction.
size : int or tuple of ints, optional
Number of random variates to generate (default is 1).
c : float, optional
Shift parameter of ratio-of-uniforms method, see Notes. Default is 0.
random_state : {None, int, `~np.random.RandomState`, `~np.random.Generator`}, optional
If `random_state` is `None` the `~np.random.RandomState` singleton is
used.
If `random_state` is an int, a new ``RandomState`` instance is used,
seeded with random_state.
If `random_state` is already a ``RandomState`` or ``Generator``
instance, then that object is used.
Default is None.
Returns
-------
rvs : ndarray
The random variates distributed according to the probability
distribution defined by the pdf.
Notes
-----
Given a univariate probability density function `pdf` and a constant `c`,
define the set ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``.
If `(U, V)` is a random vector uniformly distributed over `A`,
then `V/U + c` follows a distribution according to `pdf`.
The above result (see [1]_, [2]_) can be used to sample random variables
using only the pdf, i.e. no inversion of the cdf is required. Typical
choices of `c` are zero or the mode of `pdf`. The set `A` is a subset of
the rectangle ``R = [0, umax] x [vmin, vmax]`` where
- ``umax = sup sqrt(pdf(x))``
- ``vmin = inf (x - c) sqrt(pdf(x))``
- ``vmax = sup (x - c) sqrt(pdf(x))``
In particular, these values are finite if `pdf` is bounded and
``x**2 * pdf(x)`` is bounded (i.e. subquadratic tails).
One can generate `(U, V)` uniformly on `R` and return
`V/U + c` if `(U, V)` are also in `A` which can be directly
verified.
Intuitively, the method works well if `A` fills up most of the
enclosing rectangle, so that the probability is high that `(U, V)`
lies in `A` whenever it lies in `R`; otherwise the number of
required iterations becomes too large. To be more precise, note that
the expected number of iterations to draw `(U, V)` uniformly
distributed on `R` such that `(U, V)` is also in `A` is given by
the ratio ``area(R) / area(A) = 2 * umax * (vmax - vmin)``, using the fact
that the area of `A` is equal to 1/2 (Theorem 7.1 in [1]_). A warning
is displayed if this ratio is larger than 20. Moreover, if the sampling
fails to generate a single random variate after 50000 iterations (i.e.
not a single draw is in `A`), an exception is raised.
If the bounding rectangle is not correctly specified (i.e. if it does not
contain `A`), the algorithm samples from a distribution different from
the one given by `pdf`. It is therefore recommended to perform a
test such as `~scipy.stats.kstest` as a check.
References
----------
.. [1] L. Devroye, "Non-Uniform Random Variate Generation",
Springer-Verlag, 1986.
.. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian
random variates", Statistics and Computing, 24(4), p. 547--557, 2014.
.. [3] A.J. Kinderman and J.F. Monahan, "Computer Generation of Random
Variables Using the Ratio of Uniform Deviates",
ACM Transactions on Mathematical Software, 3(3), p. 257--260, 1977.
Examples
--------
>>> from scipy import stats
Simulate normally distributed random variables. It is easy to compute the
bounding rectangle explicitly in that case.
>>> f = stats.norm.pdf
>>> v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
>>> umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound
>>> np.random.seed(12345)
>>> rvs = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=2500)
The K-S test confirms that the random variates are indeed normally
distributed (normality is not rejected at 5% significance level):
>>> stats.kstest(rvs, 'norm')[1]
0.3420173467307603
The exponential distribution provides another example where the bounding
rectangle can be determined explicitly.
>>> np.random.seed(12345)
>>> rvs = stats.rvs_ratio_uniforms(lambda x: np.exp(-x), umax=1,
... vmin=0, vmax=2*np.exp(-1), size=1000)
>>> stats.kstest(rvs, 'expon')[1]
0.928454552559516
Sometimes it can be helpful to use a non-zero shift parameter `c`, see e.g.
[2]_ above in the case of the generalized inverse Gaussian distribution.
"""
if vmin >= vmax:
raise ValueError("vmin must be smaller than vmax.")
if umax <= 0:
raise ValueError("umax must be positive.")
exp_iter = 2 * (vmax - vmin) * umax # rejection constant (see [1])
if exp_iter > 20:
msg = ("The expected number of iterations to generate a single random "
"number from the desired distribution is larger than {}, "
"potentially causing bad performance.".format(int(exp_iter)))
warnings.warn(msg, RuntimeWarning)
size1d = tuple(np.atleast_1d(size))
N = np.prod(size1d) # number of rvs needed, reshape upon return
# start sampling using ratio of uniforms method
rng = check_random_state(random_state)
x = np.zeros(N)
simulated, i = 0, 1
# loop until N rvs have been generated: expected runtime is finite
# to avoid infinite loop, raise exception if not a single rv has been
# generated after 50000 tries. even if exp_iter = 1000, probability of
# this event is (1-1/1000)**50000 which is of order 10e-22
while simulated < N:
k = N - simulated
# simulate uniform rvs on [0, umax] and [vmin, vmax]
u1 = umax * rng.uniform(size=k)
v1 = rng.uniform(vmin, vmax, size=k)
# apply rejection method
rvs = v1 / u1 + c
accept = (u1**2 <= pdf(rvs))
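# (the acceptance condition u <= sqrt(pdf(v/u + c)) is checked in
# squared form above to avoid evaluating square roots)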
num_accept = np.sum(accept)
if num_accept > 0:
x[simulated:(simulated + num_accept)] = rvs[accept]
simulated += num_accept
if (simulated == 0) and (i*N >= 50000):
msg = ("Not a single random variate could be generated in {} "
"attempts. The ratio of uniforms method does not appear "
"to work for the provided parameters. Please check the "
"pdf and the bounds.".format(i*N))
raise RuntimeError(msg)
i += 1
return np.reshape(x, size1d)
|
aeklant/scipy
|
scipy/stats/_rvs_sampling.py
|
Python
|
bsd-3-clause
| 7,080
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-12 07:13
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('home', '0004_auto_20160511_0845'),
]
operations = [
migrations.AlterField(
model_name='teammember',
name='bio',
field=wagtail.core.fields.RichTextField(help_text='The team member bio', max_length=360),
),
]
|
evonove/evonove.it
|
django-website/home/migrations/0005_auto_20160512_0713.py
|
Python
|
bsd-3-clause
| 518
|
"""
Contains base level parents that aren't to be used directly.
"""
from twisted.internet.defer import inlineCallbacks, returnValue
from fuzzywuzzy.process import QRatio
from fuzzywuzzy import utils as fuzz_utils
from src.daemons.server.ansi import ANSI_HILITE, ANSI_NORMAL
from src.daemons.server.objects.exceptions import ObjectHasZoneMembers, NoSuchObject
from src.daemons.server.protocols.proxyamp import EmitToObjectCmd
#noinspection PyShadowingBuiltins
class BaseObject(object):
"""
This is the base parent for every in-game "object". Rooms, Players, and
Things are all considered objects. Behaviors here are very low level.
"""
# Holds this object's command table. Any objects inside of this object
# will check this for command matches before the global table.
local_command_table = None
# Same as above, but for admin-only commands.
local_admin_command_table = None
def __init__(self, mud_service, id, parent, name, description=None,
internal_description=None,
location_id=None, destination_id=None, zone_id=None,
aliases=None, originally_controlled_by_account_id=None,
controlled_by_account_id=None, attributes=None,
created_time=None):
"""
:param MudService mud_service: The MudService class running the game.
:param int id: A unique ID for the object, or None if this is
a new object.
:param str parent: The Python path to the parent class for this
instantiated object.
:param str name: The non-ASCII'd name.
:param str description: The object's description.
:keyword int location_id: The ID of the object this object resides within.
None if this object is location-less.
:keyword int destination_id: Used to determine where an exit leads.
:keyword int zone_id: Optional zone ID (ID of another BaseObject).
:keyword int originally_controlled_by_account_id: Account ID that
first controlled this object (if it was created in conjunction
with an account).
:keyword int controlled_by_account_id: If this object is being controlled
by an account, this will be populated.
:keyword dict kwargs: All objects are instantiated with the values from
the DB as kwargs. Since the DB representation of all of an
objects attributes is just a dict, this works really well.
:keyword datetime.datetime created_time: The time the object was
created.
"""
self.mud_service = mud_service
# This mirrors the 'id' field in dott_objects. If this is set to None
# and the instance is saved, an insert is done.
self.id = id
self.name = name
self.description = description
self.internal_description = internal_description
self.parent = parent
self.location_id = location_id
self.destination_id = destination_id
self.zone_id = zone_id
self.aliases = aliases or []
self.originally_controlled_by_account_id = originally_controlled_by_account_id
self.controlled_by_account_id = controlled_by_account_id
# This stores all of the object's data. This includes core and
# userspace attributes.
self._attributes = attributes or {}
self.created_time = created_time
assert isinstance(self._attributes, dict)
def __str__(self):
return "<%s: %s (#%d)>" % (self.__class__.__name__, self.name, self.id)
def __repr__(self):
return self.__str__()
#
## Begin properties.
#
@property
def _object_store(self):
"""
Short-cut to the global object store.
:rtype: ObjectStore
:returns: Reference to the global object store instance.
"""
return self.mud_service.object_store
@property
def _command_handler(self):
"""
Short-cut to the global command handler.
:rtype: CommandHandler
:returns: Reference to the global command handler instance.
"""
return self.mud_service.command_handler
def _generic_id_to_baseobject_property_getter(self, attrib_name):
"""
A generic getter for attributes that store object IDs. Looks up the
stored ID and returns the matching object, or None if no ID is set.
:param attrib_name: The name of the ID attribute to look up.
:rtype: BaseObject or None
:returns: The ``BaseObject`` instance for the attribute, or None
if there is no value.
:raises: NoSuchObject if the stored object ID has no match in
the DB.
"""
obj_id = getattr(self, attrib_name)
if obj_id:
#noinspection PyTypeChecker
return self._object_store.get_object(obj_id)
else:
return None
def _generic_baseobject_to_id_property_setter(self, attrib_name, obj_or_id):
"""
A generic setter for attributes that store object IDs.
:param attrib_name: The name of the ID attribute to set.
:param obj_or_id: The object or object ID to store on this object.
:type obj_or_id: A ``BaseObject`` sub-class, an ``int``, or None.
"""
if isinstance(obj_or_id, int):
# Already an int, assume this is an object ID.
setattr(self, attrib_name, obj_or_id)
elif isinstance(obj_or_id, basestring):
raise Exception("BaseObject.set_%s() can't accept strings for object IDs: %s" % (
attrib_name, obj_or_id))
elif obj_or_id is None:
setattr(self, attrib_name, None)
else:
# Looks like a BaseObject sub-class. Grab the object ID.
setattr(self, attrib_name, obj_or_id.id)
@property
def attributes(self):
"""
Redirects to the object's attributes dict.
:rtype: dict
"""
return self._attributes
def get_location(self):
"""
Determines the object's location and returns the instance representing
this object's location.
:returns: The ``BaseObject`` instance (sub-class) that this object
is currently in. Typically a ``RoomObject``, but can also be
other types.
:raises: NoSuchObject if the stored object ID has no match in
the DB.
"""
return self._generic_id_to_baseobject_property_getter('location_id')
def set_location(self, obj_or_id):
"""
Sets this object's location.
:param obj_or_id: The object or object ID to set as the
object's location.
:type obj_or_id: A ``BaseObject`` sub-class or an ``int``.
"""
if self.base_type == 'room':
# Rooms can't have locations.
return
self._generic_baseobject_to_id_property_setter('location_id', obj_or_id)
location = property(get_location, set_location)
def get_zone(self):
"""
Determines the object's zone and returns the instance representing
this object's zone.
:rtype: BaseObject or None
:returns: The ``BaseObject`` instance (sub-class) that is this object's
zone master object.
:raises: NoSuchObject if the stored object ID has no match in
the DB.
"""
return self._generic_id_to_baseobject_property_getter('zone_id')
def set_zone(self, obj_or_id):
"""
Sets this object's zone.
:param obj_or_id: The object or object ID to set as the
object's zone master.
:type obj_or_id: A ``BaseObject`` sub-class or an ``int``.
"""
self._generic_baseobject_to_id_property_setter('zone_id', obj_or_id)
zone = property(get_zone, set_zone)
#noinspection PyPropertyDefinition
@property
def base_type(self):
"""
        BaseObject's four primary sub-classes are Room, Player, Exit,
        and Thing. These are all considered the top-level children, and
        everything else will be children of them. Room, Player, Exit, and
        Thing are the only four valid base types, and each sub-class should
        return one of the following for quick-and-easy type checking:
* room
* player
* exit
* thing
This should only be used for display, never for inheritance checking!
isinstance and friends are there for that.
:rtype: str
"""
raise NotImplementedError('Over-ride in sub-class.')
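    # In a sub-class this is a one-liner, e.g. (hypothetical sketch):
    #   @property
    #   def base_type(self):
    #       return 'room'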
#
## Begin regular methods.
#
@inlineCallbacks
def save(self):
"""
Shortcut for saving an object to the object store it's a member of.
"""
saved_obj = yield self._object_store.save_object(self)
returnValue(saved_obj)
@inlineCallbacks
def destroy(self):
"""
Destroys the object.
"""
# Destroy all exits that were linked to this object.
if self.base_type not in ['exit', 'player']:
for exit in self._object_store.find_exits_linked_to_obj(self):
yield exit.destroy()
        # Objects can't be destroyed while other objects still belong to
        # their zone.
zone_members = self._object_store.find_objects_in_zone(self)
if zone_members:
raise ObjectHasZoneMembers(
"Object has zone members. @zmo/empty first, or use "
"@zmo/delete instead.")
# Destroys this object, once all cleanup is done.
yield self._object_store.destroy_object(self)
def execute_command(self, command_string):
"""
Directs the object to execute a certain command. Passes the command
string through the command handler.
:param str command_string: The command to run.
"""
# Input gets handed off to the command handler, where it is parsed
# and routed through various command tables.
if not self._command_handler.handle_input(self, command_string):
self.emit_to('Huh?')
def emit_to(self, message):
"""
Emits to any Session objects attached to this object.
:param str message: The message to emit to any Sessions attached to
the object.
"""
assert self.id is not None, "Attempting to emit to an object with no ID."
self.mud_service.proxyamp.callRemote(
EmitToObjectCmd,
object_id=self.id,
message=message
)
def emit_to_contents(self, message, exclude=None):
"""
Emits the given message to everything in this object's inventory.
:param str message: The message to emit to any object within
this one.
        :param list exclude: A list of objects that are to be
            excluded from the emit. These objects will not see the message.
"""
if not exclude:
exclude = []
else:
exclude = [obj.id for obj in exclude]
contents = self.get_contents()
for obj in contents:
if obj.id not in exclude:
obj.emit_to(message)
def move_to(self, destination_obj, force_look=True):
"""
Moves this object to the given destination.
:param BaseObject destination_obj: Where to move this object to.
:param bool force_look: If True, the object will run the "look"
command between movements.
"""
old_location_obj = self.location
#noinspection PyUnresolvedReferences
old_location_obj.before_object_leaves_event(self)
destination_obj.before_object_enters_event(self)
self.set_location(destination_obj)
self.save()
#noinspection PyUnresolvedReferences
old_location_obj.after_object_leaves_event(self)
destination_obj.after_object_enters_event(self)
if force_look:
self.execute_command('look')
def is_admin(self):
"""
This always returns ``False``, since objects don't have administrative
powers by default.
:rtype: bool
:returns: ``False``
"""
return False
def get_contents(self):
"""
Returns the list of objects 'inside' this object.
:rtype: list
:returns: A list of :class:`BaseObject` instances whose location is
this object.
"""
return self._object_store.get_object_contents(self)
#noinspection PyUnusedLocal
def get_description(self, invoker, from_inside=False):
"""
Returns the description of this object.
:param BaseObject invoker: The object asking for the description.
:param bool from_inside: If True, use an internal description instead
of the normal description, if available. For example, the inside
of a vehicle should have a different description than the outside.
"""
if from_inside and self.internal_description:
return self.internal_description
return self.description
def get_appearance_name(self, invoker, force_admin_view=False):
"""
Returns the 'pretty' form of the name for the object's appearance.
:param invoker: The object asking for the appearance. If None is
provided, provide the non-admin view.
:type invoker: BaseObject or None
        :param bool force_admin_view: If this is True, force the admin view,
even if the invoker is not an admin (or no invoker is given).
:rtype: str
:returns: The object's 'pretty' name.
"""
if (invoker and invoker.is_admin()) or force_admin_view:
# Used to show a single-character type identifier next to object id.
if self.base_type == 'room':
type_str = 'R'
elif self.base_type == 'thing':
type_str = 'T'
elif self.base_type == 'exit':
type_str = 'E'
elif self.base_type == 'player':
type_str = 'P'
else:
                # Unknown base type.
type_str = 'U'
extra_info = '(#%s%s)' % (
self.id,
type_str,
)
else:
extra_info = ''
return "%s%s%s%s" % (ANSI_HILITE, self.name, ANSI_NORMAL, extra_info)
#noinspection PyUnusedLocal
def get_appearance_contents_and_exits(self, invoker, from_inside=False):
"""
Returns the contents and exits display for the object.
:param BaseObject invoker: The object asking for the appearance.
:param bool from_inside: Show the contents/exits as if the invoker
was inside this object.
:rtype: str
:returns: The contents/exits display.
"""
exits_str = ''
things_str = ''
contents = self.get_contents()
for obj in contents:
if obj.id == invoker.id:
# This is the invoker, don't show yourself.
continue
if obj.base_type == 'exit':
# Exits show the exit's primary alias.
obj_alias = obj.aliases[0] if obj.aliases else '_'
exits_str += '<%s> %s\n' % (
obj_alias,
obj.get_appearance_name(invoker),
)
else:
# Everything else just shows the name.
things_str += '%s\n' % obj.get_appearance_name(invoker)
retval = ''
if things_str:
retval += '\nContents:\n'
retval += things_str
if exits_str:
retval += '\nExits:\n'
retval += exits_str
return retval
def get_appearance(self, invoker):
"""
Shows the full appearance for an object. Includes description, contents,
exits, and everything else.
:param BaseObject invoker: The object asking for the appearance.
:rtype: str
:returns: The object's appearance, from the outside or inside.
"""
#noinspection PyUnresolvedReferences
is_inside = invoker.location.id == self.id
desc = self.get_description(invoker, from_inside=is_inside)
name = self.get_appearance_name(invoker)
contents = self.get_appearance_contents_and_exits(
invoker,
from_inside=is_inside
)
return "%s\n%s\n%s" % (name, desc, contents)
def _find_name_or_alias_match(self, objects, query):
"""
Performs name and alias matches on a list of objects. Returns the
best match, or ``None`` if nothing was found.
:param iterable objects: A list of ``BaseObject`` sub-class instances
to attempt to match to.
:param str query: The string to match against.
:rtype: BaseObject
:returns: The best match object for the given query.
"""
if not objects:
return None
for obj in objects:
# Start by checking all objects for an alias match.
aliases = [alias.lower() for alias in obj.aliases]
if query.lower() in aliases:
# If a match is found, return immediately on said match.
return obj
        for choice in objects:
            # Normalize the candidate's name before the substring check.
            processed = fuzz_utils.full_process(choice.name)
if query in processed:
return choice
return None
def _find_object_id_match(self, desc):
"""
Given an object ID string (ie: '#9'), determine whether this object
can find said object.
:param str desc: A string with which to perform a search
:rtype: :class:'BaseObject' or ``None``
:returns: An object that best matches the string provided. If no
suitable match was found, returns ``None``.
"""
mud_service = self.mud_service
try:
# Object IDs are int primary keys in the object store.
obj_id = int(desc[1:])
except (ValueError, TypeError):
# This isn't an object ID.
return None
# Absolute object identifier: lookup the id
try:
obj = mud_service.object_store.get_object(obj_id)
except NoSuchObject:
return None
if not self.is_admin():
# Non-admins can only find objects in their current location.
if self.location and obj.location:
# Both invoker and the target have a location. See if they
# are in the same place.
#noinspection PyUnresolvedReferences
location_match = self.location.id == obj.location.id or \
self.location.id == obj.id
if location_match:
# Locations match. Good to go.
return obj
elif obj.base_type == 'room':
#noinspection PyUnresolvedReferences
if self.location and self.location.id == obj.id:
# Non-admin is looking at their current location, which
# is a room.
return obj
else:
# Non-specified or differing locations. Either way, there
# is no usable match.
return None
else:
# Invoker is an admin, and can find object id matches globally.
return obj
def contextual_object_search(self, desc):
"""
Searches for objects using the current object as a frame of
        reference.
:param str desc: A string with which to perform a search
:rtype: :class:'BaseObject' or ``None``
:returns: An object that best matches the string provided. If no
suitable match was found, returns ``None``.
"""
desc = desc.strip()
if not desc:
# Probably an empty string, which we can't do much with.
return None
if desc[0] == '#':
oid_match = self._find_object_id_match(desc)
if oid_match:
return oid_match
if desc.lower() == 'me':
# Object is referring to itself
return self
if desc.lower() == 'here':
            # Object is referring to its location
return self.location
# Not a keyword, begin fuzzy search
# First search the objects in the room
if self.location:
#noinspection PyUnresolvedReferences
neighboring_match = self._find_name_or_alias_match(
self.location.get_contents(),
desc
)
if neighboring_match:
return neighboring_match
# Next search the objects inside the invoker
inventory_match = self._find_name_or_alias_match(
self.get_contents(),
desc
)
if inventory_match:
return inventory_match
# Unable to find anything
return None
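    # Usage sketch (the invoker and query strings are assumptions):
    #   invoker.contextual_object_search('me')     -> the invoker itself
    #   invoker.contextual_object_search('here')   -> the invoker's location
    #   invoker.contextual_object_search('#42')    -> object ID lookup
    #   invoker.contextual_object_search('sword')  -> name/alias fuzzy match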
def can_object_enter(self, obj):
"""
Determine whether another object can enter this object.
:param BaseObject obj: The object to check enter permissions for.
:rtype: tuple
:returns: A tuple in the format of ``(can_enter, message)``, where
``can_enter`` is a bool, and ``message`` is a string or ``None``,
used to provide a reason for the object not being able to enter.
"""
if obj.is_admin():
# Admin can enter anything.
return True, None
return False, "You can't enter that."
def determine_enter_destination(self, obj):
"""
Given an object that is going to enter this one, determine where said
object will be moved to. This defaults to this object's inventory,
but in the case of something like a ship, they should enter to the
bridge.
:param BaseObject obj: The other object that is entering this one.
:rtype: BaseObject
:returns: The target location for the object to be moved to upon
entering this object.
"""
return self
def can_object_leave(self, obj):
"""
Determine whether another object can leave this object.
        :param BaseObject obj: The object to check leave permissions for.
:rtype: tuple
:returns: A tuple in the format of ``(can_leave, message)``, where
``can_leave`` is a bool, and ``message`` is a string or ``None``,
used to provide a reason for the object not being able to leave.
"""
if not obj.location:
return False, "You can't find a way out."
# All is well
return True, None
def determine_leave_destination(self, obj):
"""
Given an object that is going to leave this one, determine where said
object will be moved to. This defaults to this object's location,
but in the case of leaving a ship's bridge, they should end up outside
the ship, rather than inside the ship object.
        :param BaseObject obj: The other object that is leaving this one.
:rtype: BaseObject
:returns: The target location for the object to be moved to upon
leaving this object.
"""
return self.location
#
## Begin events
#
def after_session_connect_event(self):
"""
This is called when the proxy authenticates and logs in a Session that
controls this object. This event is only triggered when the first
Session controlling this object is logged in. For example, logging in
a second time with another client would not trigger this again.
This is currently only meaningful for PlayerObject instances. We don't
want players to see connects/disconnects when admins are controlling
NPCs.
"""
pass
def after_session_disconnect_event(self):
"""
        This is called when the last Session that controls this object is
disconnected. If you have two clients open that are authenticated and
controlling the same object, this will not trigger until the last
Session is closed.
This is currently only meaningful for PlayerObject instances. We don't
want players to see connects/disconnects when admins are controlling
NPCs.
"""
pass
#noinspection PyUnusedLocal
def before_object_leaves_event(self, actor):
"""
Triggered before an object leaves this object's inventory.
:param BaseObject actor: The object doing the leaving.
"""
pass
#noinspection PyUnusedLocal
def after_object_leaves_event(self, actor):
"""
Triggered after an object physically leaves this object's inventory.
:param BaseObject actor: The object doing the leaving.
"""
for obj in self.get_contents():
# Can't use self.emit_to_contents because we need to determine
# appearance on a per-object basis.
obj.emit_to('%s has left' % actor.get_appearance_name(obj))
#noinspection PyUnusedLocal
def before_object_enters_event(self, actor):
"""
Triggered before an object arrives in this object's inventory.
:param BaseObject actor: The object doing the entering.
"""
for obj in self.get_contents():
# Can't use self.emit_to_contents because we need to determine
# appearance on a per-object basis.
obj.emit_to('%s has arrived' % actor.get_appearance_name(obj))
#noinspection PyUnusedLocal
def after_object_enters_event(self, actor):
"""
Triggered after an object physically enters this object's inventory.
:param BaseObject actor: The object doing the entering.
"""
pass
|
gtaylor/dott
|
src/game/parents/base_objects/base.py
|
Python
|
bsd-3-clause
| 25,944
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2009 Arthur Furlan <arthur.furlan@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# On Debian systems, you can find the full text of the license in
# /usr/share/common-licenses/GPL-2
import os
import twitter
import urllib, urllib2
from django.conf import settings
from django.contrib.sites.models import Site
TWITTER_MAXLENGTH = getattr(settings, 'TWITTER_MAXLENGTH', 140)
def post_to_twitter(sender, instance, *args, **kwargs):
"""
Post new saved objects to Twitter.
Example:
from django.db import models
class MyModel(models.Model):
text = models.CharField(max_length=255)
link = models.CharField(max_length=255)
def __unicode__(self):
return u'%s' % self.text
def get_absolute_url(self):
return self.link
# the following method is optional
def get_twitter_message(self):
return u'my-custom-twitter-message: %s - %s' \
% (self.text, self.link)
models.signals.post_save.connect(post_to_twitter, sender=MyModel)
"""
    # avoid posting the same object twice
if not kwargs.get('created'):
return False
# check if there's a twitter account configured
try:
username = settings.TWITTER_USERNAME
password = settings.TWITTER_PASSWORD
except AttributeError:
print 'WARNING: Twitter account not configured.'
return False
    # if the absolute url isn't really absolute (i.e. it doesn't contain the
    # protocol and domain), prepend the current site's domain, emulating the
    # browser's behaviour
url = instance.get_absolute_url()
if not url.startswith('http://') and not url.startswith('https://'):
domain = Site.objects.get_current().domain
url = u'http://%s%s' % (domain, url)
    # shorten the object's link with the TinyURL API
create_api = 'http://tinyurl.com/api-create.php'
data = urllib.urlencode(dict(url=url))
link = urllib2.urlopen(create_api, data=data).read().strip()
# create the twitter message
try:
text = instance.get_twitter_message()
except AttributeError:
text = unicode(instance)
mesg = u'%s - %s' % (text, link)
if len(mesg) > TWITTER_MAXLENGTH:
size = len(mesg + '...') - TWITTER_MAXLENGTH
mesg = u'%s... - %s' % (text[:-size], link)
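        # e.g. with TWITTER_MAXLENGTH == 140 and len(mesg) == 150, size is
        # 13: trimming 13 chars from `text` and re-adding the 3-char ellipsis
        # yields a message of exactly 140 characters.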
try:
twitter_api = twitter.Api(username, password)
twitter_api.PostUpdate(mesg)
except urllib2.HTTPError, ex:
print 'ERROR:', str(ex)
return False
|
Kazade/NeHe-Website
|
public/post_to_twitter.py
|
Python
|
bsd-3-clause
| 2,865
|
# -*- coding: utf-8 -*-
#
# hrCMS documentation build configuration file, created by
# sphinx-quickstart on Sat Mar 28 20:11:13 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'leonardo-api'
copyright = u'2015, Michael Kuty'
author = u'Michael Kuty'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0'
# The full version, including alpha/beta/rc tags.
release = '0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'hrCMSdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'leonardo_api.tex', u'leonardo-api',
u'Michael Kuty', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'leonardo_api', u'leonardo-api Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'leonardo_api', u'leonardo-api',
author, 'leonardo_api', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
michaelkuty/leonardo-api
|
docs/source/conf.py
|
Python
|
bsd-3-clause
| 9,452
|
#!/usr/bin/env python
#
# Copyright (c) 2018, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import wpan
from wpan import verify
# -----------------------------------------------------------------------------------------------------------------------
# Test description: Child-table and child recovery
#
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print('-' * 120)
print('Starting \'{}\''.format(test_name))
# -----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
speedup = 4
wpan.Node.set_time_speedup_factor(speedup)
NUM_CHILDREN = 7
router = wpan.Node()
children = []
for i in range(NUM_CHILDREN):
children.append(wpan.Node())
all_nodes = [router] + children
# -----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
# -----------------------------------------------------------------------------------------------------------------------
# Build network topology
router.form('child-table')
for child in children:
child.join_node(router, node_type=wpan.JOIN_TYPE_SLEEPY_END_DEVICE)
child.set(wpan.WPAN_POLL_INTERVAL, '1000')
# -----------------------------------------------------------------------------------------------------------------------
# Test implementation
# Get the child table and verify all children are in the table.
child_table = wpan.parse_child_table_result(
router.get(wpan.WPAN_THREAD_CHILD_TABLE)
)
verify(len(child_table) == len(children))
for child in children:
ext_addr = child.get(wpan.WPAN_EXT_ADDRESS)[1:-1]
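    # `for ... else`: the else clause runs only if the loop finished without
    # a `break`, i.e. no table entry matched this child's extended address.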
for entry in child_table:
if entry.ext_address == ext_addr:
break
else:
print(
'Failed to find a child entry for extended address {} in table'.format(
ext_addr
)
)
exit(1)
verify(
int(entry.rloc16, 16) == int(child.get(wpan.WPAN_THREAD_RLOC16), 16)
)
verify(int(entry.timeout, 0) == 120)
verify(entry.is_rx_on_when_idle() is False)
verify(entry.is_ftd() is False)
# -----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print('\'{}\' passed.'.format(test_name))
|
turon/openthread
|
tests/toranj/test-011-child-table.py
|
Python
|
bsd-3-clause
| 3,917
|
from django.conf.urls.defaults import patterns, include, url
urlpatterns = patterns('',
url('^$',
'actionkit_raplet.views.raplet',
name='raplet'),
)
|
350dotorg/aktlet
|
actionkit_raplet/urls.py
|
Python
|
bsd-3-clause
| 170
|
import datetime
import commonware.log
import django_tables as tables
import jinja2
from django.conf import settings
from django.template import Context, loader
from django.utils.datastructures import SortedDict
from jingo import register
from tower import ugettext as _, ugettext_lazy as _lazy, ungettext as ngettext
import amo
from access import acl
from addons.helpers import new_context
from addons.models import Addon
from amo.helpers import absolutify, breadcrumbs, page_title
from amo.urlresolvers import reverse
from amo.utils import send_mail as amo_send_mail
from editors.models import (ReviewerScore, ViewFastTrackQueue,
ViewFullReviewQueue, ViewPendingQueue,
ViewPreliminaryQueue)
from editors.sql_table import SQLTable
@register.function
def file_compare(file_obj, version):
    # Compare this file to the one in the version with the same platform.
file_obj = version.files.filter(platform=file_obj.platform)
# If not there, just compare to all.
if not file_obj:
file_obj = version.files.filter(platform=amo.PLATFORM_ALL.id)
# At this point we've got no idea what Platform file to
    # compare with, so just choose the first.
if not file_obj:
file_obj = version.files.all()
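    # Net fallback order: same platform -> PLATFORM_ALL -> any file at all.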
return file_obj[0]
@register.function
def file_review_status(addon, file):
if file.status not in [amo.STATUS_DISABLED, amo.STATUS_PUBLIC]:
if addon.status in [amo.STATUS_UNREVIEWED, amo.STATUS_LITE]:
return _(u'Pending Preliminary Review')
elif addon.status in [amo.STATUS_NOMINATED,
amo.STATUS_LITE_AND_NOMINATED,
amo.STATUS_PUBLIC]:
return _(u'Pending Full Review')
if file.status in [amo.STATUS_DISABLED, amo.STATUS_REJECTED]:
if file.reviewed is not None:
return _(u'Rejected')
            # Can't assume that if the reviewed date is missing it's
            # unreviewed. Especially for versions.
else:
return _(u'Rejected or Unreviewed')
return amo.STATUS_CHOICES[file.status]
@register.function
def version_status(addon, version):
return ','.join(unicode(s) for s in version.status)
@register.function
@jinja2.contextfunction
def editor_page_title(context, title=None, addon=None):
"""Wrapper for editor page titles. Eerily similar to dev_page_title."""
if addon:
title = u'%s :: %s' % (title, addon.name)
else:
section = _lazy('Editor Tools')
title = u'%s :: %s' % (title, section) if title else section
return page_title(context, title)
@register.function
@jinja2.contextfunction
def editors_breadcrumbs(context, queue=None, addon_queue=None, items=None,
themes=False):
"""
Wrapper function for ``breadcrumbs``. Prepends 'Editor Tools'
breadcrumbs.
**items**
list of [(url, label)] to be inserted after Add-on.
**addon_queue**
Addon object. This sets the queue by addon type or addon status.
**queue**
Explicit queue type to set.
"""
crumbs = [(reverse('editors.home'), _('Editor Tools'))]
if themes:
crumbs.append((reverse('editors.themes.home'), _('Themes')))
if addon_queue:
queue_id = addon_queue.status
queue_ids = {amo.STATUS_UNREVIEWED: 'prelim',
amo.STATUS_NOMINATED: 'nominated',
amo.STATUS_PUBLIC: 'pending',
amo.STATUS_LITE: 'prelim',
amo.STATUS_LITE_AND_NOMINATED: 'nominated',
amo.STATUS_PENDING: 'pending'}
queue = queue_ids.get(queue_id, 'queue')
if queue:
queues = {
'queue': _('Queue'),
'pending': _('Pending Updates'),
'nominated': _('Full Reviews'),
'prelim': _('Preliminary Reviews'),
'moderated': _('Moderated Reviews'),
'fast_track': _('Fast Track'),
'pending_themes': _('Pending Themes'),
'flagged_themes': _('Flagged Themes'),
'rereview_themes': _('Update Themes'),
}
        if items and queue != 'queue':
url = reverse('editors.queue_%s' % queue)
else:
# The Addon is the end of the trail.
url = None
crumbs.append((url, queues[queue]))
if items:
crumbs.extend(items)
return breadcrumbs(context, crumbs, add_default=False)
@register.function
@jinja2.contextfunction
def queue_tabnav(context):
"""Returns tuple of tab navigation for the queue pages.
Each tuple contains three elements: (tab_code, page_url, tab_text)
"""
from .views import queue_counts
counts = queue_counts()
tabnav = [('fast_track', 'queue_fast_track',
(ngettext('Fast Track ({0})', 'Fast Track ({0})',
counts['fast_track'])
.format(counts['fast_track']))),
('nominated', 'queue_nominated',
(ngettext('Full Review ({0})', 'Full Reviews ({0})',
counts['nominated'])
.format(counts['nominated']))),
('pending', 'queue_pending',
(ngettext('Pending Update ({0})', 'Pending Updates ({0})',
counts['pending'])
.format(counts['pending']))),
('prelim', 'queue_prelim',
(ngettext('Preliminary Review ({0})',
'Preliminary Reviews ({0})',
counts['prelim'])
.format(counts['prelim']))),
('moderated', 'queue_moderated',
(ngettext('Moderated Review ({0})', 'Moderated Reviews ({0})',
counts['moderated'])
.format(counts['moderated'])))]
return tabnav
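# Each returned entry renders as one tab; a tuple looks roughly like
# ('pending', 'queue_pending', 'Pending Updates (3)') with a live count.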
@register.inclusion_tag('editors/includes/reviewers_score_bar.html')
@jinja2.contextfunction
def reviewers_score_bar(context, types=None, addon_type=None):
user = context.get('amo_user')
return new_context(dict(
request=context.get('request'),
amo=amo, settings=settings,
points=ReviewerScore.get_recent(user, addon_type=addon_type),
total=ReviewerScore.get_total(user),
**ReviewerScore.get_leaderboards(user, types=types,
addon_type=addon_type)))
class ItemStateTable(object):
def increment_item(self):
self.item_number += 1
def set_page(self, page):
self.item_number = page.start_index()
class EditorQueueTable(SQLTable, ItemStateTable):
addon_name = tables.Column(verbose_name=_lazy(u'Addon'))
addon_type_id = tables.Column(verbose_name=_lazy(u'Type'))
waiting_time_min = tables.Column(verbose_name=_lazy(u'Waiting Time'))
flags = tables.Column(verbose_name=_lazy(u'Flags'), sortable=False)
applications = tables.Column(verbose_name=_lazy(u'Applications'),
sortable=False)
platforms = tables.Column(verbose_name=_lazy(u'Platforms'),
sortable=False)
additional_info = tables.Column(
verbose_name=_lazy(u'Additional'), sortable=False)
def render_addon_name(self, row):
url = '%s?num=%s' % (reverse('editors.review',
args=[row.addon_slug]),
self.item_number)
self.increment_item()
return u'<a href="%s">%s <em>%s</em></a>' % (
url, jinja2.escape(row.addon_name),
jinja2.escape(row.latest_version))
def render_addon_type_id(self, row):
return amo.ADDON_TYPE[row.addon_type_id]
def render_additional_info(self, row):
info = []
if row.is_site_specific:
info.append(_lazy(u'Site Specific'))
if row.external_software:
info.append(_lazy(u'Requires External Software'))
if row.binary or row.binary_components:
info.append(_lazy(u'Binary Components'))
return u', '.join([jinja2.escape(i) for i in info])
def render_applications(self, row):
# TODO(Kumar) show supported version ranges on hover (if still needed)
icon = u'<div class="app-icon ed-sprite-%s" title="%s"></div>'
return u''.join([icon % (amo.APPS_ALL[i].short, amo.APPS_ALL[i].pretty)
for i in row.application_ids])
def render_platforms(self, row):
icons = []
html = u'<div class="platform-icon plat-sprite-%s" title="%s"></div>'
for platform in row.file_platform_ids:
icons.append(html % (amo.PLATFORMS[int(platform)].shortname,
amo.PLATFORMS[int(platform)].name))
return u''.join(icons)
def render_flags(self, row):
return ''.join(u'<div class="app-icon ed-sprite-%s" '
u'title="%s"></div>' % flag
for flag in row.flags)
def render_waiting_time_min(self, row):
if row.waiting_time_min == 0:
r = _lazy('moments ago')
elif row.waiting_time_hours == 0:
# L10n: first argument is number of minutes
r = ngettext(u'{0} minute', u'{0} minutes',
row.waiting_time_min).format(row.waiting_time_min)
elif row.waiting_time_days == 0:
# L10n: first argument is number of hours
r = ngettext(u'{0} hour', u'{0} hours',
row.waiting_time_hours).format(row.waiting_time_hours)
else:
# L10n: first argument is number of days
r = ngettext(u'{0} day', u'{0} days',
row.waiting_time_days).format(row.waiting_time_days)
return jinja2.escape(r)
@classmethod
def translate_sort_cols(cls, colname):
legacy_sorts = {
'name': 'addon_name',
'age': 'waiting_time_min',
'type': 'addon_type_id',
}
return legacy_sorts.get(colname, colname)
@classmethod
def default_order_by(cls):
return '-waiting_time_min'
@classmethod
def review_url(cls, row):
return reverse('editors.review', args=[row.addon_slug])
class Meta:
sortable = True
columns = ['addon_name', 'addon_type_id', 'waiting_time_min',
'flags', 'applications', 'additional_info']
class ViewPendingQueueTable(EditorQueueTable):
class Meta(EditorQueueTable.Meta):
model = ViewPendingQueue
class ViewFullReviewQueueTable(EditorQueueTable):
class Meta(EditorQueueTable.Meta):
model = ViewFullReviewQueue
class ViewPreliminaryQueueTable(EditorQueueTable):
class Meta(EditorQueueTable.Meta):
model = ViewPreliminaryQueue
class ViewFastTrackQueueTable(EditorQueueTable):
class Meta(EditorQueueTable.Meta):
model = ViewFastTrackQueue
log = commonware.log.getLogger('z.mailer')
NOMINATED_STATUSES = (amo.STATUS_NOMINATED, amo.STATUS_LITE_AND_NOMINATED)
PRELIMINARY_STATUSES = (amo.STATUS_UNREVIEWED, amo.STATUS_LITE)
PENDING_STATUSES = (amo.STATUS_BETA, amo.STATUS_DISABLED, amo.STATUS_NULL,
amo.STATUS_PENDING, amo.STATUS_PUBLIC)
def send_mail(template, subject, emails, context, perm_setting=None):
template = loader.get_template(template)
amo_send_mail(subject, template.render(Context(context, autoescape=False)),
recipient_list=emails, from_email=settings.EDITORS_EMAIL,
use_blacklist=False, perm_setting=perm_setting)
@register.function
def get_position(addon):
if addon.is_persona() and addon.is_pending():
qs = (Addon.objects.filter(status=amo.STATUS_PENDING,
type=amo.ADDON_PERSONA)
.no_transforms().order_by('created')
.values_list('id', flat=True))
id_ = addon.id
position = 0
for idx, addon_id in enumerate(qs, start=1):
if addon_id == id_:
position = idx
break
total = qs.count()
return {'pos': position, 'total': total}
else:
version = addon.latest_version
if not version:
return False
q = version.current_queue
if not q:
return False
mins_query = q.objects.filter(id=addon.id)
if mins_query.count() > 0:
mins = mins_query[0].waiting_time_min
pos = q.objects.having('waiting_time_min >=', mins).count()
total = q.objects.count()
return dict(mins=mins, pos=pos, total=total)
return False
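# get_position() thus yields {'pos': ..., 'total': ...} for pending personas,
# {'mins': ..., 'pos': ..., 'total': ...} for queued add-ons, and False when
# no queue position applies.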
class ReviewHelper:
"""
    A class that builds enough context to render the review form back to
    the user and route processing off to the correct handler.
"""
def __init__(self, request=None, addon=None, version=None):
self.handler = None
self.required = {}
self.addon = addon
self.all_files = version.files.all() if version else []
self.get_review_type(request, addon, version)
self.actions = self.get_actions()
def set_data(self, data):
self.handler.set_data(data)
def get_review_type(self, request, addon, version):
if self.addon.status in NOMINATED_STATUSES:
self.review_type = 'nominated'
self.handler = ReviewAddon(request, addon, version, 'nominated')
elif self.addon.status == amo.STATUS_UNREVIEWED:
self.review_type = 'preliminary'
self.handler = ReviewAddon(request, addon, version, 'preliminary')
elif self.addon.status == amo.STATUS_LITE:
self.review_type = 'preliminary'
self.handler = ReviewFiles(request, addon, version, 'preliminary')
else:
self.review_type = 'pending'
self.handler = ReviewFiles(request, addon, version, 'pending')
def get_actions(self):
labels, details = self._review_actions()
actions = SortedDict()
if self.review_type != 'preliminary':
actions['public'] = {'method': self.handler.process_public,
'minimal': False,
'label': _lazy('Push to public')}
actions['prelim'] = {'method': self.handler.process_preliminary,
'label': labels['prelim'],
'minimal': False}
actions['reject'] = {'method': self.handler.process_sandbox,
'label': _lazy('Reject'),
'minimal': False}
actions['info'] = {'method': self.handler.request_information,
'label': _lazy('Request more information'),
'minimal': True}
actions['super'] = {'method': self.handler.process_super_review,
'label': _lazy('Request super-review'),
'minimal': True}
actions['comment'] = {'method': self.handler.process_comment,
'label': _lazy('Comment'),
'minimal': True}
for k, v in actions.items():
v['details'] = details.get(k)
return actions
def _review_actions(self):
labels = {'prelim': _lazy('Grant preliminary review')}
details = {'prelim': _lazy('This will mark the files as '
'preliminarily reviewed.'),
'info': _lazy('Use this form to request more information '
'from the author. They will receive an email '
'and be able to answer here. You will be '
'notified by email when they reply.'),
'super': _lazy('If you have concerns about this add-on\'s '
'security, copyright issues, or other '
'concerns that an administrator should look '
'into, enter your comments in the area '
'below. They will be sent to '
'administrators, not the author.'),
'reject': _lazy('This will reject the add-on and remove '
'it from the review queue.'),
'comment': _lazy('Make a comment on this version. The '
'author won\'t be able to see this.')}
if self.addon.status == amo.STATUS_LITE:
details['reject'] = _lazy('This will reject the files and remove '
'them from the review queue.')
if self.addon.status in (amo.STATUS_UNREVIEWED, amo.STATUS_NOMINATED):
details['prelim'] = _lazy('This will mark the add-on as '
'preliminarily reviewed. Future '
'versions will undergo '
'preliminary review.')
elif self.addon.status == amo.STATUS_LITE:
details['prelim'] = _lazy('This will mark the files as '
'preliminarily reviewed. Future '
'versions will undergo '
'preliminary review.')
elif self.addon.status == amo.STATUS_LITE_AND_NOMINATED:
labels['prelim'] = _lazy('Retain preliminary review')
details['prelim'] = _lazy('This will retain the add-on as '
'preliminarily reviewed. Future '
'versions will undergo preliminary '
'review.')
if self.review_type == 'pending':
details['public'] = _lazy('This will approve a sandboxed version '
'of a public add-on to appear on the '
'public side.')
details['reject'] = _lazy('This will reject a version of a public '
'add-on and remove it from the queue.')
else:
details['public'] = _lazy('This will mark the add-on and its most '
'recent version and files as public. '
'Future versions will go into the '
'sandbox until they are reviewed by an '
'editor.')
return labels, details
def process(self):
action = self.handler.data.get('action', '')
if not action:
raise NotImplementedError
return self.actions[action]['method']()
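    # Typical flow, as a sketch (request/addon/version assumed to exist):
    #   helper = ReviewHelper(request=request, addon=addon, version=version)
    #   helper.set_data({'action': 'public', 'comments': 'Looks good.'})
    #   helper.process()  # dispatches to the matching handler method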
class ReviewBase(object):
def __init__(self, request, addon, version, review_type):
self.request = request
self.user = self.request.user
self.addon = addon
self.version = version
self.review_type = review_type
self.files = None
def set_addon(self, **kw):
"""Alters addon and sets reviewed timestamp on version."""
self.addon.update(**kw)
self.version.update(reviewed=datetime.datetime.now())
def set_files(self, status, files, copy_to_mirror=False,
hide_disabled_file=False):
"""Change the files to be the new status
and copy, remove from the mirror as appropriate."""
for file in files:
file.datestatuschanged = datetime.datetime.now()
file.reviewed = datetime.datetime.now()
if copy_to_mirror:
file.copy_to_mirror()
if hide_disabled_file:
file.hide_disabled_file()
file.status = status
file.save()
def log_action(self, action):
details = {'comments': self.data['comments'],
'reviewtype': self.review_type}
if self.files:
details['files'] = [f.id for f in self.files]
if self.version:
details['version'] = self.version.version
amo.log(action, self.addon, self.version, user=self.user,
created=datetime.datetime.now(), details=details)
def notify_email(self, template, subject):
"""Notify the authors that their addon has been reviewed."""
emails = [a.email for a in self.addon.authors.all()]
data = self.data.copy()
data.update(self.get_context_data())
data['tested'] = ''
os, app = data.get('operating_systems'), data.get('applications')
if os and app:
data['tested'] = 'Tested on %s with %s' % (os, app)
elif os and not app:
data['tested'] = 'Tested on %s' % os
elif not os and app:
data['tested'] = 'Tested with %s' % app
        data['addon_type'] = _lazy('add-on')
send_mail('editors/emails/%s.ltxt' % template,
subject % (self.addon.name, self.version.version),
emails, Context(data), perm_setting='editor_reviewed')
def get_context_data(self):
return {'name': self.addon.name,
'number': self.version.version,
'reviewer': (self.request.user.display_name),
'addon_url': absolutify(
self.addon.get_url_path(add_prefix=False)),
'review_url': absolutify(reverse('editors.review',
args=[self.addon.pk],
add_prefix=False)),
'comments': self.data['comments'],
'SITE_URL': settings.SITE_URL}
def request_information(self):
"""Send a request for information to the authors."""
emails = [a.email for a in self.addon.authors.all()]
self.log_action(amo.LOG.REQUEST_INFORMATION)
self.version.update(has_info_request=True)
log.info(u'Sending request for information for %s to %s' %
(self.addon, emails))
send_mail('editors/emails/info.ltxt',
u'Mozilla Add-ons: %s %s' %
(self.addon.name, self.version.version),
emails, Context(self.get_context_data()),
perm_setting='individual_contact')
def send_super_mail(self):
self.log_action(amo.LOG.REQUEST_SUPER_REVIEW)
log.info(u'Super review requested for %s' % (self.addon))
send_mail('editors/emails/super_review.ltxt',
u'Super review requested: %s' % (self.addon.name),
[settings.SENIOR_EDITORS_EMAIL],
Context(self.get_context_data()))
def process_comment(self):
self.version.update(has_editor_comment=True)
self.log_action(amo.LOG.COMMENT_VERSION)
class ReviewAddon(ReviewBase):
def __init__(self, *args, **kwargs):
super(ReviewAddon, self).__init__(*args, **kwargs)
self.is_upgrade = (self.addon.status == amo.STATUS_LITE_AND_NOMINATED
and self.review_type == 'nominated')
def set_data(self, data):
self.data = data
self.files = self.version.files.all()
def process_public(self):
"""Set an addon to public."""
if self.review_type == 'preliminary':
raise AssertionError('Preliminary addons cannot be made public.')
# Hold onto the status before we change it.
status = self.addon.status
# Save files first, because set_addon checks to make sure there
# is at least one public file or it won't make the addon public.
self.set_files(amo.STATUS_PUBLIC, self.version.files.all(),
copy_to_mirror=True)
self.set_addon(highest_status=amo.STATUS_PUBLIC,
status=amo.STATUS_PUBLIC)
self.log_action(amo.LOG.APPROVE_VERSION)
self.notify_email('%s_to_public' % self.review_type,
u'Mozilla Add-ons: %s %s Fully Reviewed')
log.info(u'Making %s public' % (self.addon))
log.info(u'Sending email for %s' % (self.addon))
# Assign reviewer incentive scores.
ReviewerScore.award_points(self.request.amo_user, self.addon, status)
def process_sandbox(self):
"""Set an addon back to sandbox."""
# Hold onto the status before we change it.
status = self.addon.status
if (not self.is_upgrade or
not self.addon.versions.exclude(id=self.version.id)
.filter(files__status__in=amo.REVIEWED_STATUSES)):
self.set_addon(status=amo.STATUS_NULL)
else:
self.set_addon(status=amo.STATUS_LITE)
self.set_files(amo.STATUS_DISABLED, self.version.files.all(),
hide_disabled_file=True)
self.log_action(amo.LOG.REJECT_VERSION)
self.notify_email('%s_to_sandbox' % self.review_type,
u'Mozilla Add-ons: %s %s Rejected')
log.info(u'Making %s disabled' % (self.addon))
log.info(u'Sending email for %s' % (self.addon))
# Assign reviewer incentive scores.
ReviewerScore.award_points(self.request.amo_user, self.addon, status)
def process_preliminary(self):
"""Set an addon to preliminary."""
# Hold onto the status before we change it.
status = self.addon.status
changes = {'status': amo.STATUS_LITE}
if (self.addon.status in (amo.STATUS_PUBLIC,
amo.STATUS_LITE_AND_NOMINATED)):
changes['highest_status'] = amo.STATUS_LITE
template = '%s_to_preliminary' % self.review_type
if (self.review_type == 'preliminary' and
self.addon.status == amo.STATUS_LITE_AND_NOMINATED):
template = 'nominated_to_nominated'
self.set_addon(**changes)
self.set_files(amo.STATUS_LITE, self.version.files.all(),
copy_to_mirror=True)
self.log_action(amo.LOG.PRELIMINARY_VERSION)
self.notify_email(template,
u'Mozilla Add-ons: %s %s Preliminary Reviewed')
log.info(u'Making %s preliminary' % (self.addon))
log.info(u'Sending email for %s' % (self.addon))
# Assign reviewer incentive scores.
ReviewerScore.award_points(self.request.amo_user, self.addon, status)
def process_super_review(self):
"""Give an addon super review."""
self.addon.update(admin_review=True)
self.notify_email('author_super_review',
u'Mozilla Add-ons: %s %s flagged for Admin Review')
self.send_super_mail()
class ReviewFiles(ReviewBase):
def set_data(self, data):
self.data = data
self.files = data.get('addon_files', None)
def process_public(self):
"""Set an addons files to public."""
if self.review_type == 'preliminary':
raise AssertionError('Preliminary addons cannot be made public.')
# Hold onto the status before we change it.
status = self.addon.status
self.set_files(amo.STATUS_PUBLIC, self.data['addon_files'],
copy_to_mirror=True)
self.log_action(amo.LOG.APPROVE_VERSION)
self.notify_email('%s_to_public' % self.review_type,
u'Mozilla Add-ons: %s %s Fully Reviewed')
log.info(u'Making %s files %s public' %
(self.addon,
', '.join([f.filename for f in self.data['addon_files']])))
log.info(u'Sending email for %s' % (self.addon))
# Assign reviewer incentive scores.
ReviewerScore.award_points(self.request.amo_user, self.addon, status)
def process_sandbox(self):
"""Set an addons files to sandbox."""
# Hold onto the status before we change it.
status = self.addon.status
self.set_files(amo.STATUS_DISABLED, self.data['addon_files'],
hide_disabled_file=True)
self.log_action(amo.LOG.REJECT_VERSION)
self.notify_email('%s_to_sandbox' % self.review_type,
u'Mozilla Add-ons: %s %s Rejected')
log.info(u'Making %s files %s disabled' %
(self.addon,
', '.join([f.filename for f in self.data['addon_files']])))
log.info(u'Sending email for %s' % (self.addon))
# Assign reviewer incentive scores.
ReviewerScore.award_points(self.request.amo_user, self.addon, status)
def process_preliminary(self):
"""Set an addons files to preliminary."""
# Hold onto the status before we change it.
status = self.addon.status
self.set_files(amo.STATUS_LITE, self.data['addon_files'],
copy_to_mirror=True)
self.log_action(amo.LOG.PRELIMINARY_VERSION)
self.notify_email('%s_to_preliminary' % self.review_type,
u'Mozilla Add-ons: %s %s Preliminary Reviewed')
log.info(u'Making %s files %s preliminary' %
(self.addon,
', '.join([f.filename for f in self.data['addon_files']])))
log.info(u'Sending email for %s' % (self.addon))
# Assign reviewer incentive scores.
ReviewerScore.award_points(self.request.amo_user, self.addon, status)
def process_super_review(self):
"""Give an addon super review when preliminary."""
self.addon.update(admin_review=True)
self.notify_email('author_super_review',
u'Mozilla Add-ons: %s %s flagged for Admin Review')
self.send_super_mail()
@register.function
@jinja2.contextfunction
def logs_tabnav_themes(context):
"""
    Returns a list of tab navigation tuples for the log pages.
Each tuple contains three elements: (named url, tab_code, tab_text)
"""
rv = [
('editors.themes.logs', 'themes', _('Reviews'))
]
if acl.action_allowed(context['request'], 'SeniorPersonasTools', 'View'):
rv.append(('editors.themes.deleted', 'deleted', _('Deleted')))
return rv
@register.function
@jinja2.contextfunction
def queue_tabnav_themes(context):
"""Similar to queue_tabnav, but for themes."""
tabs = []
if acl.action_allowed(context['request'], 'Personas', 'Review'):
tabs.append((
'editors.themes.list', 'pending_themes', _('Pending'),
))
if acl.action_allowed(context['request'], 'SeniorPersonasTools', 'View'):
tabs.append((
'editors.themes.list_flagged', 'flagged_themes', _('Flagged'),
))
tabs.append((
'editors.themes.list_rereview', 'rereview_themes',
_('Updates'),
))
return tabs
@register.function
@jinja2.contextfunction
def queue_tabnav_themes_interactive(context):
"""Tabnav for the interactive shiny theme queues."""
tabs = []
if acl.action_allowed(context['request'], 'Personas', 'Review'):
tabs.append((
'editors.themes.queue_themes', 'pending', _('Pending'),
))
if acl.action_allowed(context['request'], 'SeniorPersonasTools', 'View'):
tabs.append((
'editors.themes.queue_flagged', 'flagged', _('Flagged'),
))
tabs.append((
'editors.themes.queue_rereview', 'rereview', _('Updates'),
))
return tabs
@register.function
@jinja2.contextfunction
def is_expired_lock(context, lock):
return lock.expiry < datetime.datetime.now()
|
anaran/olympia
|
apps/editors/helpers.py
|
Python
|
bsd-3-clause
| 31,475
|
'''
Created on May 9, 2014
@author: Jennifer Reiber Kyle
'''
from kaggle import Submitter, DataReader
from prediction import Preprocessor, Classifier
def runPrediction(sourceDirectory, submitDirectory):
    # Use the function parameters rather than the module-level globals.
    reader = DataReader(sourceDirectory)
    p = Preprocessor(*reader.getData())
    submitter = Submitter(submitDirectory)
train, trainLabels, test = p.getOriginalValues()
classifier = Classifier(train,trainLabels,test)
## Predict using Fancy SVM
predictions = classifier.FancySVM()
submitter.saveSubmission(predictions, "fancySVMSubmission")
## Predict using Model SVM
predictions = classifier.ModelSVM()
submitter.saveSubmission(predictions, "modelSVMSubmission")
if __name__ == '__main__':
dataDir = r"D:\NWR\Kaggle\DataScienceLondon"
submitDir = r"D:\NWR\Kaggle\DataScienceLondon\tut_svm"
runPrediction(dataDir, submitDir)
|
jreiberkyle/Kaggle_Data-Science-London
|
main.py
|
Python
|
bsd-3-clause
| 900
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name="Ente",
version="0.1",
description="place finder on commoncrawl dataset",
author="László Nagy",
author_email="rizsotto@gmail.com",
license='LICENSE',
url='https://github.com/rizsotto/Ente',
long_description=open('README.md').read(),
scripts=['bin/ente']
)
|
rizsotto/Ente
|
setup.py
|
Python
|
bsd-3-clause
| 383
|
# -*- coding: utf-8 -*-
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
from flask import url_for
from power_grid_helper.user.models import User
from .factories import UserFactory
class TestLoggingIn:
"""Login."""
def test_can_log_in_returns_200(self, user, testapp):
"""Login successful."""
# Goes to homepage
res = testapp.get('/')
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
assert res.status_code == 200
def test_sees_alert_on_log_out(self, user, testapp):
"""Show alert on logout."""
res = testapp.get('/')
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
res = testapp.get(url_for('public.logout')).follow()
# sees alert
assert 'You are logged out.' in res
def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
"""Show error if password is incorrect."""
# Goes to homepage
res = testapp.get('/')
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'wrong'
# Submits
res = form.submit()
# sees error
assert 'Invalid password' in res
def test_sees_error_message_if_username_doesnt_exist(self, user, testapp):
"""Show error if username doesn't exist."""
# Goes to homepage
res = testapp.get('/')
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = 'unknown'
form['password'] = 'myprecious'
# Submits
res = form.submit()
# sees error
assert 'Unknown user' in res
class TestRegistering:
"""Register a user."""
def test_can_register(self, user, testapp):
"""Register a new user."""
old_count = len(User.query.all())
# Goes to homepage
res = testapp.get('/')
# Clicks Create Account button
res = res.click('Create account')
# Fills out the form
form = res.forms['registerForm']
form['username'] = 'foobar'
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit().follow()
assert res.status_code == 200
# A new user was created
assert len(User.query.all()) == old_count + 1
def test_sees_error_message_if_passwords_dont_match(self, user, testapp):
"""Show error if passwords don't match."""
# Goes to registration page
res = testapp.get(url_for('public.register'))
# Fills out form, but passwords don't match
form = res.forms['registerForm']
form['username'] = 'foobar'
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secrets'
# Submits
res = form.submit()
# sees error message
assert 'Passwords must match' in res
def test_sees_error_message_if_user_already_registered(self, user, testapp):
"""Show error if user already registered."""
user = UserFactory(active=True) # A registered user
user.save()
# Goes to registration page
res = testapp.get(url_for('public.register'))
# Fills out form, but username is already registered
form = res.forms['registerForm']
form['username'] = user.username
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit()
# sees error
assert 'Username already registered' in res
|
kdheepak89/power-grid-helper
|
tests/test_functional.py
|
Python
|
bsd-3-clause
| 4,009
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from nose.tools import assert_raises
from nose.tools import assert_dict_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_almost_equal
import numpy as np
import tempfile
import pandas as pd
from sktracker import data
from sktracker.trajectories import Trajectories
def test_attributes():
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
assert_array_equal(trajs.t_stamps, np.array([0, 1, 2, 3, 4]))
assert_array_equal(trajs.labels, np.array([0, 1, 2, 3, 4]))
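    # segment_idxs maps each label to its list of (t_stamp, label) index pairs,
    # one entry per frame, as spelled out below.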
segments = {0: [(0, 0), (1, 0), (2, 0), (3, 0), (4, 0)],
1: [(0, 1), (1, 1), (2, 1), (3, 1), (4, 1)],
2: [(0, 2), (1, 2), (2, 2), (3, 2), (4, 2)],
3: [(0, 3), (1, 3), (2, 3), (3, 3), (4, 3)],
4: [(0, 4), (1, 4), (2, 4), (3, 4), (4, 4)]}
assert_dict_equal(trajs.segment_idxs, segments)
traj = np.array([[ -9.25386045, 11.34555088, 22.11820326, 3. , 0. ],
[ 11.05321776, 3.23738477, 2.62790435, 2. , 1. ],
[ 16.6824928 , 14.602054 , -12.1218683 , 4. , 2. ],
[ 17.22410516, 14.8068125 , -11.87642753, 4. , 3. ],
[ 2.80222495, -13.13783042, 8.56406878, 0. , 4. ]])
t_stamp, traj_to_test = list(trajs.iter_segments)[0]
assert_array_almost_equal(traj, traj_to_test)
assert list(trajs.get_segments().keys()) == [0, 1, 2, 3, 4]
def test_structure():
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
assert_raises(ValueError, trajs.check_trajs_df_structure, ['t_idx'])
assert_raises(ValueError, trajs.check_trajs_df_structure, ['t_stamp', 'label'], ['dx'])
trajs.check_trajs_df_structure(['t_stamp', 'label'], ['x', 'y', 'z'])
def test_copy():
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
assert isinstance(trajs.copy(), Trajectories)
def test_empty():
empty = Trajectories.empty_trajs(columns=['x', 'y'])
assert empty.shape == (0, 2)
assert empty.empty is True
def test_reverse():
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
assert trajs.reverse().shape == (25, 5)
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
trajs.reverse(inplace=True)
assert trajs.shape == (25, 5)
def test_write_hdf():
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
tmp_store = tempfile.NamedTemporaryFile(suffix='h5')
with pd.get_store(tmp_store.name) as store:
store['trajs'] = trajs
def test_interpolate():
trajs = Trajectories(data.with_gaps_df())
trajs.set_index('true_label', inplace=True, append=True)
trajs.reset_index(level='label', drop=True, inplace=True)
trajs.index.set_names(['t_stamp', 'label'], inplace=True)
interpolated = trajs.time_interpolate(sampling=3, time_step=0.1, s=1)
# t_stamps_in = interpolated.index.get_level_values('t_stamp')
# indexer = t_stamps_in % 2 == 0
# interpolated.loc[indexer].shape, trajs.shape
# indexer = interpolated.t_stamps % 3 == 0
# assert interpolated.loc[indexer].shape[0] == trajs.shape[0]
dts = interpolated.get_segments()[0].t.diff().dropna()
# All time points should be equaly spaced
assert_almost_equal(dts.min(), dts.max())
def test_get_diff():
trajs = Trajectories(data.brownian_trajs_df())
diffs = trajs.get_diff()
x_diffs = diffs.to_dict()['x']
real_x_diffs = {(1, 2): 3.8452299074207819,
(3, 2): 4.7476193900872765,
(0, 0): np.nan,
(3, 0): 0.54161236467700746,
(0, 4): np.nan,
(1, 4): -5.6929349491048624,
(1, 3): -30.136494087633611,
(2, 3): 23.240228721514185,
(2, 1): -23.9264368052234,
(2, 4): 0.63465512968445115,
(4, 2): -4.5501817884252063,
(1, 0): 20.307078207040306,
(0, 3): np.nan,
(4, 0): -14.421880216023439,
(0, 1): np.nan,
(3, 3): -6.5845079821965991,
(4, 1): -19.329775838349192,
(3, 1): 18.084232469105203,
(4, 4): 24.644945052453025,
(0, 2): np.nan,
(2, 0): 5.6292750381105723,
(4, 3): 13.209596167161628,
(2, 2): -3.7469188310869228,
(3, 4): -17.381636024737336,
(1, 1): 13.827909766138866}
    # Compare per key: assert_almost_equal cannot subtract dicts, and the NaN
    # placeholders need an explicit isnan check.
    for key, real in real_x_diffs.items():
        if np.isnan(real):
            assert np.isnan(x_diffs[key])
        else:
            assert_almost_equal(x_diffs[key], real)
def test_get_speeds():
trajs = Trajectories(data.brownian_trajs_df())
speeds = trajs.get_speeds().tolist()
real_speeds = [np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
857.99153458573994,
1596.9530747771976,
873.15267834726137,
1282.3088174598233,
408.98588960526808,
378.40023709328955,
1809.9895146014187,
917.93227668556324,
592.31881736181106,
0.48325048326444919,
0.39551116881922965,
798.29858694043128,
1085.3214310682606,
405.49164945495221,
550.37555144616226,
1406.707586739079,
1031.9444945962532,
1077.6619763794718,
1445.7789239945778,
739.66839622816326]
assert_almost_equal(speeds, real_speeds)
def test_scale():
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
scaled = trajs.scale(factors=[2., 2., 2.],
coords=['x', 'y', 'z'], inplace=False)
assert_array_almost_equal(scaled[['x', 'y', 'z']] / 2., trajs[['x', 'y', 'z']])
trajs = trajs.scale(factors=[2., 2., 2.],
coords=['x', 'y', 'z'], inplace=True)
assert_array_almost_equal(scaled[['x', 'y', 'z']], trajs[['x', 'y', 'z']])
assert_raises(ValueError, trajs.scale, factors=[2., 2., 2.], coords=['x', 'y'], inplace=False)
def test_project():
trajs = Trajectories(data.directed_motion_trajs_df())
trajs.rename(columns={'true_label': 'new_label'}, inplace=True)
trajs.relabel()
trajs.project([0, 1],
coords=['x', 'y'],
keep_first_time=False,
reference=None,
inplace=True,
progress=False)
    expected = np.array([[ 0.27027431, 0. ],
[-0.27027431, 0. ],
[-0.25306519, 0.69683713],
[ 0.04633664, 0.31722648]])
    assert_array_almost_equal(expected, trajs.loc[:,['x_proj', 'y_proj']].values[:4])
trajs = trajs.project([0, 1],
coords=['x', 'y'],
keep_first_time=False,
reference=None,
inplace=False,
progress=False)
    assert_array_almost_equal(expected, trajs.loc[:,['x_proj', 'y_proj']].values[:4])
assert_raises(ValueError, trajs.project, [0, 1], coords=['x', 'y', 'z', 't'])
def test_get_colors():
"""
"""
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
colors = trajs.get_colors()
assert colors == {0: '#FF0000', 1: '#ADFF00', 2: '#00FFA9', 3: '#0408FF', 4: '#FF00AC'}
colors = trajs.get_colors(alpha=0.5)
assert colors == {0: '#FF000080',
1: '#ADFF0080',
2: '#00FFA980',
3: '#0408FF80',
4: '#FF00AC80'}
colors = trajs.get_colors(rgba=True)
good_colors = {0: (1.0, 0.0, 0.0, 1.0),
1: (0.67977809154279767, 1.0, 0.0, 1.0),
2: (0.0, 1.0, 0.66360181783683614, 1.0),
3: (0.015440535661123769, 0.031618928677752463, 1.0, 1.0),
4: (1.0, 0.0, 0.67279469669175529, 1.0)}
assert colors == good_colors
def test_get_longest_segments():
"""
"""
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
assert trajs.get_longest_segments(1) == [4]
def test_get_shortest_segments():
"""
"""
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
assert trajs.get_shortest_segments(1) == [0]
def test_remove_segments():
"""
"""
trajs = data.brownian_trajs_df()
trajs = Trajectories(trajs)
trajs.remove_segments(1, inplace=True)
assert np.all(trajs.labels == [0, 2, 3, 4])
def test_merge():
"""
"""
trajs1 = Trajectories(data.brownian_trajs_df())
trajs2 = Trajectories(data.brownian_trajs_df())
new = trajs1.merge(trajs2)
assert len(trajs1.labels) + len(trajs2.labels) == len(new.labels)
def test_relabel():
"""
"""
trajs = Trajectories(data.brownian_trajs_df())
trajs.columns = ['x', 'y', 'z', 'new_label', 't']
trajs.relabel(inplace=True)
new_values = [[1.933058243735795, -14.581064591435775, 11.603556633147544, 0.0],
[-12.862215173899491, -2.8611502446443238, -2.2738941196781424, 0.0],
[9.100887851132633, 2.837252570763561, 2.875753940450461, 0.0],
[-9.253860446235523, 11.345550876585719, 22.118203258275745, 0.0]]
assert trajs.iloc[:4].values.tolist() == new_values
trajs = Trajectories(data.brownian_trajs_df())
trajs.columns = ['x', 'y', 'z', 'new_label', 't']
trajs = trajs.relabel(inplace=False)
new_values = [[1.933058243735795, -14.581064591435775, 11.603556633147544, 0.0],
[-12.862215173899491, -2.8611502446443238, -2.2738941196781424, 0.0],
[9.100887851132633, 2.837252570763561, 2.875753940450461, 0.0],
[-9.253860446235523, 11.345550876585719, 22.118203258275745, 0.0]]
assert trajs.iloc[:4].values.tolist() == new_values
def test_relabel_fromzero():
"""
"""
trajs = Trajectories(data.brownian_trajs_df())
original_labels = trajs.labels
idx = pd.IndexSlice
trajs.loc[idx[:, 1], :] = 55
relabeled = trajs.relabel_fromzero('label', inplace=False)
assert np.all(relabeled.labels == original_labels)
trajs.loc[idx[:, 1], :] = 55
relabeled = trajs.relabel_fromzero('label', inplace=False)
assert np.all(relabeled.labels == original_labels)
def test_remove_spots():
"""
"""
trajs = Trajectories(data.brownian_trajs_df())
new_trajs = trajs.remove_spots([(3, 2), (0, 0)], inplace=False)
new_indexes = [(0, 1), (0, 2), (0, 3), (0, 4), (1, 0), (1, 1), (1, 2),
(1, 3), (1, 4), (2, 0), (2, 1), (2, 2), (2, 3), (2, 4),
(3, 0), (3, 1), (3, 3), (3, 4), (4, 0), (4, 1), (4, 2),
(4, 3), (4, 4)]
assert new_trajs.index.tolist() == new_indexes
new_trajs = trajs.remove_spots((0, 0), inplace=False)
new_indexes = [(0, 1), (0, 2), (0, 3), (0, 4), (1, 0), (1, 1), (1, 2),
(1, 3), (1, 4), (2, 0), (2, 1), (2, 2), (2, 3), (2, 4),
(3, 0), (3, 1), (3, 2), (3, 3), (3, 4), (4, 0), (4, 1),
(4, 2), (4, 3), (4, 4)]
assert new_trajs.index.tolist() == new_indexes
def test_merge_segments():
"""
"""
trajs = Trajectories(data.brownian_trajs_df())
trajs.reset_index(inplace=True)
trajs.loc[15, ['label']] = 88
trajs.loc[20, ['label']] = 88
trajs.set_index(['t_stamp', 'label'], inplace=True)
new_trajs = trajs.merge_segments([0, 88], inplace=False)
assert_array_almost_equal(trajs.values, new_trajs.values)
trajs = Trajectories(data.brownian_trajs_df())
good_trajs = trajs.copy()
trajs.reset_index(inplace=True)
trajs.loc[15, ['label']] = 88
trajs.loc[20, ['label']] = 88
trajs.set_index(['t_stamp', 'label'], inplace=True)
trajs.merge_segments([0, 88], inplace=True)
assert_array_almost_equal(trajs.values, good_trajs.values)
def test_cut_segments():
"""
"""
trajs = Trajectories(data.brownian_trajs_df())
trajs.cut_segments((2, 3), inplace=True)
new_indexes = [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (1, 0), (1, 1),
(1, 2), (1, 3), (1, 4), (2, 0), (2, 1), (2, 2), (2, 3),
(2, 4), (3, 0), (3, 1), (3, 2), (3, 4), (3, 5), (4, 0),
(4, 1), (4, 2), (4, 4), (4, 5)]
assert trajs.index.tolist() == new_indexes
trajs = Trajectories(data.brownian_trajs_df())
trajs = trajs.cut_segments((2, 3), inplace=False)
new_indexes = [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (1, 0), (1, 1),
(1, 2), (1, 3), (1, 4), (2, 0), (2, 1), (2, 2), (2, 3),
(2, 4), (3, 0), (3, 1), (3, 2), (3, 4), (3, 5), (4, 0),
(4, 1), (4, 2), (4, 4), (4, 5)]
assert trajs.index.tolist() == new_indexes
def test_duplicate_segments():
"""
"""
trajs = Trajectories(data.brownian_trajs_df())
trajs = trajs.duplicate_segments(2)
new_indexes = [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (1, 0),
(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (2, 0), (2, 1),
(2, 2), (2, 3), (2, 4), (2, 5), (3, 0), (3, 1), (3, 2),
(3, 3), (3, 4), (3, 5), (4, 0), (4, 1), (4, 2), (4, 3),
(4, 4), (4, 5)]
assert trajs.index.tolist() == new_indexes
def test_get_bounds():
"""
"""
trajs = Trajectories(data.brownian_trajs_df())
trajs['t'] *= 10
t_stamp_bounds = {0: (0, 4), 1: (0, 4), 2: (0, 4), 3: (0, 4), 4: (0, 4)}
t_bounds = {0: (0.0, 40.0), 1: (0.0, 40.0), 2: (0.0, 40.0), 3: (0.0, 40.0), 4: (0.0, 40.0)}
assert trajs.get_bounds() == t_stamp_bounds
assert trajs.get_bounds(column='t') == t_bounds
def test_get_t_stamps_correspondences():
"""
"""
trajs = Trajectories(data.brownian_trajs_df())
trajs['t'] *= 33
data_values = [132, 33, 99, 66, 33, 33, 99., 99, 132]
t_stamps = trajs.get_t_stamps_correspondences(data_values, column='t')
assert_array_equal(t_stamps, [4, 1, 3, 2, 1, 1, 3, 3, 4])
|
bnoi/scikit-tracker
|
sktracker/trajectories/tests/test_trajectories.py
|
Python
|
bsd-3-clause
| 14,672
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
import random
import unittest
from measurement_stats import angle
from measurement_stats import value
from measurement_stats import value2D
HALF_SQRT_2 = 0.5 * math.sqrt(2.0)
HALF_SQRT_3 = 0.5 * math.sqrt(3.0)
class TestValue2D(unittest.TestCase):
def test_angleBetween(self):
p1 = value2D.Point2D(
value.ValueUncertainty(2.0, 0.1),
value.ValueUncertainty(0.0, 0.1) )
p2 = value2D.Point2D(
value.ValueUncertainty(0.0, 0.1),
value.ValueUncertainty(2.0, 0.1) )
a = p1.angle_between(p2)
self.assertAlmostEquals(a.degrees, 90.0, 1)
def test_rotate(self):
tests = [
(90.0, 0.0, 1.0), (-90.0, 0.0, -1.0),
(180.0, -1.0, 0.0), (-180.0, -1.0, 0.0),
(270.0, 0.0, -1.0), (-270.0, 0.0, 1.0),
(360.0, 1.0, 0.0), (-360.0, 1.0, 0.0),
(45.0, HALF_SQRT_2, HALF_SQRT_2),
(-45.0, HALF_SQRT_2, -HALF_SQRT_2),
(315.0, HALF_SQRT_2, -HALF_SQRT_2),
(-315.0, HALF_SQRT_2, HALF_SQRT_2),
(30.0, HALF_SQRT_3, 0.5), (-30.0, HALF_SQRT_3, -0.5),
(330.0, HALF_SQRT_3, -0.5), (-330.0, HALF_SQRT_3, 0.5) ]
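        # Each tuple is (rotation in degrees, expected unit-x, expected unit-y);
        # the start point lies on the +x axis at a random radius, so the
        # assertions below scale the expected coordinates by that radius.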
for test in tests:
radius = random.uniform(0.001, 1000.0)
p = value2D.Point2D(
value.ValueUncertainty(radius, 0.25),
value.ValueUncertainty(0.0, 0.25) )
p.rotate(angle.Angle(degrees=test[0]))
self.assertAlmostEqual(p.x.raw, radius * test[1], 2)
self.assertAlmostEqual(p.y.raw, radius * test[2], 2)
def test_projection(self):
"""
:return:
"""
line_start = value2D.create_point(0, 0)
line_end = value2D.create_point(1, 1)
point = value2D.create_point(0, 1)
result = value2D.closest_point_on_line(point, line_start, line_end)
self.assertIsNotNone(result)
print('PROJECTION:', result)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestValue2D)
unittest.TextTestRunner(verbosity=2).run(suite)
|
sernst/RefinedStatistics
|
measurement_stats/test/test_value2D.py
|
Python
|
mit
| 2,275
|
import nose
def test_nose_working():
"""
Test that the nose runner is working.
"""
assert True
|
cwoodall/doppler-gestures-py
|
tests/test.py
|
Python
|
mit
| 116
|
default_app_config = 'django_tenants.apps.DjangoTenantsConfig'
|
tomturner/django-tenants
|
django_tenants/__init__.py
|
Python
|
mit
| 63
|
# Example taken from the http://gunicorn.org/configure.html
# page.
import os
def numCPUs():
if not hasattr(os, "sysconf"):
raise RuntimeError("No sysconf detected.")
return os.sysconf("SC_NPROCESSORS_ONLN")
bind = "127.0.0.1:8000"
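# The (2 * cores) + 1 worker count below follows the rule of thumb from the
# gunicorn documentation referenced in the header comment.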
workers = numCPUs() * 2 + 1
|
ucdavis-agecon/gunicorn-init
|
tree/etc/gunicorn/py/example.py
|
Python
|
mit
| 272
|
import os
import libxml2
from sfatables.command import Command
from sfatables.globals import sfatables_config, target_dir, match_dir
class Add(Command):
def __init__(self):
self.options = [('-A','--add')]
self.help = 'Add a rule to a chain'
self.matches = True
self.targets = True
return
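    # Rules are stored as sfatables-<number>-<type> files under the chain's
    # config directory; scan them and hand back the next free rule number.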
    def getnextfilename(self, type, chain):
        chain_dir = sfatables_config + "/" + chain
        last_rule_number = 0
        for (root, dirs, files) in os.walk(chain_dir):
for file in files:
if (file.startswith('sfatables-') and file.endswith(type)):
number_str = file.split('-')[1]
number = int(number_str)
if (number>last_rule_number):
last_rule_number = number
return "sfatables-%d-%s"%(last_rule_number+1,type)
def call_gen(self, chain, type, dir, options):
filename = os.path.join(dir, options.name+".xml")
xmldoc = libxml2.parseFile(filename)
p = xmldoc.xpathNewContext()
supplied_arguments = options.arguments
if (hasattr(options,'element') and options.element):
element = options.element
else:
element='*'
for option in supplied_arguments:
option_name = option['name']
            option_value = getattr(options, option_name, None)
if (hasattr(options,option_name) and getattr(options,option_name)):
context = p.xpathEval("//rule[@element='%s' or @element='*']/argument[name='%s']"%(element, option_name))
if (not context):
                    raise Exception('Unknown option %s for match %s and element %s' % (option_name, options.name, element))
else:
# Add the value of option
valueNode = libxml2.newNode('value')
valueNode.addContent(option_value)
context[0].addChild(valueNode)
filename = self.getnextfilename(type,chain)
file_path = os.path.join(sfatables_config, chain, filename)
if not os.path.isdir(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
xmldoc.saveFile(file_path)
p.xpathFreeContext()
xmldoc.freeDoc()
return True
def call(self, command_options, match_options, target_options):
chain = command_options.args[0]
ret = self.call_gen(chain, 'match',match_dir, match_options)
if (ret):
ret = self.call_gen(chain, 'target',target_dir, target_options)
return ret
|
yippeecw/sfa
|
sfatables/commands/Add.py
|
Python
|
mit
| 2,604
|
#!/usr/bin/env python
#
# GrovePi Example for using the Grove Barometer module (http://www.seeedstudio.com/depot/Grove-Barometer-HighAccuracy-p-1865.html)
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://www.dexterindustries.com/forum/?forum=grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import grove_barometer_lib
b = grove_barometer_lib.barometer()
while True:
    print("Temp:", b.temperature, " Pressure:", b.pressure, " Altitude:", b.altitude)
    b.update()
    time.sleep(.1)
|
penoud/GrovePi
|
Software/Python/grove_barometer_sensors/barometric_sensor_bmp085/grove_barometer_example_BMP085.py
|
Python
|
mit
| 1,813
|
#!/usr/bin/python
from sensu_plugin import SensuPluginMetricJSON
import requests
#import os
import json
from sh import curl
from walrus import *
#from redis import *
import math
#from requests.auth import HTTPBasicAuth
import statsd
import warnings
from requests.packages.urllib3 import exceptions
db = Database(host='localhost', port=6379, db=0)
c = statsd.StatsClient('grafana', 8125)
class FooBarBazMetricJSON(SensuPluginMetricJSON):
def run(self):
endpoints = ['topology', 'remediations']
positions = [30, 50, 99]
api = 'ecepeda-api.route105.net'
token_curl = curl('https://{0}/aims/v1/authenticate'.format(api), '-s', '-k', '-X', 'POST', '-H', 'Accept: application/json', '--user', '2A6B0U16535H6X0D5822:$2a$12$WB8KmRcUnGpf1M6oEdLBe.GrfBEaa94U4QMBTPMuVWktWZf91AJk')
headers = {'X-Iam-Auth-Token': json.loads(str(token_curl))['authentication']['token'], 'X-Request-Id': 'DEADBEEF'}
for endpoint in endpoints:
a = db.ZSet('measures_{0}'.format(endpoint))
percentiles = db.Hash('percentiles_{0}'.format(endpoint))
current = percentiles['current']
if current is None or int(current) > 99:
current = 1
url = 'https://{0}/assets/v1/67000001/environments/814C2911-09BB-1005-9916-7831C1BAC182/{1}'.format(api, endpoint)
with warnings.catch_warnings():
warnings.simplefilter("ignore", exceptions.InsecureRequestWarning)
r = requests.get(url, headers=headers, verify=False)
a.remove(current)
a.add(current, r.elapsed.microseconds)
c.timing(endpoint, int(r.elapsed.microseconds)/1000)
            elements = []
            # Each item unpacks as (position, time), mirroring a.add(position, time) above.
            for elem in a:
                elements.append({'position': elem[0], 'time': elem[1]})
if len(elements) > 0:
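                # Nearest-rank percentile: pick the element at index ceil(p/100 * n) - 1.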
for percentile in positions:
position = (percentile*.01) * len(elements) - 1
percentiles[percentile] = elements[int(math.ceil(position))]
percentiles['current'] = int(current) + 1
self.output(str(percentiles))
self.warning(str(endpoints))
if __name__ == "__main__":
f = FooBarBazMetricJSON()
|
PicOrb/docker-sensu-server
|
plugins/aux/check-boundary.py
|
Python
|
mit
| 2,430
|
# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
"""ACE -- Automated Coordinate Extraction.
"""
__all__ = ["config", "database", "datatable", "exporter", "set_logging_level", "scrape", "sources", "tableparser", "tests", "__version__"]
import logging
import sys
import os
from version import __version__
def set_logging_level(level=None):
"""Set package-wide logging level
Args
level : Logging level constant from logging module (warning, error, info, etc.)
"""
if level is None:
level = os.environ.get('ACE_LOGLEVEL', 'warn')
logger.setLevel(getattr(logging, level.upper()))
return logger.getEffectiveLevel()
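# Example: set_logging_level('debug'), or export ACE_LOGLEVEL=debug before import.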
def _setup_logger(logger):
# Basic logging setup
console = logging.StreamHandler(sys.stdout)
console.setFormatter(logging.Formatter("%(levelname)-6s %(module)-7s %(message)s"))
logger.addHandler(console)
set_logging_level()
# Set up logger
logger = logging.getLogger("ace")
_setup_logger(logger)
|
neurosynth/ACE
|
ace/__init__.py
|
Python
|
mit
| 1,044
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated by generateDS.py.
#
import sys
import getopt
import re as re_
import base64
import datetime as datetime_
etree_ = None
Verbose_import_ = False
(
XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
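        # Matches a trailing UTC offset of the form +HH:MM / -HH:MM, from 00:00
        # up to the maximum legal offset of 14:00.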
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
                    int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return input_data
def gds_validate_datetime(self, input_data, node, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
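                    # timedelta stores negative offsets as negative days plus
                    # positive seconds; this recovers the signed total in seconds.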
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
def gds_validate_date(self, input_data, node, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
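        # Strips the '{namespace}' prefix that lxml/ElementTree prepend to tag names.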
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
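        # Resolve an element's xsi:type attribute to the matching generated
        # class, falling back to default_class when no override is present.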
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.iteritems()))
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
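# Tag_pattern_ splits an optional '{namespace}' prefix from a tag name; the
# other patterns support whitespace cleanup and namespace extraction.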
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (
msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace, name, pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class people(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('comments', 'comments', 1),
MemberSpec_('person', 'person', 1),
MemberSpec_('programmer', 'programmer', 1),
MemberSpec_('python_programmer', 'python-programmer', 1),
MemberSpec_('java_programmer', 'java-programmer', 1),
]
subclass = None
superclass = None
def __init__(self, comments=None, person=None, programmer=None, python_programmer=None, java_programmer=None):
if comments is None:
self.comments = []
else:
self.comments = comments
if person is None:
self.person = []
else:
self.person = person
if programmer is None:
self.programmer = []
else:
self.programmer = programmer
if python_programmer is None:
self.python_programmer = []
else:
self.python_programmer = python_programmer
if java_programmer is None:
self.java_programmer = []
else:
self.java_programmer = java_programmer
def factory(*args_, **kwargs_):
if people.subclass:
return people.subclass(*args_, **kwargs_)
else:
return people(*args_, **kwargs_)
factory = staticmethod(factory)
def get_comments(self): return self.comments
def set_comments(self, comments): self.comments = comments
def add_comments(self, value): self.comments.append(value)
def insert_comments(self, index, value): self.comments[index] = value
def get_person(self): return self.person
def set_person(self, person): self.person = person
def add_person(self, value): self.person.append(value)
def insert_person(self, index, value): self.person[index] = value
def get_programmer(self): return self.programmer
def set_programmer(self, programmer): self.programmer = programmer
def add_programmer(self, value): self.programmer.append(value)
def insert_programmer(self, index, value): self.programmer[index] = value
def get_python_programmer(self): return self.python_programmer
def set_python_programmer(self, python_programmer): self.python_programmer = python_programmer
def add_python_programmer(self, value): self.python_programmer.append(value)
def insert_python_programmer(self, index, value): self.python_programmer[index] = value
def get_java_programmer(self): return self.java_programmer
def set_java_programmer(self, java_programmer): self.java_programmer = java_programmer
def add_java_programmer(self, value): self.java_programmer.append(value)
def insert_java_programmer(self, index, value): self.java_programmer[index] = value
def hasContent_(self):
if (
self.comments or
self.person or
self.programmer or
self.python_programmer or
self.java_programmer
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='people', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='people')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='people'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='people', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for comments_ in self.comments:
comments_.export(outfile, level, namespace_, name_='comments', pretty_print=pretty_print)
for person_ in self.person:
person_.export(outfile, level, namespace_, name_='person', pretty_print=pretty_print)
for programmer_ in self.programmer:
programmer_.export(outfile, level, namespace_, name_='programmer', pretty_print=pretty_print)
for python_programmer_ in self.python_programmer:
python_programmer_.export(outfile, level, namespace_, name_='python-programmer', pretty_print=pretty_print)
for java_programmer_ in self.java_programmer:
java_programmer_.export(outfile, level, namespace_, name_='java-programmer', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='people'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('comments=[\n')
level += 1
for comments_ in self.comments:
showIndent(outfile, level)
outfile.write('model_.comments(\n')
comments_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('person=[\n')
level += 1
for person_ in self.person:
showIndent(outfile, level)
outfile.write('model_.person(\n')
person_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('programmer=[\n')
level += 1
for programmer_ in self.programmer:
showIndent(outfile, level)
outfile.write('model_.programmer(\n')
programmer_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('python_programmer=[\n')
level += 1
for python_programmer_ in self.python_programmer:
showIndent(outfile, level)
outfile.write('model_.python_programmer(\n')
python_programmer_.exportLiteral(outfile, level, name_='python-programmer')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('java_programmer=[\n')
level += 1
for java_programmer_ in self.java_programmer:
showIndent(outfile, level)
outfile.write('model_.java_programmer(\n')
java_programmer_.exportLiteral(outfile, level, name_='java-programmer')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'comments':
obj_ = comments.factory()
obj_.build(child_)
self.comments.append(obj_)
elif nodeName_ == 'person':
class_obj_ = self.get_class_obj_(child_, person)
obj_ = class_obj_.factory()
obj_.build(child_)
self.person.append(obj_)
elif nodeName_ == 'programmer':
class_obj_ = self.get_class_obj_(child_, programmer)
obj_ = class_obj_.factory()
obj_.build(child_)
self.programmer.append(obj_)
elif nodeName_ == 'python-programmer':
obj_ = python_programmer.factory()
obj_.build(child_)
self.python_programmer.append(obj_)
elif nodeName_ == 'java-programmer':
obj_ = java_programmer.factory()
obj_.build(child_)
self.java_programmer.append(obj_)
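    # The walk_and_update/walk_and_show helpers below assume a date_calcs module
    # is available in scope (it is not imported in this file) and require
    # set_up() to have been called first, since it publishes the global
    # 'types' and 'counter' names they rely on.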
def walk_and_update(self):
        members = people.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
if people.superclass != None:
people.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: people depth: %d' % (counter, depth, )
        members = people.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
def set_up(self):
global types, counter
import types as types_module
types = types_module
counter = 0
# end class people
class comments(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('emp', 'xs:string', 1),
MemberSpec_('valueOf_', [], 0),
]
subclass = None
superclass = None
def __init__(self, emp=None, valueOf_=None, mixedclass_=None, content_=None):
if emp is None:
self.emp = []
else:
self.emp = emp
self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if comments.subclass:
return comments.subclass(*args_, **kwargs_)
else:
return comments(*args_, **kwargs_)
factory = staticmethod(factory)
def get_emp(self): return self.emp
def set_emp(self, emp): self.emp = emp
def add_emp(self, value): self.emp.append(value)
def insert_emp(self, index, value): self.emp[index] = value
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.emp or
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='comments', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='comments')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='comments'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='comments', fromsubclass_=False, pretty_print=True):
if not fromsubclass_:
for item_ in self.content_:
item_.export(outfile, level, item_.name, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='comments'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('content_ = [\n')
for item_ in self.content_:
item_.exportLiteral(outfile, level, name_)
showIndent(outfile, level)
outfile.write('],\n')
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
if node.text is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', node.text)
self.content_.append(obj_)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'emp' and child_.text is not None:
valuestr_ = child_.text
obj_ = self.mixedclass_(MixedContainer.CategorySimple,
MixedContainer.TypeString, 'emp', valuestr_)
self.content_.append(obj_)
if not fromsubclass_ and child_.tail is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.tail)
self.content_.append(obj_)
def walk_and_update(self):
        members = comments.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
if comments.superclass != None:
comments.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: comments depth: %d' % (counter, depth, )
        members = comments.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class comments
class person(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('vegetable', 'xs:string', 0),
MemberSpec_('fruit', 'xs:string', 0),
MemberSpec_('ratio', 'xs:float', 0),
MemberSpec_('id', 'xs:integer', 0),
MemberSpec_('value', 'xs:string', 0),
MemberSpec_('name', 'xs:string', 0),
MemberSpec_('interest', 'xs:string', 1),
MemberSpec_('category', 'xs:integer', 0),
MemberSpec_('agent', 'agent', 1),
MemberSpec_('promoter', 'booster', 1),
MemberSpec_('description', 'xs:string', 0),
]
subclass = None
superclass = None
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, extensiontype_=None):
self.vegetable = _cast(None, vegetable)
self.fruit = _cast(None, fruit)
self.ratio = _cast(float, ratio)
self.id = _cast(int, id)
self.value = _cast(None, value)
self.name = name
if interest is None:
self.interest = []
else:
self.interest = interest
self.category = category
if agent is None:
self.agent = []
else:
self.agent = agent
if promoter is None:
self.promoter = []
else:
self.promoter = promoter
self.description = description
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if person.subclass:
return person.subclass(*args_, **kwargs_)
else:
return person(*args_, **kwargs_)
factory = staticmethod(factory)
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_interest(self): return self.interest
def set_interest(self, interest): self.interest = interest
def add_interest(self, value): self.interest.append(value)
def insert_interest(self, index, value): self.interest[index] = value
def get_category(self): return self.category
def set_category(self, category): self.category = category
def get_agent(self): return self.agent
def set_agent(self, agent): self.agent = agent
def add_agent(self, value): self.agent.append(value)
def insert_agent(self, index, value): self.agent[index] = value
def get_promoter(self): return self.promoter
def set_promoter(self, promoter): self.promoter = promoter
def add_promoter(self, value): self.promoter.append(value)
def insert_promoter(self, index, value): self.promoter[index] = value
def get_description(self): return self.description
def set_description(self, description): self.description = description
def get_vegetable(self): return self.vegetable
def set_vegetable(self, vegetable): self.vegetable = vegetable
def get_fruit(self): return self.fruit
def set_fruit(self, fruit): self.fruit = fruit
def get_ratio(self): return self.ratio
def set_ratio(self, ratio): self.ratio = ratio
def get_id(self): return self.id
def set_id(self, id): self.id = id
def get_value(self): return self.value
def set_value(self, value): self.value = value
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def hasContent_(self):
if (
self.name is not None or
self.interest or
self.category is not None or
self.agent or
self.promoter or
self.description is not None
):
return True
else:
return False
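    # export() serializes this object as an XML element; hasContent_() above
    # decides between a self-closing tag and an open/close pair with children.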
def export(self, outfile, level, namespace_='', name_='person', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='person')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='person'):
if self.vegetable is not None and 'vegetable' not in already_processed:
already_processed.add('vegetable')
outfile.write(' vegetable=%s' % (self.gds_format_string(quote_attrib(self.vegetable).encode(ExternalEncoding), input_name='vegetable'), ))
if self.fruit is not None and 'fruit' not in already_processed:
already_processed.add('fruit')
outfile.write(' fruit=%s' % (self.gds_format_string(quote_attrib(self.fruit).encode(ExternalEncoding), input_name='fruit'), ))
if self.ratio is not None and 'ratio' not in already_processed:
already_processed.add('ratio')
outfile.write(' ratio="%s"' % self.gds_format_float(self.ratio, input_name='ratio'))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='person', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sname>%s</%sname>%s' % (namespace_, self.gds_format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_, eol_))
for interest_ in self.interest:
showIndent(outfile, level, pretty_print)
outfile.write('<%sinterest>%s</%sinterest>%s' % (namespace_, self.gds_format_string(quote_xml(interest_).encode(ExternalEncoding), input_name='interest'), namespace_, eol_))
if self.category is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scategory>%s</%scategory>%s' % (namespace_, self.gds_format_integer(self.category, input_name='category'), namespace_, eol_))
for agent_ in self.agent:
agent_.export(outfile, level, namespace_, name_='agent', pretty_print=pretty_print)
for promoter_ in self.promoter:
promoter_.export(outfile, level, namespace_, name_='promoter', pretty_print=pretty_print)
if self.description is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdescription>%s</%sdescription>%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='person'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.vegetable is not None and 'vegetable' not in already_processed:
already_processed.add('vegetable')
showIndent(outfile, level)
outfile.write('vegetable="%s",\n' % (self.vegetable,))
if self.fruit is not None and 'fruit' not in already_processed:
already_processed.add('fruit')
showIndent(outfile, level)
outfile.write('fruit="%s",\n' % (self.fruit,))
if self.ratio is not None and 'ratio' not in already_processed:
already_processed.add('ratio')
showIndent(outfile, level)
outfile.write('ratio=%f,\n' % (self.ratio,))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
showIndent(outfile, level)
outfile.write('id=%d,\n' % (self.id,))
if self.value is not None and 'value' not in already_processed:
already_processed.add('value')
showIndent(outfile, level)
outfile.write('value="%s",\n' % (self.value,))
def exportLiteralChildren(self, outfile, level, name_):
if self.name is not None:
showIndent(outfile, level)
outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
showIndent(outfile, level)
outfile.write('interest=[\n')
level += 1
for interest_ in self.interest:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(interest_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.category is not None:
showIndent(outfile, level)
outfile.write('category=%d,\n' % self.category)
showIndent(outfile, level)
outfile.write('agent=[\n')
level += 1
for agent_ in self.agent:
showIndent(outfile, level)
outfile.write('model_.agent(\n')
agent_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('promoter=[\n')
level += 1
for promoter_ in self.promoter:
showIndent(outfile, level)
outfile.write('model_.booster(\n')
promoter_.exportLiteral(outfile, level, name_='booster')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.description is not None:
showIndent(outfile, level)
outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('vegetable', node)
if value is not None and 'vegetable' not in already_processed:
already_processed.add('vegetable')
self.vegetable = value
value = find_attr_value_('fruit', node)
if value is not None and 'fruit' not in already_processed:
already_processed.add('fruit')
self.fruit = value
value = find_attr_value_('ratio', node)
if value is not None and 'ratio' not in already_processed:
already_processed.add('ratio')
try:
self.ratio = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (ratio): %s' % exp)
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
try:
self.id = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('value', node)
if value is not None and 'value' not in already_processed:
already_processed.add('value')
self.value = value
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'name':
name_ = child_.text
name_ = self.gds_validate_string(name_, node, 'name')
self.name = name_
elif nodeName_ == 'interest':
interest_ = child_.text
interest_ = self.gds_validate_string(interest_, node, 'interest')
self.interest.append(interest_)
elif nodeName_ == 'category':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'category')
self.category = ival_
elif nodeName_ == 'agent':
obj_ = agent.factory()
obj_.build(child_)
self.agent.append(obj_)
elif nodeName_ == 'promoter':
obj_ = booster.factory()
obj_.build(child_)
self.promoter.append(obj_)
elif nodeName_ == 'description':
description_ = child_.text
description_ = self.gds_validate_string(description_, node, 'description')
self.description = description_
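    # walk_and_update() converts any xs:date member from its string form via
    # date_calcs.date_from_string() and recurses into contained instances,
    # then delegates up the superclass chain.  walk_and_show() below is a
    # debugging dump: it prints one numbered line per visited object (using
    # the module-level 'counter') plus a table of member name/type/values.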
def walk_and_update(self):
        members = person.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if person.superclass is not None:
person.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: person depth: %d' % (counter, depth, )
        members = person.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class person
class programmer(person):
member_data_items_ = [
MemberSpec_('language', 'xs:string', 0),
MemberSpec_('area', 'xs:string', 0),
MemberSpec_('attrnegint', 'xs:negativeInteger', 0),
MemberSpec_('attrposint', 'xs:positiveInteger', 0),
MemberSpec_('attrnonnegint', 'xs:nonNegativeInteger', 0),
MemberSpec_('attrnonposint', 'xs:nonPositiveInteger', 0),
MemberSpec_('email', 'xs:string', 0),
MemberSpec_('elposint', 'xs:positiveInteger', 0),
MemberSpec_('elnonposint', 'xs:nonPositiveInteger', 0),
MemberSpec_('elnegint', 'xs:negativeInteger', 0),
MemberSpec_('elnonnegint', 'xs:nonNegativeInteger', 0),
MemberSpec_('eldate', 'xs:date', 0),
MemberSpec_('eldatetime', 'xs:dateTime', 0),
MemberSpec_('eltoken', 'xs:token', 0),
MemberSpec_('elshort', 'xs:short', 0),
MemberSpec_('ellong', 'xs:long', 0),
MemberSpec_('elparam', 'param', 0),
MemberSpec_('elarraytypes', ['ArrayTypes', 'xs:NMTOKEN'], 0),
]
subclass = None
superclass = person
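    # programmer extends person; extensiontype_ carries the xsi:type value so
    # that derived types round-trip through exportAttributes()/buildAttributes().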
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eldatetime=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, extensiontype_=None):
super(programmer, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, extensiontype_, )
self.language = _cast(None, language)
self.area = _cast(None, area)
self.attrnegint = _cast(int, attrnegint)
self.attrposint = _cast(int, attrposint)
self.attrnonnegint = _cast(int, attrnonnegint)
self.attrnonposint = _cast(int, attrnonposint)
self.email = email
self.elposint = elposint
self.elnonposint = elnonposint
self.elnegint = elnegint
self.elnonnegint = elnonnegint
if isinstance(eldate, basestring):
initvalue_ = datetime_.datetime.strptime(eldate, '%Y-%m-%d').date()
else:
initvalue_ = eldate
self.eldate = initvalue_
if isinstance(eldatetime, basestring):
initvalue_ = datetime_.datetime.strptime(eldatetime, '%Y-%m-%dT%H:%M:%S')
else:
initvalue_ = eldatetime
self.eldatetime = initvalue_
self.eltoken = eltoken
self.elshort = elshort
self.ellong = ellong
self.elparam = elparam
self.elarraytypes = elarraytypes
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if programmer.subclass:
return programmer.subclass(*args_, **kwargs_)
else:
return programmer(*args_, **kwargs_)
factory = staticmethod(factory)
def get_email(self): return self.email
def set_email(self, email): self.email = email
def get_elposint(self): return self.elposint
def set_elposint(self, elposint): self.elposint = elposint
def get_elnonposint(self): return self.elnonposint
def set_elnonposint(self, elnonposint): self.elnonposint = elnonposint
def get_elnegint(self): return self.elnegint
def set_elnegint(self, elnegint): self.elnegint = elnegint
def get_elnonnegint(self): return self.elnonnegint
def set_elnonnegint(self, elnonnegint): self.elnonnegint = elnonnegint
def get_eldate(self): return self.eldate
def set_eldate(self, eldate): self.eldate = eldate
def get_eldatetime(self): return self.eldatetime
def set_eldatetime(self, eldatetime): self.eldatetime = eldatetime
def get_eltoken(self): return self.eltoken
def set_eltoken(self, eltoken): self.eltoken = eltoken
def get_elshort(self): return self.elshort
def set_elshort(self, elshort): self.elshort = elshort
def get_ellong(self): return self.ellong
def set_ellong(self, ellong): self.ellong = ellong
def get_elparam(self): return self.elparam
def set_elparam(self, elparam): self.elparam = elparam
def get_elarraytypes(self): return self.elarraytypes
def set_elarraytypes(self, elarraytypes): self.elarraytypes = elarraytypes
def get_language(self): return self.language
def set_language(self, language): self.language = language
def get_area(self): return self.area
def set_area(self, area): self.area = area
def get_attrnegint(self): return self.attrnegint
def set_attrnegint(self, attrnegint): self.attrnegint = attrnegint
def get_attrposint(self): return self.attrposint
def set_attrposint(self, attrposint): self.attrposint = attrposint
def get_attrnonnegint(self): return self.attrnonnegint
def set_attrnonnegint(self, attrnonnegint): self.attrnonnegint = attrnonnegint
def get_attrnonposint(self): return self.attrnonposint
def set_attrnonposint(self, attrnonposint): self.attrnonposint = attrnonposint
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def validate_ArrayTypes(self, value):
# Validate type ArrayTypes, a restriction on xs:NMTOKEN.
pass
def hasContent_(self):
if (
self.email is not None or
self.elposint is not None or
self.elnonposint is not None or
self.elnegint is not None or
self.elnonnegint is not None or
self.eldate is not None or
self.eldatetime is not None or
self.eltoken is not None or
self.elshort is not None or
self.ellong is not None or
self.elparam is not None or
self.elarraytypes is not None or
super(programmer, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='programmer', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='programmer')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='programmer'):
super(programmer, self).exportAttributes(outfile, level, already_processed, namespace_, name_='programmer')
if self.language is not None and 'language' not in already_processed:
already_processed.add('language')
outfile.write(' language=%s' % (self.gds_format_string(quote_attrib(self.language).encode(ExternalEncoding), input_name='language'), ))
if self.area is not None and 'area' not in already_processed:
already_processed.add('area')
outfile.write(' area=%s' % (self.gds_format_string(quote_attrib(self.area).encode(ExternalEncoding), input_name='area'), ))
if self.attrnegint is not None and 'attrnegint' not in already_processed:
already_processed.add('attrnegint')
outfile.write(' attrnegint="%s"' % self.gds_format_integer(self.attrnegint, input_name='attrnegint'))
if self.attrposint is not None and 'attrposint' not in already_processed:
already_processed.add('attrposint')
outfile.write(' attrposint="%s"' % self.gds_format_integer(self.attrposint, input_name='attrposint'))
if self.attrnonnegint is not None and 'attrnonnegint' not in already_processed:
already_processed.add('attrnonnegint')
outfile.write(' attrnonnegint="%s"' % self.gds_format_integer(self.attrnonnegint, input_name='attrnonnegint'))
if self.attrnonposint is not None and 'attrnonposint' not in already_processed:
already_processed.add('attrnonposint')
outfile.write(' attrnonposint="%s"' % self.gds_format_integer(self.attrnonposint, input_name='attrnonposint'))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
def exportChildren(self, outfile, level, namespace_='', name_='programmer', fromsubclass_=False, pretty_print=True):
super(programmer, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.email is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%semail>%s</%semail>%s' % (namespace_, self.gds_format_string(quote_xml(self.email).encode(ExternalEncoding), input_name='email'), namespace_, eol_))
if self.elposint is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%selposint>%s</%selposint>%s' % (namespace_, self.gds_format_integer(self.elposint, input_name='elposint'), namespace_, eol_))
if self.elnonposint is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%selnonposint>%s</%selnonposint>%s' % (namespace_, self.gds_format_integer(self.elnonposint, input_name='elnonposint'), namespace_, eol_))
if self.elnegint is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%selnegint>%s</%selnegint>%s' % (namespace_, self.gds_format_integer(self.elnegint, input_name='elnegint'), namespace_, eol_))
if self.elnonnegint is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%selnonnegint>%s</%selnonnegint>%s' % (namespace_, self.gds_format_integer(self.elnonnegint, input_name='elnonnegint'), namespace_, eol_))
if self.eldate is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%seldate>%s</%seldate>%s' % (namespace_, self.gds_format_date(self.eldate, input_name='eldate'), namespace_, eol_))
if self.eldatetime is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%seldatetime>%s</%seldatetime>%s' % (namespace_, self.gds_format_datetime(self.eldatetime, input_name='eldatetime'), namespace_, eol_))
if self.eltoken is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%seltoken>%s</%seltoken>%s' % (namespace_, self.gds_format_string(quote_xml(self.eltoken).encode(ExternalEncoding), input_name='eltoken'), namespace_, eol_))
if self.elshort is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%selshort>%s</%selshort>%s' % (namespace_, self.gds_format_integer(self.elshort, input_name='elshort'), namespace_, eol_))
if self.ellong is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sellong>%s</%sellong>%s' % (namespace_, self.gds_format_integer(self.ellong, input_name='ellong'), namespace_, eol_))
if self.elparam is not None:
self.elparam.export(outfile, level, namespace_, name_='elparam', pretty_print=pretty_print)
if self.elarraytypes is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%selarraytypes>%s</%selarraytypes>%s' % (namespace_, self.gds_format_string(quote_xml(self.elarraytypes).encode(ExternalEncoding), input_name='elarraytypes'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='programmer'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.language is not None and 'language' not in already_processed:
already_processed.add('language')
showIndent(outfile, level)
outfile.write('language="%s",\n' % (self.language,))
if self.area is not None and 'area' not in already_processed:
already_processed.add('area')
showIndent(outfile, level)
outfile.write('area="%s",\n' % (self.area,))
if self.attrnegint is not None and 'attrnegint' not in already_processed:
already_processed.add('attrnegint')
showIndent(outfile, level)
outfile.write('attrnegint=%d,\n' % (self.attrnegint,))
if self.attrposint is not None and 'attrposint' not in already_processed:
already_processed.add('attrposint')
showIndent(outfile, level)
outfile.write('attrposint=%d,\n' % (self.attrposint,))
if self.attrnonnegint is not None and 'attrnonnegint' not in already_processed:
already_processed.add('attrnonnegint')
showIndent(outfile, level)
outfile.write('attrnonnegint=%d,\n' % (self.attrnonnegint,))
if self.attrnonposint is not None and 'attrnonposint' not in already_processed:
already_processed.add('attrnonposint')
showIndent(outfile, level)
outfile.write('attrnonposint=%d,\n' % (self.attrnonposint,))
super(programmer, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(programmer, self).exportLiteralChildren(outfile, level, name_)
if self.email is not None:
showIndent(outfile, level)
outfile.write('email=%s,\n' % quote_python(self.email).encode(ExternalEncoding))
if self.elposint is not None:
showIndent(outfile, level)
outfile.write('elposint=%d,\n' % self.elposint)
if self.elnonposint is not None:
showIndent(outfile, level)
outfile.write('elnonposint=%d,\n' % self.elnonposint)
if self.elnegint is not None:
showIndent(outfile, level)
outfile.write('elnegint=%d,\n' % self.elnegint)
if self.elnonnegint is not None:
showIndent(outfile, level)
outfile.write('elnonnegint=%d,\n' % self.elnonnegint)
if self.eldate is not None:
showIndent(outfile, level)
outfile.write('eldate=model_.GeneratedsSuper.gds_parse_date("%s"),\n' % self.gds_format_date(self.eldate, input_name='eldate'))
if self.eldatetime is not None:
showIndent(outfile, level)
outfile.write('eldatetime=model_.GeneratedsSuper.gds_parse_datetime("%s"),\n' % self.gds_format_datetime(self.eldatetime, input_name='eldatetime'))
if self.eltoken is not None:
showIndent(outfile, level)
outfile.write('eltoken=%s,\n' % quote_python(self.eltoken).encode(ExternalEncoding))
if self.elshort is not None:
showIndent(outfile, level)
outfile.write('elshort=%d,\n' % self.elshort)
if self.ellong is not None:
showIndent(outfile, level)
outfile.write('ellong=%d,\n' % self.ellong)
if self.elparam is not None:
showIndent(outfile, level)
outfile.write('elparam=model_.param(\n')
self.elparam.exportLiteral(outfile, level, name_='elparam')
showIndent(outfile, level)
outfile.write('),\n')
if self.elarraytypes is not None:
showIndent(outfile, level)
outfile.write('elarraytypes=%s,\n' % quote_python(self.elarraytypes).encode(ExternalEncoding))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('language', node)
if value is not None and 'language' not in already_processed:
already_processed.add('language')
self.language = value
value = find_attr_value_('area', node)
if value is not None and 'area' not in already_processed:
already_processed.add('area')
self.area = value
value = find_attr_value_('attrnegint', node)
if value is not None and 'attrnegint' not in already_processed:
already_processed.add('attrnegint')
try:
self.attrnegint = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.attrnegint >= 0:
raise_parse_error(node, 'Invalid NegativeInteger')
value = find_attr_value_('attrposint', node)
if value is not None and 'attrposint' not in already_processed:
already_processed.add('attrposint')
try:
self.attrposint = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.attrposint <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
value = find_attr_value_('attrnonnegint', node)
if value is not None and 'attrnonnegint' not in already_processed:
already_processed.add('attrnonnegint')
try:
self.attrnonnegint = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.attrnonnegint < 0:
raise_parse_error(node, 'Invalid NonNegativeInteger')
value = find_attr_value_('attrnonposint', node)
if value is not None and 'attrnonposint' not in already_processed:
already_processed.add('attrnonposint')
try:
self.attrnonposint = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.attrnonposint > 0:
raise_parse_error(node, 'Invalid NonPositiveInteger')
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.add('xsi:type')
self.extensiontype_ = value
super(programmer, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'email':
email_ = child_.text
email_ = self.gds_validate_string(email_, node, 'email')
self.email = email_
elif nodeName_ == 'elposint':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ <= 0:
raise_parse_error(child_, 'requires positiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'elposint')
self.elposint = ival_
elif nodeName_ == 'elnonposint':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ > 0:
raise_parse_error(child_, 'requires nonPositiveInteger')
ival_ = self.gds_validate_integer(ival_, node, 'elnonposint')
self.elnonposint = ival_
elif nodeName_ == 'elnegint':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ >= 0:
raise_parse_error(child_, 'requires negativeInteger')
ival_ = self.gds_validate_integer(ival_, node, 'elnegint')
self.elnegint = ival_
elif nodeName_ == 'elnonnegint':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
if ival_ < 0:
raise_parse_error(child_, 'requires nonNegativeInteger')
ival_ = self.gds_validate_integer(ival_, node, 'elnonnegint')
self.elnonnegint = ival_
elif nodeName_ == 'eldate':
sval_ = child_.text
dval_ = self.gds_parse_date(sval_)
self.eldate = dval_
elif nodeName_ == 'eldatetime':
sval_ = child_.text
dval_ = self.gds_parse_datetime(sval_)
self.eldatetime = dval_
elif nodeName_ == 'eltoken':
eltoken_ = child_.text
eltoken_ = re_.sub(String_cleanup_pat_, " ", eltoken_).strip()
eltoken_ = self.gds_validate_string(eltoken_, node, 'eltoken')
self.eltoken = eltoken_
elif nodeName_ == 'elshort':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'elshort')
self.elshort = ival_
elif nodeName_ == 'ellong':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'ellong')
self.ellong = ival_
elif nodeName_ == 'elparam':
obj_ = param.factory()
obj_.build(child_)
self.elparam = obj_
elif nodeName_ == 'elarraytypes':
elarraytypes_ = child_.text
elarraytypes_ = self.gds_validate_string(elarraytypes_, node, 'elarraytypes')
self.elarraytypes = elarraytypes_
self.validate_ArrayTypes(self.elarraytypes) # validate type ArrayTypes
super(programmer, self).buildChildren(child_, node, nodeName_, True)
def walk_and_update(self):
        members = programmer.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if programmer.superclass is not None:
programmer.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: programmer depth: %d' % (counter, depth, )
        members = programmer.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class programmer
class param(GeneratedsSuper):
"""Finding flow attribute unneccesary in practice. A unnamed parameter
is unbound/skipped."""
member_data_items_ = [
MemberSpec_('semantic', 'xs:token', 0),
MemberSpec_('name', 'xs:NCName', 0),
MemberSpec_('flow', 'FlowType', 0),
MemberSpec_('sid', 'xs:NCName', 0),
MemberSpec_('type', 'xs:NMTOKEN', 0),
MemberSpec_('id', 'xs:string', 0),
MemberSpec_('valueOf_', 'xs:string', 0),
]
subclass = None
superclass = None
def __init__(self, semantic=None, name=None, flow=None, sid=None, type_=None, id=None, valueOf_=None):
self.semantic = _cast(None, semantic)
self.name = _cast(None, name)
self.flow = _cast(None, flow)
self.sid = _cast(None, sid)
self.type_ = _cast(None, type_)
self.id = _cast(None, id)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if param.subclass:
return param.subclass(*args_, **kwargs_)
else:
return param(*args_, **kwargs_)
factory = staticmethod(factory)
def get_semantic(self): return self.semantic
def set_semantic(self, semantic): self.semantic = semantic
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_flow(self): return self.flow
def set_flow(self, flow): self.flow = flow
def get_sid(self): return self.sid
def set_sid(self, sid): self.sid = sid
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_id(self): return self.id
def set_id(self, id): self.id = id
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='param', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='param')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='param'):
if self.semantic is not None and 'semantic' not in already_processed:
already_processed.add('semantic')
outfile.write(' semantic=%s' % (self.gds_format_string(quote_attrib(self.semantic).encode(ExternalEncoding), input_name='semantic'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (quote_attrib(self.name), ))
if self.flow is not None and 'flow' not in already_processed:
already_processed.add('flow')
outfile.write(' flow=%s' % (quote_attrib(self.flow), ))
if self.sid is not None and 'sid' not in already_processed:
already_processed.add('sid')
outfile.write(' sid=%s' % (quote_attrib(self.sid), ))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='', name_='param', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='param'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.semantic is not None and 'semantic' not in already_processed:
already_processed.add('semantic')
showIndent(outfile, level)
outfile.write('semantic="%s",\n' % (self.semantic,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
if self.flow is not None and 'flow' not in already_processed:
already_processed.add('flow')
showIndent(outfile, level)
outfile.write('flow=%s,\n' % (self.flow,))
if self.sid is not None and 'sid' not in already_processed:
already_processed.add('sid')
showIndent(outfile, level)
outfile.write('sid="%s",\n' % (self.sid,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_="%s",\n' % (self.type_,))
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
showIndent(outfile, level)
outfile.write('id="%s",\n' % (self.id,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('semantic', node)
if value is not None and 'semantic' not in already_processed:
already_processed.add('semantic')
self.semantic = value
self.semantic = ' '.join(self.semantic.split())
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
value = find_attr_value_('flow', node)
if value is not None and 'flow' not in already_processed:
already_processed.add('flow')
self.flow = value
value = find_attr_value_('sid', node)
if value is not None and 'sid' not in already_processed:
already_processed.add('sid')
self.sid = value
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
self.type_ = value
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
self.id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
def walk_and_update(self):
        members = param.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if param.superclass is not None:
param.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: param depth: %d' % (counter, depth, )
        members = param.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class param
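# param carries its element text in valueOf_ (captured by get_all_text_ in
# build()) rather than in named child members, so hasContent_() tests valueOf_
# and exportChildren() is a no-op.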
class python_programmer(programmer):
member_data_items_ = [
MemberSpec_('nick-name', 'xs:string', 0),
MemberSpec_('favorite_editor', 'xs:string', 0),
]
subclass = None
superclass = programmer
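    # The XML attribute 'nick-name' is mapped to the Python-safe identifier
    # 'nick_name'; buildAttributes()/exportAttributes() below translate
    # between the two spellings.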
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eldatetime=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, nick_name=None, favorite_editor=None):
super(python_programmer, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, language, area, attrnegint, attrposint, attrnonnegint, attrnonposint, email, elposint, elnonposint, elnegint, elnonnegint, eldate, eldatetime, eltoken, elshort, ellong, elparam, elarraytypes, )
self.nick_name = _cast(None, nick_name)
self.favorite_editor = favorite_editor
def factory(*args_, **kwargs_):
if python_programmer.subclass:
return python_programmer.subclass(*args_, **kwargs_)
else:
return python_programmer(*args_, **kwargs_)
factory = staticmethod(factory)
def get_favorite_editor(self): return self.favorite_editor
def set_favorite_editor(self, favorite_editor): self.favorite_editor = favorite_editor
def get_nick_name(self): return self.nick_name
def set_nick_name(self, nick_name): self.nick_name = nick_name
def hasContent_(self):
if (
self.favorite_editor is not None or
super(python_programmer, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='python-programmer', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='python-programmer')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='python-programmer'):
super(python_programmer, self).exportAttributes(outfile, level, already_processed, namespace_, name_='python-programmer')
if self.nick_name is not None and 'nick_name' not in already_processed:
already_processed.add('nick_name')
outfile.write(' nick-name=%s' % (self.gds_format_string(quote_attrib(self.nick_name).encode(ExternalEncoding), input_name='nick-name'), ))
def exportChildren(self, outfile, level, namespace_='', name_='python-programmer', fromsubclass_=False, pretty_print=True):
super(python_programmer, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.favorite_editor is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfavorite-editor>%s</%sfavorite-editor>%s' % (namespace_, self.gds_format_string(quote_xml(self.favorite_editor).encode(ExternalEncoding), input_name='favorite-editor'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='python-programmer'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.nick_name is not None and 'nick_name' not in already_processed:
already_processed.add('nick_name')
showIndent(outfile, level)
outfile.write('nick_name="%s",\n' % (self.nick_name,))
super(python_programmer, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(python_programmer, self).exportLiteralChildren(outfile, level, name_)
if self.favorite_editor is not None:
showIndent(outfile, level)
outfile.write('favorite_editor=%s,\n' % quote_python(self.favorite_editor).encode(ExternalEncoding))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('nick-name', node)
if value is not None and 'nick-name' not in already_processed:
already_processed.add('nick-name')
self.nick_name = value
super(python_programmer, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'favorite-editor':
favorite_editor_ = child_.text
favorite_editor_ = self.gds_validate_string(favorite_editor_, node, 'favorite_editor')
self.favorite_editor = favorite_editor_
super(python_programmer, self).buildChildren(child_, node, nodeName_, True)
def walk_and_update(self):
        members = python_programmer.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if python_programmer.superclass is not None:
python_programmer.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: python_programmer depth: %d' % (counter, depth, )
        members = python_programmer.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class python_programmer
class java_programmer(programmer):
member_data_items_ = [
MemberSpec_('status', 'xs:string', 0),
MemberSpec_('nick-name', 'xs:string', 0),
MemberSpec_('favorite_editor', 'xs:string', 0),
]
subclass = None
superclass = programmer
def __init__(self, vegetable=None, fruit=None, ratio=None, id=None, value=None, name=None, interest=None, category=None, agent=None, promoter=None, description=None, language=None, area=None, attrnegint=None, attrposint=None, attrnonnegint=None, attrnonposint=None, email=None, elposint=None, elnonposint=None, elnegint=None, elnonnegint=None, eldate=None, eldatetime=None, eltoken=None, elshort=None, ellong=None, elparam=None, elarraytypes=None, status=None, nick_name=None, favorite_editor=None):
super(java_programmer, self).__init__(vegetable, fruit, ratio, id, value, name, interest, category, agent, promoter, description, language, area, attrnegint, attrposint, attrnonnegint, attrnonposint, email, elposint, elnonposint, elnegint, elnonnegint, eldate, eldatetime, eltoken, elshort, ellong, elparam, elarraytypes, )
self.status = _cast(None, status)
self.nick_name = _cast(None, nick_name)
self.favorite_editor = favorite_editor
def factory(*args_, **kwargs_):
if java_programmer.subclass:
return java_programmer.subclass(*args_, **kwargs_)
else:
return java_programmer(*args_, **kwargs_)
factory = staticmethod(factory)
def get_favorite_editor(self): return self.favorite_editor
def set_favorite_editor(self, favorite_editor): self.favorite_editor = favorite_editor
def get_status(self): return self.status
def set_status(self, status): self.status = status
def get_nick_name(self): return self.nick_name
def set_nick_name(self, nick_name): self.nick_name = nick_name
def hasContent_(self):
if (
self.favorite_editor is not None or
super(java_programmer, self).hasContent_()
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='java-programmer', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='java-programmer')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='java-programmer'):
super(java_programmer, self).exportAttributes(outfile, level, already_processed, namespace_, name_='java-programmer')
if self.status is not None and 'status' not in already_processed:
already_processed.add('status')
outfile.write(' status=%s' % (self.gds_format_string(quote_attrib(self.status).encode(ExternalEncoding), input_name='status'), ))
if self.nick_name is not None and 'nick_name' not in already_processed:
already_processed.add('nick_name')
outfile.write(' nick-name=%s' % (self.gds_format_string(quote_attrib(self.nick_name).encode(ExternalEncoding), input_name='nick-name'), ))
def exportChildren(self, outfile, level, namespace_='', name_='java-programmer', fromsubclass_=False, pretty_print=True):
super(java_programmer, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.favorite_editor is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfavorite-editor>%s</%sfavorite-editor>%s' % (namespace_, self.gds_format_string(quote_xml(self.favorite_editor).encode(ExternalEncoding), input_name='favorite-editor'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='java-programmer'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.status is not None and 'status' not in already_processed:
already_processed.add('status')
showIndent(outfile, level)
outfile.write('status="%s",\n' % (self.status,))
if self.nick_name is not None and 'nick_name' not in already_processed:
already_processed.add('nick_name')
showIndent(outfile, level)
outfile.write('nick_name="%s",\n' % (self.nick_name,))
super(java_programmer, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(java_programmer, self).exportLiteralChildren(outfile, level, name_)
if self.favorite_editor is not None:
showIndent(outfile, level)
outfile.write('favorite_editor=%s,\n' % quote_python(self.favorite_editor).encode(ExternalEncoding))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('status', node)
if value is not None and 'status' not in already_processed:
already_processed.add('status')
self.status = value
value = find_attr_value_('nick-name', node)
if value is not None and 'nick-name' not in already_processed:
already_processed.add('nick-name')
self.nick_name = value
super(java_programmer, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'favorite-editor':
favorite_editor_ = child_.text
favorite_editor_ = self.gds_validate_string(favorite_editor_, node, 'favorite_editor')
self.favorite_editor = favorite_editor_
super(java_programmer, self).buildChildren(child_, node, nodeName_, True)
def walk_and_update(self):
        members = java_programmer.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
        if java_programmer.superclass is not None:
java_programmer.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: java_programmer depth: %d' % (counter, depth, )
        members = java_programmer.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class java_programmer
class agent(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('firstname', 'xs:string', 0),
MemberSpec_('lastname', 'xs:string', 0),
MemberSpec_('priority', 'xs:float', 0),
MemberSpec_('info', 'info', 0),
]
subclass = None
superclass = None
def __init__(self, firstname=None, lastname=None, priority=None, info=None):
self.firstname = firstname
self.lastname = lastname
self.priority = priority
self.info = info
def factory(*args_, **kwargs_):
if agent.subclass:
return agent.subclass(*args_, **kwargs_)
else:
return agent(*args_, **kwargs_)
factory = staticmethod(factory)
def get_firstname(self): return self.firstname
def set_firstname(self, firstname): self.firstname = firstname
def get_lastname(self): return self.lastname
def set_lastname(self, lastname): self.lastname = lastname
def get_priority(self): return self.priority
def set_priority(self, priority): self.priority = priority
def get_info(self): return self.info
def set_info(self, info): self.info = info
def hasContent_(self):
if (
self.firstname is not None or
self.lastname is not None or
self.priority is not None or
self.info is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='agent', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='agent')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='agent'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='agent', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.firstname is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfirstname>%s</%sfirstname>%s' % (namespace_, self.gds_format_string(quote_xml(self.firstname).encode(ExternalEncoding), input_name='firstname'), namespace_, eol_))
if self.lastname is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%slastname>%s</%slastname>%s' % (namespace_, self.gds_format_string(quote_xml(self.lastname).encode(ExternalEncoding), input_name='lastname'), namespace_, eol_))
if self.priority is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%spriority>%s</%spriority>%s' % (namespace_, self.gds_format_float(self.priority, input_name='priority'), namespace_, eol_))
if self.info is not None:
self.info.export(outfile, level, namespace_, name_='info', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='agent'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.firstname is not None:
showIndent(outfile, level)
outfile.write('firstname=%s,\n' % quote_python(self.firstname).encode(ExternalEncoding))
if self.lastname is not None:
showIndent(outfile, level)
outfile.write('lastname=%s,\n' % quote_python(self.lastname).encode(ExternalEncoding))
if self.priority is not None:
showIndent(outfile, level)
outfile.write('priority=%f,\n' % self.priority)
if self.info is not None:
showIndent(outfile, level)
outfile.write('info=model_.info(\n')
self.info.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'firstname':
firstname_ = child_.text
firstname_ = self.gds_validate_string(firstname_, node, 'firstname')
self.firstname = firstname_
elif nodeName_ == 'lastname':
lastname_ = child_.text
lastname_ = self.gds_validate_string(lastname_, node, 'lastname')
self.lastname = lastname_
elif nodeName_ == 'priority':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'priority')
self.priority = fval_
elif nodeName_ == 'info':
obj_ = info.factory()
obj_.build(child_)
self.info = obj_
def walk_and_update(self):
        members = agent.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
if agent.superclass != None:
agent.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: agent depth: %d' % (counter, depth, )
        members = agent.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class agent
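# A minimal build/export sketch for the class above (illustrative only; the
# attribute values are made up, and agent's __init__ is assumed to mirror
# special_agent's below):
#
#     import sys
#     a = agent(firstname='Jane', lastname='Doe', priority=1.5,
#               info=info(rating=4.0, type_=2, name='field'))
#     a.export(sys.stdout, 0, name_='agent', pretty_print=True)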
class special_agent(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('firstname', 'xs:string', 0),
MemberSpec_('lastname', 'xs:string', 0),
MemberSpec_('priority', 'xs:float', 0),
MemberSpec_('info', 'info', 0),
]
subclass = None
superclass = None
def __init__(self, firstname=None, lastname=None, priority=None, info=None):
self.firstname = firstname
self.lastname = lastname
self.priority = priority
self.info = info
def factory(*args_, **kwargs_):
if special_agent.subclass:
return special_agent.subclass(*args_, **kwargs_)
else:
return special_agent(*args_, **kwargs_)
factory = staticmethod(factory)
def get_firstname(self): return self.firstname
def set_firstname(self, firstname): self.firstname = firstname
def get_lastname(self): return self.lastname
def set_lastname(self, lastname): self.lastname = lastname
def get_priority(self): return self.priority
def set_priority(self, priority): self.priority = priority
def get_info(self): return self.info
def set_info(self, info): self.info = info
def hasContent_(self):
if (
self.firstname is not None or
self.lastname is not None or
self.priority is not None or
self.info is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='special-agent', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='special-agent')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='special-agent'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='special-agent', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.firstname is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfirstname>%s</%sfirstname>%s' % (namespace_, self.gds_format_string(quote_xml(self.firstname).encode(ExternalEncoding), input_name='firstname'), namespace_, eol_))
if self.lastname is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%slastname>%s</%slastname>%s' % (namespace_, self.gds_format_string(quote_xml(self.lastname).encode(ExternalEncoding), input_name='lastname'), namespace_, eol_))
if self.priority is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%spriority>%s</%spriority>%s' % (namespace_, self.gds_format_float(self.priority, input_name='priority'), namespace_, eol_))
if self.info is not None:
self.info.export(outfile, level, namespace_, name_='info', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='special-agent'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.firstname is not None:
showIndent(outfile, level)
outfile.write('firstname=%s,\n' % quote_python(self.firstname).encode(ExternalEncoding))
if self.lastname is not None:
showIndent(outfile, level)
outfile.write('lastname=%s,\n' % quote_python(self.lastname).encode(ExternalEncoding))
if self.priority is not None:
showIndent(outfile, level)
outfile.write('priority=%f,\n' % self.priority)
if self.info is not None:
showIndent(outfile, level)
outfile.write('info=model_.info(\n')
self.info.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'firstname':
firstname_ = child_.text
firstname_ = self.gds_validate_string(firstname_, node, 'firstname')
self.firstname = firstname_
elif nodeName_ == 'lastname':
lastname_ = child_.text
lastname_ = self.gds_validate_string(lastname_, node, 'lastname')
self.lastname = lastname_
elif nodeName_ == 'priority':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'priority')
self.priority = fval_
elif nodeName_ == 'info':
obj_ = info.factory()
obj_.build(child_)
self.info = obj_
def walk_and_update(self):
        members = special_agent.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
if special_agent.superclass != None:
special_agent.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: special_agent depth: %d' % (counter, depth, )
        members = special_agent.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class special_agent
class booster(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('firstname', 'xs:string', 0),
MemberSpec_('lastname', 'xs:string', 0),
MemberSpec_('other_name', 'xs:float', 0),
MemberSpec_('class_', 'xs:float', 0),
MemberSpec_('other_value', 'xs:float', 1),
MemberSpec_('type_', 'xs:float', 1),
MemberSpec_('client_handler', 'client-handlerType', 1),
]
subclass = None
superclass = None
def __init__(self, firstname=None, lastname=None, other_name=None, class_=None, other_value=None, type_=None, client_handler=None):
self.firstname = firstname
self.lastname = lastname
self.other_name = other_name
self.class_ = class_
if other_value is None:
self.other_value = []
else:
self.other_value = other_value
if type_ is None:
self.type_ = []
else:
self.type_ = type_
if client_handler is None:
self.client_handler = []
else:
self.client_handler = client_handler
def factory(*args_, **kwargs_):
if booster.subclass:
return booster.subclass(*args_, **kwargs_)
else:
return booster(*args_, **kwargs_)
factory = staticmethod(factory)
def get_firstname(self): return self.firstname
def set_firstname(self, firstname): self.firstname = firstname
def get_lastname(self): return self.lastname
def set_lastname(self, lastname): self.lastname = lastname
def get_other_name(self): return self.other_name
def set_other_name(self, other_name): self.other_name = other_name
def get_class(self): return self.class_
def set_class(self, class_): self.class_ = class_
def get_other_value(self): return self.other_value
def set_other_value(self, other_value): self.other_value = other_value
def add_other_value(self, value): self.other_value.append(value)
def insert_other_value(self, index, value): self.other_value[index] = value
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def add_type(self, value): self.type_.append(value)
def insert_type(self, index, value): self.type_[index] = value
def get_client_handler(self): return self.client_handler
def set_client_handler(self, client_handler): self.client_handler = client_handler
def add_client_handler(self, value): self.client_handler.append(value)
def insert_client_handler(self, index, value): self.client_handler[index] = value
def hasContent_(self):
if (
self.firstname is not None or
self.lastname is not None or
self.other_name is not None or
self.class_ is not None or
self.other_value or
self.type_ or
self.client_handler
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='booster', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='booster')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='booster'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='booster', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.firstname is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfirstname>%s</%sfirstname>%s' % (namespace_, self.gds_format_string(quote_xml(self.firstname).encode(ExternalEncoding), input_name='firstname'), namespace_, eol_))
if self.lastname is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%slastname>%s</%slastname>%s' % (namespace_, self.gds_format_string(quote_xml(self.lastname).encode(ExternalEncoding), input_name='lastname'), namespace_, eol_))
if self.other_name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sother-name>%s</%sother-name>%s' % (namespace_, self.gds_format_float(self.other_name, input_name='other-name'), namespace_, eol_))
if self.class_ is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sclass>%s</%sclass>%s' % (namespace_, self.gds_format_float(self.class_, input_name='class'), namespace_, eol_))
for other_value_ in self.other_value:
showIndent(outfile, level, pretty_print)
outfile.write('<%sother-value>%s</%sother-value>%s' % (namespace_, self.gds_format_float(other_value_, input_name='other-value'), namespace_, eol_))
for type_ in self.type_:
showIndent(outfile, level, pretty_print)
outfile.write('<%stype>%s</%stype>%s' % (namespace_, self.gds_format_float(type_, input_name='type'), namespace_, eol_))
for client_handler_ in self.client_handler:
client_handler_.export(outfile, level, namespace_, name_='client-handler', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='booster'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.firstname is not None:
showIndent(outfile, level)
outfile.write('firstname=%s,\n' % quote_python(self.firstname).encode(ExternalEncoding))
if self.lastname is not None:
showIndent(outfile, level)
outfile.write('lastname=%s,\n' % quote_python(self.lastname).encode(ExternalEncoding))
if self.other_name is not None:
showIndent(outfile, level)
outfile.write('other_name=%f,\n' % self.other_name)
if self.class_ is not None:
showIndent(outfile, level)
outfile.write('class_=%f,\n' % self.class_)
showIndent(outfile, level)
outfile.write('other_value=[\n')
level += 1
for other_value_ in self.other_value:
showIndent(outfile, level)
outfile.write('%f,\n' % other_value_)
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('type_=[\n')
level += 1
for type_ in self.type_:
showIndent(outfile, level)
outfile.write('%f,\n' % type_)
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
showIndent(outfile, level)
outfile.write('client_handler=[\n')
level += 1
for client_handler_ in self.client_handler:
showIndent(outfile, level)
outfile.write('model_.client_handlerType(\n')
client_handler_.exportLiteral(outfile, level, name_='client-handlerType')
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'firstname':
firstname_ = child_.text
firstname_ = self.gds_validate_string(firstname_, node, 'firstname')
self.firstname = firstname_
elif nodeName_ == 'lastname':
lastname_ = child_.text
lastname_ = self.gds_validate_string(lastname_, node, 'lastname')
self.lastname = lastname_
elif nodeName_ == 'other-name':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'other_name')
self.other_name = fval_
elif nodeName_ == 'class':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'class')
self.class_ = fval_
elif nodeName_ == 'other-value':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'other_value')
self.other_value.append(fval_)
elif nodeName_ == 'type':
sval_ = child_.text
try:
fval_ = float(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires float or double: %s' % exp)
fval_ = self.gds_validate_float(fval_, node, 'type')
self.type_.append(fval_)
elif nodeName_ == 'client-handler':
obj_ = client_handlerType.factory()
obj_.build(child_)
self.client_handler.append(obj_)
def walk_and_update(self):
        members = booster.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
if booster.superclass != None:
booster.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: booster depth: %d' % (counter, depth, )
        members = booster.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class booster
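# The container members above (third MemberSpec_ field == 1: 'other_value',
# 'type_' and 'client_handler') are list-valued, so repeated child elements
# accumulate through the add_* helpers; a quick sketch with made-up values:
#
#     b = booster(firstname='Ada', lastname='Lovelace')
#     b.add_other_value(1.0)
#     b.add_type(2.0)
#     b.add_client_handler(client_handlerType(fullname='Handler One', refid=7))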
class info(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('rating', 'xs:float', 0),
MemberSpec_('type', 'xs:integer', 0),
MemberSpec_('name', 'xs:string', 0),
]
subclass = None
superclass = None
def __init__(self, rating=None, type_=None, name=None):
self.rating = _cast(float, rating)
self.type_ = _cast(int, type_)
self.name = _cast(None, name)
pass
def factory(*args_, **kwargs_):
if info.subclass:
return info.subclass(*args_, **kwargs_)
else:
return info(*args_, **kwargs_)
factory = staticmethod(factory)
def get_rating(self): return self.rating
def set_rating(self, rating): self.rating = rating
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_name(self): return self.name
def set_name(self, name): self.name = name
    def hasContent_(self):
        # info carries attributes only, so it never has element content
        if (
        ):
            return True
        else:
            return False
def export(self, outfile, level, namespace_='', name_='info', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='info')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='info'):
if self.rating is not None and 'rating' not in already_processed:
already_processed.add('rating')
outfile.write(' rating="%s"' % self.gds_format_float(self.rating, input_name='rating'))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
outfile.write(' type="%s"' % self.gds_format_integer(self.type_, input_name='type'))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='', name_='info', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='info'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.rating is not None and 'rating' not in already_processed:
already_processed.add('rating')
showIndent(outfile, level)
outfile.write('rating=%f,\n' % (self.rating,))
if self.type_ is not None and 'type_' not in already_processed:
already_processed.add('type_')
showIndent(outfile, level)
outfile.write('type_=%d,\n' % (self.type_,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('rating', node)
if value is not None and 'rating' not in already_processed:
already_processed.add('rating')
try:
self.rating = float(value)
except ValueError, exp:
raise ValueError('Bad float/double attribute (rating): %s' % exp)
value = find_attr_value_('type', node)
if value is not None and 'type' not in already_processed:
already_processed.add('type')
try:
self.type_ = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
def walk_and_update(self):
        members = info.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
if info.superclass != None:
info.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: info depth: %d' % (counter, depth, )
        members = info.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class info
class client_handlerType(GeneratedsSuper):
member_data_items_ = [
MemberSpec_('fullname', 'xs:string', 0),
MemberSpec_('refid', 'xs:integer', 0),
]
subclass = None
superclass = None
def __init__(self, fullname=None, refid=None):
self.fullname = fullname
self.refid = refid
def factory(*args_, **kwargs_):
if client_handlerType.subclass:
return client_handlerType.subclass(*args_, **kwargs_)
else:
return client_handlerType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_fullname(self): return self.fullname
def set_fullname(self, fullname): self.fullname = fullname
def get_refid(self): return self.refid
def set_refid(self, refid): self.refid = refid
def hasContent_(self):
if (
self.fullname is not None or
self.refid is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='client-handlerType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='client-handlerType')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='client-handlerType'):
pass
def exportChildren(self, outfile, level, namespace_='', name_='client-handlerType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.fullname is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sfullname>%s</%sfullname>%s' % (namespace_, self.gds_format_string(quote_xml(self.fullname).encode(ExternalEncoding), input_name='fullname'), namespace_, eol_))
if self.refid is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%srefid>%s</%srefid>%s' % (namespace_, self.gds_format_integer(self.refid, input_name='refid'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='client-handlerType'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.fullname is not None:
showIndent(outfile, level)
outfile.write('fullname=%s,\n' % quote_python(self.fullname).encode(ExternalEncoding))
if self.refid is not None:
showIndent(outfile, level)
outfile.write('refid=%d,\n' % self.refid)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'fullname':
fullname_ = child_.text
fullname_ = self.gds_validate_string(fullname_, node, 'fullname')
self.fullname = fullname_
elif nodeName_ == 'refid':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'refid')
self.refid = ival_
def walk_and_update(self):
        members = client_handlerType.member_data_items_
for member in members:
obj1 = getattr(self, member.get_name())
if member.get_data_type() == 'xs:date':
newvalue = date_calcs.date_from_string(obj1)
setattr(self, member.get_name(), newvalue)
elif member.get_container():
for child in obj1:
if type(child) == types.InstanceType:
child.walk_and_update()
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_update()
if client_handlerType.superclass != None:
client_handlerType.superclass.walk_and_update(self)
def walk_and_show(self, depth):
global counter
counter += 1
depth += 1
print '%d. class: client_handlerType depth: %d' % (counter, depth, )
        members = client_handlerType.member_data_items_
for member in members:
s1 = member.get_name()
s2 = member.get_data_type()
s3 = '%d' % member.get_container()
obj1 = getattr(self, member.get_name())
if member.get_container():
s4 = '<container>'
else:
if type(obj1) != types.InstanceType:
s4 = '%s' % obj1
else:
s4 = '<instance>'
s5 = '%s%s%s %s' % (s1.ljust(16), s2.ljust(16), s3.rjust(4), s4, )
print ' ', s5
for member in members:
if member.get_container():
for child in getattr(self, member.get_name()):
if type(child) == types.InstanceType:
child.walk_and_show(depth)
else:
obj1 = getattr(self, member.get_name())
if type(obj1) == types.InstanceType:
obj1.walk_and_show(depth)
# end class client_handlerType
GDSClassesMapping = {
'client-handler': client_handlerType,
'elparam': param,
'promoter': booster,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
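# Tag_pattern_ strips any namespace prefix from the root tag, and the mapping
# above resolves renamed elements (e.g. 'promoter' -> booster) before falling
# back to a module-level class of the same name; unknown tags yield
# rootClass=None, in which case the parse_* helpers below fall back to the
# 'people' root class.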
def parse(inFileName, silence=False):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'people'
rootClass = people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='',
pretty_print=True)
return rootObj
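# Usage sketch (the file name is hypothetical):
#
#     rootObj = parse('people.xml', silence=True)
#     rootObj.walk_and_show(0)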
def parseEtree(inFileName, silence=False):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'people'
rootClass = people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
mapping = {}
rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
if not silence:
content = etree_.tostring(
rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
roots = get_root_tag(rootNode)
rootClass = roots[1]
if rootClass is None:
rootClass = people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_="people",
namespacedef_='')
return rootObj
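# Note that, unlike parse() and parseLiteral() above, parseString always
# exports under the hard-coded 'people' root name, even when a different
# root class was matched.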
def parseLiteral(inFileName, silence=False):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'people'
rootClass = people
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('#from out2_sup import *\n\n')
sys.stdout.write('import out2_sup as model_\n\n')
sys.stdout.write('rootObj = model_.rootTag(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"agent",
"booster",
"client_handlerType",
"comments",
"info",
"java_programmer",
"param",
"people",
"person",
"programmer",
"python_programmer",
"special_agent"
]
|
ricksladkey/generateDS
|
tests/out2_sup.py
|
Python
|
mit
| 153,816
|
import json
from prismriver import util, main
class SongJsonEncoder(json.JSONEncoder):
def default(self, o):
return o.__dict__
def format_output(songs, output_format, txt_template=None):
if output_format == 'txt':
formatted_songs = []
for song in songs:
            lyrics_txt = ''
            if song.lyrics:
                # join the lyric blocks with the plugin's visual separator
                lyrics_txt = '\n\n<<< --- --- --- >>>\n\n'.join(song.lyrics)
result = txt_template
result = result.replace('%TITLE%', song.title)
result = result.replace('%ARTIST%', song.artist)
            # note: both plugin placeholders are filled from plugin_name
            result = result.replace('%PLUGIN_ID%', song.plugin_name)
            result = result.replace('%PLUGIN_NAME%', song.plugin_name)
result = result.replace('%LYRICS%', lyrics_txt)
formatted_songs.append(result)
        # join the per-song blocks with a wider separator between songs
        return '\n\n<<< --- --- --- --- --- >>>\n\n'.join(formatted_songs)
elif output_format == 'json':
return json.dumps(songs, cls=SongJsonEncoder, sort_keys=True, indent=4, ensure_ascii=False)
elif output_format == 'json_ascii':
return json.dumps(songs, cls=SongJsonEncoder, sort_keys=True, indent=4)
    else:
        # unknown output format: nothing is returned (None)
        pass
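# Usage sketch; the Song class below is hypothetical, standing in for
# prismriver's own song objects, which expose the attributes referenced above:
#
#     class Song(object):
#         def __init__(self):
#             self.title, self.artist = 'Intro', 'Some Band'
#             self.plugin_name = 'demo'
#             self.lyrics = ['first part', 'second part']
#
#     print(format_output([Song()], 'txt',
#                         txt_template='%ARTIST% - %TITLE%\n\n%LYRICS%'))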
def list_plugins():
plugins = main.get_plugins()
plugins.sort(key=lambda x: x.plugin_name.lower())
for plugin in plugins:
print('{:<20} [id: {}]'.format(plugin.plugin_name, plugin.ID))
def run():
parser = util.init_args_parser()
parser.add_argument('--list', action='store_true', help='list available search plugins')
parser.add_argument('--song', action='store_true',
help='search for song information by artist and title (default action)')
parser.add_argument('--cleanup', action='store_true', help='remove outdated files from cache')
parser.add_argument("-f", "--format", type=str, default='txt',
help="lyrics output format (txt (default), json, json_ascii)")
parser.add_argument("-o", "--output", type=str, default='%ARTIST% - %TITLE%\nSource: %PLUGIN_NAME%\n\n%LYRICS%',
help="output template for txt format. Available parameters: "
"%%TITLE%% - song title, "
"%%ARTIST%% - song artist, "
"%%LYRICS%% - song lyrics, "
"%%PLUGIN_ID%% - plugin id, "
"%%PLUGIN_NAME%% - plugin name "
"(default value: %%ARTIST%% - %%TITLE%%\\nSource: %%PLUGIN_NAME%%\\n\\n%%LYRICS%%)"
)
params = parser.parse_args()
util.init_logging(params.quiet, params.verbose, params.log)
util.log_debug_info(params)
config = util.init_search_config(params)
util.log_config_info(config)
if params.list:
list_plugins()
elif params.cleanup:
main.cleanup_cache(config)
else:
result = main.search(params.artist, params.title, config)
if result:
print(format_output(result, params.format, params.output))
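# Entry-point sketch (assumption: the package wires run() up as a console
# script elsewhere):
#
#     if __name__ == '__main__':
#         run()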
|
anlar/prismriver
|
prismriver/cli.py
|
Python
|
mit
| 3,443
|
import cStringIO
import os
import tarfile
import zipfile
def test_download(data_builder, file_form, as_admin, api_db):
project = data_builder.create_project(label='project1')
session = data_builder.create_session(label='session1')
session2 = data_builder.create_session(label='session1')
session3 = data_builder.create_session(label='session1')
acquisition = data_builder.create_acquisition(session=session)
acquisition2 = data_builder.create_acquisition(session=session2)
acquisition3 = data_builder.create_acquisition(session=session3)
# upload the same file to each container created and use different tags to
# facilitate download filter tests:
# acquisition: [], session: ['plus'], project: ['plus', 'minus']
file_name = 'test.csv'
as_admin.post('/acquisitions/' + acquisition + '/files', files=file_form(
file_name, meta={'name': file_name, 'type': 'csv'}))
as_admin.post('/acquisitions/' + acquisition2 + '/files', files=file_form(
file_name, meta={'name': file_name, 'type': 'csv'}))
as_admin.post('/acquisitions/' + acquisition3 + '/files', files=file_form(
'test.txt', meta={'name': file_name, 'type': 'text'}))
as_admin.post('/sessions/' + session + '/files', files=file_form(
file_name, meta={'name': file_name, 'type': 'csv', 'tags': ['plus']}))
as_admin.post('/projects/' + project + '/files', files=file_form(
file_name, meta={'name': file_name, 'type': 'csv', 'tags': ['plus', 'minus']}))
missing_object_id = '000000000000000000000000'
# Try to download w/ nonexistent ticket
r = as_admin.get('/download', params={'ticket': missing_object_id})
assert r.status_code == 404
# Retrieve a ticket for a batch download
r = as_admin.post('/download', json={
'optional': False,
'filters': [{'tags': {
'-': ['minus']
}}],
'nodes': [
{'level': 'project', '_id': project},
]
})
assert r.ok
ticket = r.json()['ticket']
# Perform the download
r = as_admin.get('/download', params={'ticket': ticket})
assert r.ok
tar_file = cStringIO.StringIO(r.content)
tar = tarfile.open(mode="r", fileobj=tar_file)
    # Verify each file in the tar has the expected name and that the duplicate 'session1' labels were disambiguated with numeric suffixes
found_second_session = False
found_third_session = False
for tarinfo in tar:
assert os.path.basename(tarinfo.name) == file_name
if 'session1_0' in str(tarinfo.name):
found_second_session = True
if 'session1_1' in str(tarinfo.name):
found_third_session = True
assert found_second_session
assert found_third_session
tar.close()
    # Download one session with multiple acquisitions and make sure their files land in the same session folder (no dedup suffix)
acquisition3 = data_builder.create_acquisition(session=session)
r = as_admin.post('/acquisitions/' + acquisition3 + '/files', files=file_form(
file_name, meta={'name': file_name, 'type': 'csv'}))
assert r.ok
r = as_admin.post('/download', json={
'optional': False,
'nodes': [
{'level': 'acquisition', '_id': acquisition},
{'level': 'acquisition', '_id': acquisition3},
]
})
assert r.ok
ticket = r.json()['ticket']
# Perform the download
r = as_admin.get('/download', params={'ticket': ticket})
assert r.ok
tar_file = cStringIO.StringIO(r.content)
tar = tarfile.open(mode="r", fileobj=tar_file)
    # Verify the file names and that no disambiguation suffix appears, since both acquisitions belong to the same session
found_second_session = False
for tarinfo in tar:
assert os.path.basename(tarinfo.name) == file_name
if 'session1_0' in str(tarinfo.name):
found_second_session = True
assert not found_second_session
tar.close()
# Try to perform the download from a different IP
update_result = api_db.downloads.update_one(
{'_id': ticket},
{'$set': {'ip': '0.0.0.0'}})
assert update_result.modified_count == 1
r = as_admin.get('/download', params={'ticket': ticket})
assert r.status_code == 400
# Try to retrieve a ticket referencing nonexistent containers
r = as_admin.post('/download', json={
'optional': False,
'nodes': [
{'level': 'project', '_id': missing_object_id},
{'level': 'session', '_id': missing_object_id},
{'level': 'acquisition', '_id': missing_object_id},
]
})
assert r.status_code == 404
# Try to retrieve ticket for bulk download w/ invalid container name
# (not project|session|acquisition)
r = as_admin.post('/download', params={'bulk': 'true'}, json={
'files': [{'container_name': 'subject', 'container_id': missing_object_id, 'filename': 'nosuch.csv'}]
})
assert r.status_code == 400
# Try to retrieve ticket for bulk download referencing nonexistent file
r = as_admin.post('/download', params={'bulk': 'true'}, json={
'files': [{'container_name': 'project', 'container_id': project, 'filename': 'nosuch.csv'}]
})
assert r.status_code == 404
# Retrieve ticket for bulk download
r = as_admin.post('/download', params={'bulk': 'true'}, json={
'files': [{'container_name': 'project', 'container_id': project, 'filename': file_name}]
})
assert r.ok
ticket = r.json()['ticket']
# Perform the download using symlinks
r = as_admin.get('/download', params={'ticket': ticket, 'symlinks': 'true'})
assert r.ok
def test_filelist_download(data_builder, file_form, as_admin):
session = data_builder.create_session()
zip_cont = cStringIO.StringIO()
with zipfile.ZipFile(zip_cont, 'w') as zip_file:
zip_file.writestr('two.csv', 'sample\ndata\n')
zip_cont.seek(0)
session_files = '/sessions/' + session + '/files'
as_admin.post(session_files, files=file_form('one.csv'))
as_admin.post(session_files, files=file_form(('two.zip', zip_cont)))
# try to get non-existent file
r = as_admin.get(session_files + '/non-existent.csv')
assert r.status_code == 404
# try to get file w/ non-matching hash
r = as_admin.get(session_files + '/one.csv', params={'hash': 'match me if you can'})
assert r.status_code == 409
# get download ticket for single file
r = as_admin.get(session_files + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
# download single file w/ ticket
r = as_admin.get(session_files + '/one.csv', params={'ticket': ticket})
assert r.ok
# try to get zip info for non-zip file
r = as_admin.get(session_files + '/one.csv', params={'ticket': ticket, 'info': 'true'})
assert r.status_code == 400
# try to get zip member of non-zip file
r = as_admin.get(session_files + '/one.csv', params={'ticket': ticket, 'member': 'hardly'})
assert r.status_code == 400
# try to download a different file w/ ticket
r = as_admin.get(session_files + '/two.zip', params={'ticket': ticket})
assert r.status_code == 400
# get download ticket for zip file
r = as_admin.get(session_files + '/two.zip', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
# get zip info
r = as_admin.get(session_files + '/two.zip', params={'ticket': ticket, 'info': 'true'})
assert r.ok
# try to get non-existent zip member
r = as_admin.get(session_files + '/two.zip', params={'ticket': ticket, 'member': 'hardly'})
assert r.status_code == 400
# get zip member
r = as_admin.get(session_files + '/two.zip', params={'ticket': ticket, 'member': 'two.csv'})
assert r.ok
def test_filelist_range_download(data_builder, as_admin, file_form):
session = data_builder.create_session()
session_files = '/sessions/' + session + '/files'
as_admin.post(session_files, files=file_form(('one.csv', '123456789')))
r = as_admin.get(session_files + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
    # try to download the file's first byte w/o the 'view' flag; the Range header is ignored and the full content comes back
r = as_admin.get(session_files + '/one.csv',
params={'ticket': ticket},
headers={'Range': 'bytes=0-0'})
assert r.ok
assert r.content == '123456789'
# download single file from byte 0 to end of file
r = as_admin.get(session_files + '/one.csv',
params={'ticket': ticket, 'view': 'true'},
headers={'Range': 'bytes=0-'})
assert r.ok
assert r.content == '123456789'
r = as_admin.get(session_files + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
    # download single file's first byte using a lower-case 'range' header name
r = as_admin.get(session_files + '/one.csv',
params={'ticket': ticket, 'view': 'true'},
headers={'range': 'bytes=0-0'})
assert r.ok
assert r.content == '1'
r = as_admin.get(session_files + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
    # download single file's first two bytes
r = as_admin.get(session_files + '/one.csv',
params={'ticket': ticket, 'view': 'true'},
headers={'Range': 'bytes=0-1'})
assert r.ok
assert r.content == '12'
r = as_admin.get(session_files + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
# download single file's last 5 bytes
r = as_admin.get(session_files + '/one.csv',
params={'ticket': ticket, 'view': 'true'},
headers={'Range': 'bytes=-5'})
assert r.ok
assert r.content == '56789'
r = as_admin.get(session_files + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
# try to download single file with invalid unit
r = as_admin.get(session_files + '/one.csv',
params={'ticket': ticket, 'view': 'true'},
headers={'Range': 'lol=-5'})
assert r.status_code == 200
assert r.content == '123456789'
r = as_admin.get(session_files + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
    # try to download single file with invalid range where the last byte is greater than the size of the file;
    # in this case the whole file is returned
r = as_admin.get(session_files + '/one.csv',
params={'ticket': ticket, 'view': 'true'},
headers={'Range': 'bytes=0-500'})
assert r.status_code == 200
assert r.content == '123456789'
r = as_admin.get(session_files + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
    # try to download single file with invalid range where the first byte is greater than the size of the file
r = as_admin.get(session_files + '/one.csv',
params={'ticket': ticket, 'view': 'true'},
headers={'Range': 'bytes=500-'})
assert r.status_code == 416
r = as_admin.get(session_files + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
    # try to download single file with an invalid range; in this case the whole file is returned
r = as_admin.get(session_files + '/one.csv',
params={'ticket': ticket, 'view': 'true'},
headers={'Range': 'bytes=-'})
assert r.status_code == 200
assert r.content == '123456789'
r = as_admin.get(session_files + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
    # try to download single file with an invalid range whose first byte is greater than the last one
r = as_admin.get(session_files + '/one.csv',
params={'ticket': ticket, 'view': 'true'},
headers={'Range': 'bytes=10-5'})
assert r.status_code == 200
assert r.content == '123456789'
r = as_admin.get(session_files + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
# try to download single file with invalid range, can't parse first byte
r = as_admin.get(session_files + '/one.csv',
params={'ticket': ticket, 'view': 'true'},
headers={'Range': 'bytes=r-0'})
assert r.status_code == 200
assert r.content == '123456789'
r = as_admin.get(session_files + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
# try to download single file with invalid range, can't parse last byte
r = as_admin.get(session_files + '/one.csv',
params={'ticket': ticket, 'view': 'true'},
headers={'Range': 'bytes=0-bb'})
assert r.status_code == 200
assert r.content == '123456789'
r = as_admin.get(session_files + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
# try to download single file with invalid range syntax
r = as_admin.get(session_files + '/one.csv',
params={'ticket': ticket, 'view': 'true'},
headers={'Range': 'bytes=1+5'})
assert r.status_code == 200
assert r.content == '123456789'
r = as_admin.get(session_files + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
# try to download single file with invalid range header syntax
r = as_admin.get(session_files + '/one.csv',
params={'ticket': ticket, 'view': 'true'},
headers={'Range': 'bytes-1+5'})
assert r.status_code == 200
assert r.content == '123456789'
r = as_admin.get(session_files + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
# download multiple ranges
r = as_admin.get(session_files + '/one.csv',
params={'ticket': ticket, 'view': 'true'},
headers={'Range': 'bytes=1-2, 3-4'})
assert r.ok
boundary = r.headers.get('Content-Type').split('boundary=')[1]
assert r.content == '--{0}\n' \
'Content-Type: text/csv\n' \
'Content-Range: bytes 1-2/9\n\n' \
'23\n' \
'--{0}\n' \
'Content-Type: text/csv\n' \
'Content-Range: bytes 3-4/9\n\n' \
'45\n'.format(boundary)
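    # Rough sketch of how a client could split such a multipart/byteranges
    # body (illustrative only, not exercised by this test):
    #
    #     for part in r.content.split('--' + boundary):
    #         if part.strip():
    #             headers, _, payload = part.partition('\n\n')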
def test_analysis_download(data_builder, file_form, as_admin, as_drone, default_payload):
session = data_builder.create_session()
zip_cont = cStringIO.StringIO()
with zipfile.ZipFile(zip_cont, 'w') as zip_file:
zip_file.writestr('two.csv', 'sample\ndata\n')
zip_cont.seek(0)
# create (legacy) analysis for testing the download functionality
r = as_admin.post('/sessions/' + session + '/analyses', files=file_form('one.csv', ('two.zip', zip_cont), meta={
'label': 'test',
'inputs': [{'name': 'one.csv'}],
'outputs': [{'name': 'two.zip'}],
}))
assert r.ok
analysis = r.json()['_id']
analysis_inputs = '/sessions/' + session + '/analyses/' + analysis + '/inputs'
analysis_outputs = '/sessions/' + session + '/analyses/' + analysis + '/files'
new_analysis_inputs = '/analyses/' + analysis + '/inputs'
new_analysis_outputs = '/analyses/' + analysis + '/files'
# Check that analysis inputs are placed under the inputs key
r = as_admin.get('/sessions/' + session + '/analyses/' + analysis)
assert r.ok
assert [f['name'] for f in r.json().get('inputs', [])] == ['one.csv']
# try to download analysis inputs w/ non-existent ticket
r = as_admin.get(analysis_inputs, params={'ticket': '000000000000000000000000'})
assert r.status_code == 404
# get analysis batch download ticket for all inputs
r = as_admin.get(analysis_inputs, params={'ticket': ''}, json={"optional":True,"nodes":[{"level":"analysis","_id":analysis}]})
assert r.ok
ticket = r.json()['ticket']
# filename is analysis_<label> not analysis_<_id>
assert r.json()['filename'] == 'analysis_test.tar'
# batch download analysis inputs w/ ticket from wrong endpoint
r = as_admin.get(analysis_inputs, params={'ticket': ticket})
assert r.status_code == 400
# batch download analysis inputs w/ ticket from correct endpoint
r = as_admin.get('/download', params={'ticket': ticket})
assert r.ok
# Check to make sure outputs are in tar
with tarfile.open(mode='r', fileobj=cStringIO.StringIO(r.content)) as tar:
assert [m.name for m in tar.getmembers()] == ['test/input/one.csv']
### Using '/download' endpoint only - for analysis outputs only! ###
# try to download analysis outputs w/ non-existent ticket
r = as_admin.get('/download', params={'ticket': '000000000000000000000000'})
assert r.status_code == 404
# get analysis batch download ticket for all outputs
r = as_admin.get('/download', params={'ticket': ''}, json={"optional":True,"nodes":[{"level":"analysis","_id":analysis}]})
assert r.ok
ticket = r.json()['ticket']
# filename is analysis_<label> not analysis_<_id>
assert r.json()['filename'] == 'analysis_test.tar'
# batch download analysis outputs w/ ticket
r = as_admin.get('/download', params={'ticket': ticket})
assert r.ok
# Check to make sure inputs and outputs are in tar
with tarfile.open(mode='r', fileobj=cStringIO.StringIO(r.content)) as tar:
assert set([m.name for m in tar.getmembers()]) == set(['test/input/one.csv', 'test/output/two.zip'])
# try to get download ticket for non-existent analysis file
r = as_admin.get(analysis_inputs + '/non-existent.csv')
assert r.status_code == 404
# get analysis download ticket for single file
r = as_admin.get(analysis_inputs + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
# download single analysis file w/ ticket
r = as_admin.get(analysis_inputs + '/one.csv', params={'ticket': ticket})
assert r.ok
# try to get zip info for non-zip file
r = as_admin.get(analysis_inputs + '/one.csv', params={'ticket': ticket, 'info': 'true'})
assert r.status_code == 400
# try to get zip member of non-zip file
r = as_admin.get(analysis_inputs + '/one.csv', params={'ticket': ticket, 'member': 'nosuch'})
assert r.status_code == 400
# try to download a different file w/ ticket
r = as_admin.get(analysis_outputs + '/two.zip', params={'ticket': ticket})
assert r.status_code == 400
# get analysis download ticket for zip file
r = as_admin.get(analysis_outputs + '/two.zip', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
# get zip info
r = as_admin.get(analysis_outputs + '/two.zip', params={'ticket': ticket, 'info': 'true'})
assert r.ok
# try to get non-existent zip member
r = as_admin.get(analysis_outputs + '/two.zip', params={'ticket': ticket, 'member': 'nosuch'})
assert r.status_code == 400
# get zip member
r = as_admin.get(analysis_outputs + '/two.zip', params={'ticket': ticket, 'member': 'two.csv'})
assert r.ok
### single file analysis download using FileListHandler ###
# try to get download ticket for non-existent analysis file
r = as_admin.get(new_analysis_inputs + '/non-existent.csv')
assert r.status_code == 404
# get analysis download ticket for single file
r = as_admin.get(new_analysis_inputs + '/one.csv', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
# download single analysis file w/ ticket
r = as_admin.get(new_analysis_inputs + '/one.csv', params={'ticket': ticket})
assert r.ok
# try to get zip info for non-zip file
r = as_admin.get(new_analysis_inputs + '/one.csv', params={'ticket': ticket, 'info': 'true'})
assert r.status_code == 400
# try to get zip member of non-zip file
r = as_admin.get(new_analysis_inputs + '/one.csv', params={'ticket': ticket, 'member': 'nosuch'})
assert r.status_code == 400
# try to download a different file w/ ticket
r = as_admin.get(new_analysis_outputs + '/two.zip', params={'ticket': ticket})
assert r.status_code == 400
# get analysis download ticket for zip file
r = as_admin.get(new_analysis_outputs + '/two.zip', params={'ticket': ''})
assert r.ok
ticket = r.json()['ticket']
# get zip info
r = as_admin.get(new_analysis_outputs + '/two.zip', params={'ticket': ticket, 'info': 'true'})
assert r.ok
# try to get non-existent zip member
r = as_admin.get(new_analysis_outputs + '/two.zip', params={'ticket': ticket, 'member': 'nosuch'})
assert r.status_code == 400
# get zip member
r = as_admin.get(new_analysis_outputs + '/two.zip', params={'ticket': ticket, 'member': 'two.csv'})
assert r.ok
# delete session analysis (job)
r = as_admin.delete('/sessions/' + session + '/analyses/' + analysis)
assert r.ok
def test_filters(data_builder, file_form, as_admin):
project = data_builder.create_project()
session = data_builder.create_session()
acquisition = data_builder.create_acquisition()
acquisition2 = data_builder.create_acquisition()
as_admin.post('/acquisitions/' + acquisition + '/files', files=file_form(
"test.csv", meta={'name': "test.csv", 'type': 'csv', 'tags': ['red', 'blue']}))
as_admin.post('/acquisitions/' + acquisition + '/files', files=file_form(
'test.dicom', meta={'name': 'test.dicom', 'type': 'dicom', 'tags': ['red']}))
as_admin.post('/acquisitions/' + acquisition2 + '/files', files=file_form(
'test.nifti', meta={'name': 'test.nifti', 'type': 'nifti'}))
r = as_admin.get('/acquisitions/' + acquisition)
assert r.ok
# Malformed filters
r = as_admin.post('/download', json={
'optional': False,
'filters': [
{'tags': 'red'}
],
'nodes': [
{'level': 'session', '_id': session},
]
})
assert r.status_code == 400
# No filters
r = as_admin.post('/download', json={
'optional': False,
'nodes': [
{'level': 'session', '_id': session},
]
})
assert r.ok
assert r.json()['file_cnt'] == 3
# Filter by tags
r = as_admin.post('/download', json={
'optional': False,
'filters': [
{'tags': {'+':['red']}}
],
'nodes': [
{'level': 'session', '_id': session},
]
})
assert r.ok
assert r.json()['file_cnt'] == 2
# Use filter aliases
r = as_admin.post('/download', json={
'optional': False,
'filters': [
{'tags': {'plus':['red']}}
],
'nodes': [
{'level': 'session', '_id': session},
]
})
assert r.ok
# 'plus' is same as using '+'
assert r.json()['file_cnt'] == 2
# Filter by type
as_admin.post('/acquisitions/' + acquisition + '/files', files=file_form(
"test", meta={'name': "test", 'tags': ['red', 'blue']}))
r = as_admin.post('/download', json={
'optional': False,
'filters': [
{'types': {'+':['nifti']}}
],
'nodes': [
{'level': 'session', '_id': session},
]
})
assert r.ok
assert r.json()['file_cnt'] == 1
r = as_admin.post('/download', json={
'optional': False,
'filters': [
{'types': {'+':['null']}}
],
'nodes': [
{'level': 'session', '_id': session},
]
})
assert r.ok
assert r.json()['file_cnt'] == 1
def test_summary(data_builder, as_admin, file_form):
project = data_builder.create_project(label='project1')
session = data_builder.create_session(label='session1')
session2 = data_builder.create_session(label='session1')
acquisition = data_builder.create_acquisition(session=session)
acquisition2 = data_builder.create_acquisition(session=session2)
# upload the same file to each container created and use different tags to
# facilitate download filter tests:
# acquisition: [], session: ['plus'], project: ['plus', 'minus']
file_name = 'test.csv'
as_admin.post('/acquisitions/' + acquisition + '/files', files=file_form(
file_name, meta={'name': file_name, 'type': 'csv'}))
as_admin.post('/acquisitions/' + acquisition2 + '/files', files=file_form(
file_name, meta={'name': file_name, 'type': 'csv'}))
as_admin.post('/sessions/' + session + '/files', files=file_form(
file_name, meta={'name': file_name, 'type': 'csv', 'tags': ['plus']}))
as_admin.post('/projects/' + project + '/files', files=file_form(
file_name, meta={'name': file_name, 'type': 'csv', 'tags': ['plus', 'minus']}))
missing_object_id = '000000000000000000000000'
r = as_admin.post('/download/summary', json=[{"level":"project", "_id":project}])
assert r.ok
assert len(r.json()) == 1
assert r.json().get("csv", {}).get("count",0) == 4
r = as_admin.post('/download/summary', json=[{"level":"session", "_id":session}])
assert r.ok
assert len(r.json()) == 1
assert r.json().get("csv", {}).get("count",0) == 2
r = as_admin.post('/download/summary', json=[{"level":"acquisition", "_id":acquisition},{"level":"acquisition", "_id":acquisition2}])
assert r.ok
assert len(r.json()) == 1
assert r.json().get("csv", {}).get("count",0) == 2
r = as_admin.post('/download/summary', json=[{"level":"group", "_id":missing_object_id}])
assert r.status_code == 400
r = as_admin.post('/sessions/' + session + '/analyses', files=file_form(
file_name, meta={'label': 'test', 'outputs':[{'name':file_name}]}))
assert r.ok
analysis = r.json()['_id']
r = as_admin.post('/download/summary', json=[{"level":"analysis", "_id":analysis}])
assert r.ok
assert len(r.json()) == 1
assert r.json().get("tabular data", {}).get("count",0) == 1
|
scitran/core
|
tests/integration_tests/python/test_download.py
|
Python
|
mit
| 26,062
|
from __future__ import unicode_literals, division, absolute_import
import os
from flexget import plugin
from flexget.event import event
from flexget.utils import json
def load_uoccin_data(path):
udata = {}
ufile = os.path.join(path, 'uoccin.json')
if os.path.exists(ufile):
try:
with open(ufile, 'r') as f:
udata = json.load(f)
except Exception as err:
raise plugin.PluginError('error reading %s: %s' % (ufile, err))
udata.setdefault('movies', {})
udata.setdefault('series', {})
return udata
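# Illustrative sketch of the uoccin.json layout this plugin reads. The keys
# and nesting are inferred from the lookups in on_task_metainfo below; real
# files may carry additional fields.
#
#   {
#     "series": {
#       "<tvdb_id>": {
#         "watchlist": true,
#         "rating": 8,
#         "tags": ["favorite"],
#         "collected": {"<season>": {"<episode>": ["eng", "ita"]}},
#         "watched": {"<season>": [1, 2, 3]}
#       }
#     },
#     "movies": {
#       "<imdb_id>": {
#         "watchlist": true, "collected": true, "watched": false,
#         "rating": 7, "tags": [], "subtitles": ["eng"]
#       }
#     }
#   }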
class UoccinLookup(object):
schema = { 'type': 'string', 'format': 'path' }
# Run after metainfo_series / thetvdb_lookup / imdb_lookup
@plugin.priority(100)
def on_task_metainfo(self, task, config):
"""Retrieves all the information found in the uoccin.json file for the entries.
Example::
uoccin_lookup: /path/to/gdrive/uoccin
Resulting fields on entries:
on series (requires tvdb_id):
- uoccin_watchlist (true|false)
- uoccin_rating (integer)
- uoccin_tags (list)
on episodes (requires tvdb_id, series_season and series_episode):
- uoccin_collected (true|false)
- uoccin_watched (true|false)
- uoccin_subtitles (list of language codes)
(plus the 3 series specific fields)
on movies (requires imdb_id):
- uoccin_watchlist (true|false)
- uoccin_collected (true|false)
- uoccin_watched (true|false)
- uoccin_rating (integer)
- uoccin_tags (list)
- uoccin_subtitles (list of language codes)
"""
if not task.entries:
return
udata = load_uoccin_data(config)
movies = udata['movies']
series = udata['series']
for entry in task.entries:
entry['uoccin_watchlist'] = False
entry['uoccin_collected'] = False
entry['uoccin_watched'] = False
entry['uoccin_rating'] = None
entry['uoccin_tags'] = []
entry['uoccin_subtitles'] = []
if 'tvdb_id' in entry:
ser = series.get(str(entry['tvdb_id']))
if ser is None:
continue
entry['uoccin_watchlist'] = ser.get('watchlist', False)
entry['uoccin_rating'] = ser.get('rating')
entry['uoccin_tags'] = ser.get('tags', [])
if all(field in entry for field in ['series_season', 'series_episode']):
season = str(entry['series_season'])
episode = entry['series_episode']
edata = ser.get('collected', {}).get(season, {}).get(str(episode))
entry['uoccin_collected'] = isinstance(edata, list)
entry['uoccin_subtitles'] = edata if entry['uoccin_collected'] else []
entry['uoccin_watched'] = episode in ser.get('watched', {}).get(season, [])
elif 'imdb_id' in entry:
try:
mov = movies.get(entry['imdb_id'])
except plugin.PluginError as e:
self.log.trace('entry %s imdb failed (%s)' % (entry['imdb_id'], e.value))
continue
if mov is None:
continue
entry['uoccin_watchlist'] = mov.get('watchlist', False)
entry['uoccin_collected'] = mov.get('collected', False)
entry['uoccin_watched'] = mov.get('watched', False)
entry['uoccin_rating'] = mov.get('rating')
entry['uoccin_tags'] = mov.get('tags', [])
entry['uoccin_subtitles'] = mov.get('subtitles', [])
@event('plugin.register')
def register_plugin():
plugin.register(UoccinLookup, 'uoccin_lookup', api_ver=2)
|
antivirtel/Flexget
|
flexget/plugins/metainfo/uoccin_lookup.py
|
Python
|
mit
| 3,924
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from model_mommy import mommy
from rolepermissions.roles import RolesManager, AbstractUserRole
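# In available_permissions, each entry maps a permission codename to whether
# the role grants it by default; the assertions below verify that only the
# True entries end up in user.user_permissions.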
class RolRole1(AbstractUserRole):
available_permissions = {
'permission1': True,
'permission2': True,
}
class RolRole2(AbstractUserRole):
available_permissions = {
'permission3': True,
'permission4': False,
}
class RolRole3(AbstractUserRole):
role_name = 'new_name'
available_permissions = {
'permission5': False,
'permission6': False,
}
class AbstractUserRoleTests(TestCase):
def setUp(self):
pass
def test_get_name(self):
self.assertEquals(RolRole1.get_name(), 'rol_role1')
self.assertEquals(RolRole2.get_name(), 'rol_role2')
self.assertEquals(RolRole3.get_name(), 'new_name')
def test_assign_Role1_default_permissions(self):
user = mommy.make(get_user_model())
RolRole1.assign_role_to_user(user)
permissions = user.user_permissions.all()
permission_names_list = [perm.codename for perm in permissions]
self.assertIn('permission1', permission_names_list)
self.assertIn('permission2', permission_names_list)
self.assertEquals(len(permissions), 2)
def test_assign_Role2_default_permissions(self):
user = mommy.make(get_user_model())
RolRole2.assign_role_to_user(user)
permissions = user.user_permissions.all()
permission_names_list = [perm.codename for perm in permissions]
self.assertIn('permission3', permission_names_list)
self.assertNotIn('permission4', permission_names_list)
self.assertEquals(len(permissions), 1)
def test_assign_Role3_default_permissions(self):
user = mommy.make(get_user_model())
RolRole3.assign_role_to_user(user)
permissions = user.user_permissions.all()
permission_names_list = [perm.codename for perm in permissions]
self.assertNotIn('permission5', permission_names_list)
self.assertNotIn('permission6', permission_names_list)
self.assertEquals(len(permissions), 0)
def test_assign_role_to_user(self):
user = mommy.make(get_user_model())
user_role = RolRole1.assign_role_to_user(user)
self.assertEquals(user_role.name, 'rol_role1')
    def test_instantiate_role(self):
user = mommy.make(get_user_model())
user_role = RolRole1.assign_role_to_user(user)
self.assertIsNotNone(user_role.pk)
def test_change_user_role(self):
user = mommy.make(get_user_model())
user_role = RolRole1.assign_role_to_user(user)
self.assertEquals(user_role.name, 'rol_role1')
user_role = RolRole2.assign_role_to_user(user)
self.assertEquals(user_role.name, 'rol_role2')
def test_delete_old_permissions_on_role_change(self):
user = mommy.make(get_user_model())
RolRole1().assign_role_to_user(user)
permissions = user.user_permissions.all()
permission_names = [n.codename for n in permissions]
self.assertIn('permission1', permission_names)
self.assertIn('permission2', permission_names)
self.assertEquals(len(permissions), 2)
RolRole2.assign_role_to_user(user)
permissions = user.user_permissions.all()
permission_names = [n.codename for n in permissions]
self.assertNotIn('permission1', permission_names)
self.assertNotIn('permission2', permission_names)
self.assertIn('permission3', permission_names)
self.assertNotIn('permission4', permission_names)
self.assertEquals(len(permissions), 1)
def test_permission_names_list(self):
self.assertIn('permission1', RolRole1.permission_names_list())
self.assertIn('permission2', RolRole1.permission_names_list())
self.assertIn('permission3', RolRole2.permission_names_list())
self.assertIn('permission4', RolRole2.permission_names_list())
class RolesManagerTests(TestCase):
def setUp(self):
pass
def test_retrieve_role(self):
self.assertEquals(RolesManager.retrieve_role('rol_role1'), RolRole1)
self.assertEquals(RolesManager.retrieve_role('rol_role2'), RolRole2)
|
chillbear/django-role-permissions
|
rolepermissions/tests/test_roles.py
|
Python
|
mit
| 4,326
|
import argparse
import sys
# Sieve of Eratosthenes
# Code by David Eppstein, UC Irvine, 28 Feb 2002
# http://code.activestate.com/recipes/117119/
def gen_primes():
""" Generate an infinite sequence of prime numbers.
"""
# Maps composites to primes witnessing their compositeness.
# This is memory efficient, as the sieve is not "run forward"
# indefinitely, but only as long as required by the current
# number being tested.
#
D = {}
# The running integer that's checked for primeness
q = 2
while True:
if q not in D:
# q is a new prime.
# Yield it and mark its first multiple that isn't
# already marked in previous iterations
#
yield q
D[q * q] = [q]
else:
# q is composite. D[q] is the list of primes that
# divide it. Since we've reached q, we no longer
# need it in the map, but we'll mark the next
# multiples of its witnesses to prepare for larger
# numbers
#
for p in D[q]:
D.setdefault(p + q, []).append(p)
del D[q]
q += 1
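# Example usage (a sketch, independent of the CLI below): pull the first few
# primes from the infinite generator with itertools.islice.
#
#   from itertools import islice
#   print(list(islice(gen_primes(), 10)))
#   # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]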
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate prime number array")
parser.add_argument('n', metavar='N', nargs=1, type=int, help="Limit value")
group = parser.add_mutually_exclusive_group()
    group.add_argument('--count', action='store_const', const=True,
                       default=False, help='limit the number of generated primes (default)')
    group.add_argument('--value', action='store_const', const=True,
                       default=False, help='limit the maximum value of generated primes')
args = parser.parse_args()
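    # --value bounds the largest prime printed; the default --count mode
    # bounds how many primes are printed in total. The first prime is written
    # outside the loop and enumerate() counts from 0, hence the N-2 offset.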
if args.value:
limit = args.n[0]
else:
limit = args.n[0]-2
prime = iter(gen_primes())
sys.stdout.write("{"+str(prime.next()))
for idx, val in enumerate(prime):
if args.value and limit < val:
break
elif limit < idx:
break
sys.stdout.write(", "+str(val))
print("}")
|
everyevery/programming_study
|
tools/gen_prime.py
|
Python
|
mit
| 2,129
|
# -*- coding: utf-8 -*-
#######################################################################
# Name: test_optional_in_choice
# Purpose:  Optional matches always succeeds but should not stop alternative
# probing on failed match.
# Author: Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# Copyright: (c) 2015 Igor R. Dejanović <igor DOT dejanovic AT gmail DOT com>
# License: MIT License
#######################################################################
from __future__ import unicode_literals
# Grammar
from arpeggio import ParserPython, Optional, EOF
def g(): return [Optional('first'), Optional('second'), Optional('third')], EOF
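# With input "second", the choice first tries Optional('first'), which
# "succeeds" by matching nothing; the parser must then backtrack and probe
# Optional('second') instead of committing to the empty match.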
def test_optional_in_choice():
parser = ParserPython(g)
input_str = "second"
parse_tree = parser.parse(input_str)
assert parse_tree is not None
|
leiyangyou/Arpeggio
|
tests/unit/regressions/issue_20/test_issue_20.py
|
Python
|
mit
| 827
|
# Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from reviews_manager.models import ClinicalAnnotationStep
from rois_manager.models import Slice, Core, FocusRegion
class SliceAnnotation(models.Model):
author = models.ForeignKey(User, on_delete=models.PROTECT, blank=False)
slice = models.ForeignKey(Slice, on_delete=models.PROTECT, blank=False,
related_name='clinical_annotations')
annotation_step = models.ForeignKey(ClinicalAnnotationStep, on_delete=models.PROTECT,
blank=False, related_name='slice_annotations')
action_start_time = models.DateTimeField(null=True, default=None)
action_complete_time = models.DateTimeField(null=True, default=None)
creation_date = models.DateTimeField(default=timezone.now)
high_grade_pin = models.BooleanField(blank=False, null=False, default=False)
pah = models.BooleanField(blank=False, null=False, default=False)
chronic_inflammation = models.BooleanField(blank=False, null=False, default=False)
acute_inflammation = models.BooleanField(blank=False, null=False, default=False)
periglandular_inflammation = models.BooleanField(blank=False, null=False, default=False)
intraglandular_inflammation = models.BooleanField(blank=False, null=False, default=False)
stromal_inflammation = models.BooleanField(blank=False, null=False, default=False)
class Meta:
unique_together = ('slice', 'annotation_step')
def get_gleason_4_total_area(self):
gleason_4_total_area = 0.0
for focus_region in self.slice.get_focus_regions():
try:
focus_region_annotation = FocusRegionAnnotation.objects.get(
focus_region=focus_region,
annotation_step=self.annotation_step
)
gleason_4_total_area += focus_region_annotation.get_total_gleason_4_area()
except FocusRegionAnnotation.DoesNotExist:
pass
return gleason_4_total_area
def get_total_tumor_area(self):
total_tumor_area = 0.0
for core in self.slice.cores.all():
total_tumor_area += core.get_total_tumor_area()
return total_tumor_area
def get_gleason_4_percentage(self):
gleason_4_total_area = self.get_gleason_4_total_area()
total_tumor_area = self.get_total_tumor_area()
try:
return (gleason_4_total_area / total_tumor_area) * 100.0
except ZeroDivisionError:
return -1
def get_action_duration(self):
if self.action_start_time and self.action_complete_time:
return (self.action_complete_time-self.action_start_time).total_seconds()
else:
return None
class CoreAnnotation(models.Model):
GLEASON_GROUP_WHO_16 = (
('GG1', 'GRADE_GROUP_1'), # gleason score <= 6
('GG2', 'GRADE_GROUP_2'), # gleason score 3+4=7
('GG3', 'GRADE_GROUP_3'), # gleason score 4+3=7
('GG4', 'GRADE_GROUP_4'), # gleason score 4+4=8 || 3+5=8 || 5+3=8
('GG5', 'GRADE_GROUP_5') # gleason score 9 or 10
)
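    # Illustrative sketch (hypothetical helper, not used by this model) of
    # how a (primary, secondary) Gleason pair maps to the WHO 2016 grade
    # groups annotated above:
    #
    #   def who16_grade_group(primary, secondary):
    #       total = primary + secondary
    #       if total <= 6:
    #           return 'GG1'
    #       if total == 7:
    #           return 'GG2' if primary == 3 else 'GG3'
    #       if total == 8:
    #           return 'GG4'
    #       return 'GG5'  # score 9 or 10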
author = models.ForeignKey(User, on_delete=models.PROTECT, blank=False)
core = models.ForeignKey(Core, on_delete=models.PROTECT, blank=False,
related_name='clinical_annotations')
annotation_step = models.ForeignKey(ClinicalAnnotationStep, on_delete=models.PROTECT,
blank=False, related_name='core_annotations')
action_start_time = models.DateTimeField(null=True, default=None)
action_complete_time = models.DateTimeField(null=True, default=None)
creation_date = models.DateTimeField(default=timezone.now)
primary_gleason = models.IntegerField(blank=False)
secondary_gleason = models.IntegerField(blank=False)
gleason_group = models.CharField(
max_length=3, choices=GLEASON_GROUP_WHO_16, blank=False
)
class Meta:
unique_together = ('core', 'annotation_step')
def get_gleason_4_total_area(self):
gleason_4_total_area = 0.0
for focus_region in self.core.focus_regions.all():
try:
focus_region_annotation = FocusRegionAnnotation.objects.get(
annotation_step=self.annotation_step,
focus_region=focus_region
)
gleason_4_total_area += focus_region_annotation.get_total_gleason_4_area()
except FocusRegionAnnotation.DoesNotExist:
pass
return gleason_4_total_area
def get_total_tumor_area(self):
return self.core.get_total_tumor_area()
def get_gleason_4_percentage(self):
gleason_4_total_area = self.get_gleason_4_total_area()
total_tumor_area = self.get_total_tumor_area()
try:
return (gleason_4_total_area / total_tumor_area) * 100.0
except ZeroDivisionError:
return -1
def get_grade_group_text(self):
for choice in self.GLEASON_GROUP_WHO_16:
if choice[0] == self.gleason_group:
return choice[1]
def get_action_duration(self):
if self.action_start_time and self.action_complete_time:
return (self.action_complete_time-self.action_start_time).total_seconds()
else:
return None
class FocusRegionAnnotation(models.Model):
author = models.ForeignKey(User, on_delete=models.PROTECT, blank=False)
focus_region = models.ForeignKey(FocusRegion, on_delete=models.PROTECT,
blank=False, related_name='clinical_annotations')
annotation_step = models.ForeignKey(ClinicalAnnotationStep, on_delete=models.PROTECT,
blank=False, related_name='focus_region_annotations')
action_start_time = models.DateTimeField(null=True, default=None)
action_complete_time = models.DateTimeField(null=True, default=None)
creation_date = models.DateTimeField(default=timezone.now)
# cancerous region fields
perineural_involvement = models.BooleanField(blank=False, null=False, default=False)
intraductal_carcinoma = models.BooleanField(blank=False, null=False, default=False)
ductal_carcinoma = models.BooleanField(blank=False, null=False, default=False)
poorly_formed_glands = models.BooleanField(blank=False, null=False, default=False)
cribriform_pattern = models.BooleanField(blank=False, null=False, default=False)
small_cell_signet_ring = models.BooleanField(blank=False, null=False, default=False)
hypernephroid_pattern = models.BooleanField(blank=False, null=False, default=False)
mucinous = models.BooleanField(blank=False, null=False, default=False)
comedo_necrosis = models.BooleanField(blank=False, null=False, default=False)
# stressed region fields
inflammation = models.BooleanField(blank=False, null=False, default=False)
pah = models.BooleanField(blank=False, null=False, default=False)
atrophic_lesions = models.BooleanField(blank=False, null=False, default=False)
adenosis = models.BooleanField(blank=False, null=False, default=False)
# ---
cellular_density_helper_json = models.TextField(blank=True, null=True)
cellular_density = models.IntegerField(blank=True, null=True)
cells_count = models.IntegerField(blank=True, null=True)
class Meta:
unique_together = ('focus_region', 'annotation_step')
def get_total_gleason_4_area(self):
g4_area = 0
for g4 in self.get_gleason_4_elements():
g4_area += g4.area
return g4_area
def get_gleason_4_elements(self):
return self.gleason_elements.filter(gleason_type='G4')
def get_gleason_4_percentage(self):
g4_area = self.get_total_gleason_4_area()
try:
return (g4_area / self.focus_region.area) * 100.0
except ZeroDivisionError:
return -1
def get_action_duration(self):
if self.action_start_time and self.action_complete_time:
return (self.action_complete_time-self.action_start_time).total_seconds()
else:
return None
class GleasonElement(models.Model):
GLEASON_TYPES = (
('G1', 'GLEASON 1'),
('G2', 'GLEASON 2'),
('G3', 'GLEASON 3'),
('G4', 'GLEASON 4'),
('G5', 'GLEASON 5')
)
focus_region_annotation = models.ForeignKey(FocusRegionAnnotation, related_name='gleason_elements',
blank=False, on_delete=models.CASCADE)
gleason_type = models.CharField(max_length=2, choices=GLEASON_TYPES, blank=False, null=False)
json_path = models.TextField(blank=False, null=False)
area = models.FloatField(blank=False, null=False)
cellular_density_helper_json = models.TextField(blank=True, null=True)
cellular_density = models.IntegerField(blank=True, null=True)
cells_count = models.IntegerField(blank=True, null=True)
action_start_time = models.DateTimeField(null=True, default=None)
action_complete_time = models.DateTimeField(null=True, default=None)
creation_date = models.DateTimeField(default=timezone.now)
def get_gleason_type_label(self):
for choice in self.GLEASON_TYPES:
if choice[0] == self.gleason_type:
return choice[1]
def get_action_duration(self):
if self.action_start_time and self.action_complete_time:
return (self.action_complete_time-self.action_start_time).total_seconds()
else:
return None
|
lucalianas/ProMort
|
promort/clinical_annotations_manager/models.py
|
Python
|
mit
| 10,721
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_lost_aqualish_soldier_female_01.iff"
result.attribute_template_id = 9
result.stfName("npc_name","aqualish_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/mobile/shared_dressed_lost_aqualish_soldier_female_01.py
|
Python
|
mit
| 469
|
import operator
from textwrap import dedent
from twisted.trial import unittest
from ometa.grammar import OMeta, TermOMeta, TreeTransformerGrammar
from ometa.compat import OMeta1
from ometa.runtime import (ParseError, OMetaBase, OMetaGrammarBase, EOFError,
expected, TreeTransformerBase)
from ometa.interp import GrammarInterpreter, TrampolinedGrammarInterpreter
from terml.parser import parseTerm as term
class HandyWrapper(object):
"""
Convenient grammar wrapper for parsing strings.
"""
def __init__(self, klass):
"""
@param klass: The grammar class to be wrapped.
"""
self.klass = klass
def __getattr__(self, name):
"""
Return a function that will instantiate a grammar and invoke the named
rule.
        @param name: Rule name.
"""
def doIt(s):
"""
@param s: The string to be parsed by the wrapped grammar.
"""
obj = self.klass(s)
ret, err = obj.apply(name)
try:
extra, _ = obj.input.head()
except EOFError:
try:
return ''.join(ret)
except TypeError:
return ret
else:
raise err
return doIt
class OMeta1TestCase(unittest.TestCase):
"""
Tests of OMeta grammar compilation, with v1 syntax.
"""
classTested = OMeta1
def compile(self, grammar):
"""
Produce an object capable of parsing via this grammar.
@param grammar: A string containing an OMeta grammar.
"""
m = self.classTested.makeGrammar(dedent(grammar), 'TestGrammar')
g = m.createParserClass(OMetaBase, globals())
return HandyWrapper(g)
def test_literals(self):
"""
Input matches can be made on literal characters.
"""
g = self.compile("digit ::= '1'")
self.assertEqual(g.digit("1"), "1")
self.assertRaises(ParseError, g.digit, "4")
def test_multipleRules(self):
"""
Grammars with more than one rule work properly.
"""
g = self.compile("""
digit ::= '1'
aLetter ::= 'a'
""")
self.assertEqual(g.digit("1"), "1")
self.assertRaises(ParseError, g.digit, "4")
def test_escapedLiterals(self):
"""
Input matches can be made on escaped literal characters.
"""
g = self.compile(r"newline ::= '\n'")
self.assertEqual(g.newline("\n"), "\n")
def test_integers(self):
"""
Input matches can be made on literal integers.
"""
g = self.compile("stuff ::= 17 0x1F -2 0177")
self.assertEqual(g.stuff([17, 0x1f, -2, 0177]), 0177)
self.assertRaises(ParseError, g.stuff, [1, 2, 3])
def test_star(self):
"""
Input matches can be made on zero or more repetitions of a pattern.
"""
g = self.compile("xs ::= 'x'*")
self.assertEqual(g.xs(""), "")
self.assertEqual(g.xs("x"), "x")
self.assertEqual(g.xs("xxxx"), "xxxx")
self.assertRaises(ParseError, g.xs, "xy")
def test_plus(self):
"""
Input matches can be made on one or more repetitions of a pattern.
"""
g = self.compile("xs ::= 'x'+")
self.assertEqual(g.xs("x"), "x")
self.assertEqual(g.xs("xxxx"), "xxxx")
self.assertRaises(ParseError, g.xs, "xy")
self.assertRaises(ParseError, g.xs, "")
def test_sequencing(self):
"""
Input matches can be made on a sequence of patterns.
"""
g = self.compile("twelve ::= '1' '2'")
        self.assertEqual(g.twelve("12"), "2")
self.assertRaises(ParseError, g.twelve, "1")
def test_alternatives(self):
"""
Input matches can be made on one of a set of alternatives.
"""
g = self.compile("digit ::= '0' | '1' | '2'")
self.assertEqual(g.digit("0"), "0")
self.assertEqual(g.digit("1"), "1")
self.assertEqual(g.digit("2"), "2")
self.assertRaises(ParseError, g.digit, "3")
def test_optional(self):
"""
Subpatterns can be made optional.
"""
g = self.compile("foo ::= 'x' 'y'? 'z'")
self.assertEqual(g.foo("xyz"), 'z')
self.assertEqual(g.foo("xz"), 'z')
def test_apply(self):
"""
Other productions can be invoked from within a production.
"""
g = self.compile("""
digit ::= '0' | '1'
bits ::= <digit>+
""")
self.assertEqual(g.bits('0110110'), '0110110')
def test_negate(self):
"""
Input can be matched based on its failure to match a pattern.
"""
g = self.compile("foo ::= ~'0' <anything>")
self.assertEqual(g.foo("1"), "1")
self.assertRaises(ParseError, g.foo, "0")
def test_ruleValue(self):
"""
Productions can specify a Python expression that provides the result
of the parse.
"""
g = self.compile("foo ::= '1' => 7")
self.assertEqual(g.foo('1'), 7)
def test_ruleValueEscapeQuotes(self):
"""
Escaped quotes are handled properly in Python expressions.
"""
g = self.compile(r"""escapedChar ::= '\'' => '\\\''""")
self.assertEqual(g.escapedChar("'"), "\\'")
def test_ruleValueEscapeSlashes(self):
"""
Escaped slashes are handled properly in Python expressions.
"""
g = self.compile(r"""escapedChar ::= '\\' => '\\'""")
self.assertEqual(g.escapedChar("\\"), "\\")
def test_lookahead(self):
"""
Doubled negation does lookahead.
"""
g = self.compile("""
foo ::= ~~(:x) <bar x>
bar :x ::= :a :b ?(x == a == b) => x
""")
self.assertEqual(g.foo("11"), '1')
self.assertEqual(g.foo("22"), '2')
def test_binding(self):
"""
The result of a parsing expression can be bound to a name.
"""
g = self.compile("foo ::= '1':x => int(x) * 2")
self.assertEqual(g.foo("1"), 2)
def test_bindingAccess(self):
"""
Bound names in a rule can be accessed on the grammar's "locals" dict.
"""
G = self.classTested.makeGrammar(
"stuff ::= '1':a ('2':b | '3':c)", 'TestGrammar').createParserClass(OMetaBase, {})
g = G("12")
self.assertEqual(g.apply("stuff")[0], '2')
self.assertEqual(g.locals['stuff']['a'], '1')
self.assertEqual(g.locals['stuff']['b'], '2')
g = G("13")
self.assertEqual(g.apply("stuff")[0], '3')
self.assertEqual(g.locals['stuff']['a'], '1')
self.assertEqual(g.locals['stuff']['c'], '3')
def test_predicate(self):
"""
Python expressions can be used to determine the success or failure of a
parse.
"""
g = self.compile("""
digit ::= '0' | '1'
double_bits ::= <digit>:a <digit>:b ?(a == b) => int(b)
""")
self.assertEqual(g.double_bits("00"), 0)
self.assertEqual(g.double_bits("11"), 1)
self.assertRaises(ParseError, g.double_bits, "10")
self.assertRaises(ParseError, g.double_bits, "01")
def test_parens(self):
"""
Parens can be used to group subpatterns.
"""
g = self.compile("foo ::= 'a' ('b' | 'c')")
self.assertEqual(g.foo("ab"), "b")
self.assertEqual(g.foo("ac"), "c")
def test_action(self):
"""
Python expressions can be run as actions with no effect on the result
of the parse.
"""
g = self.compile("""foo ::= ('1'*:ones !(False) !(ones.insert(0, '0')) => ''.join(ones))""")
self.assertEqual(g.foo("111"), "0111")
def test_bindNameOnly(self):
"""
A pattern consisting of only a bind name matches a single element and
binds it to that name.
"""
g = self.compile("foo ::= '1' :x '2' => x")
self.assertEqual(g.foo("132"), "3")
def test_args(self):
"""
Productions can take arguments.
"""
g = self.compile("""
digit ::= ('0' | '1' | '2'):d => int(d)
foo :x :ignored ::= (?(x > 1) '9' | ?(x <= 1) '8'):d => int(d)
baz ::= <digit>:a <foo a None>:b => [a, b]
""")
self.assertEqual(g.baz("18"), [1, 8])
self.assertEqual(g.baz("08"), [0, 8])
self.assertEqual(g.baz("29"), [2, 9])
self.assertRaises(ParseError, g.foo, "28")
def test_patternMatch(self):
"""
Productions can pattern-match on arguments.
Also, multiple definitions of a rule can be done in sequence.
"""
g = self.compile("""
fact 0 => 1
fact :n ::= <fact (n - 1)>:m => n * m
""")
self.assertEqual(g.fact([3]), 6)
def test_listpattern(self):
"""
Brackets can be used to match contents of lists.
"""
g = self.compile("""
digit ::= :x ?(x.isdigit()) => int(x)
interp ::= [<digit>:x '+' <digit>:y] => x + y
""")
self.assertEqual(g.interp([['3', '+', '5']]), 8)
def test_listpatternresult(self):
"""
The result of a list pattern is the entire list.
"""
g = self.compile("""
digit ::= :x ?(x.isdigit()) => int(x)
interp ::= [<digit>:x '+' <digit>:y]:z => (z, x + y)
""")
e = ['3', '+', '5']
self.assertEqual(g.interp([e]), (e, 8))
def test_recursion(self):
"""
Rules can call themselves.
"""
g = self.compile("""
interp ::= (['+' <interp>:x <interp>:y] => x + y
| ['*' <interp>:x <interp>:y] => x * y
| :x ?(isinstance(x, str) and x.isdigit()) => int(x))
""")
self.assertEqual(g.interp([['+', '3', ['*', '5', '2']]]), 13)
def test_leftrecursion(self):
"""
Left-recursion is detected and compiled appropriately.
"""
g = self.compile("""
num ::= (<num>:n <digit>:d => n * 10 + d
| <digit>)
digit ::= :x ?(x.isdigit()) => int(x)
""")
self.assertEqual(g.num("3"), 3)
self.assertEqual(g.num("32767"), 32767)
def test_characterVsSequence(self):
"""
Characters (in single-quotes) are not regarded as sequences.
"""
g = self.compile("""
interp ::= ([<interp>:x '+' <interp>:y] => x + y
| [<interp>:x '*' <interp>:y] => x * y
| :x ?(isinstance(x, basestring) and x.isdigit()) => int(x))
""")
self.assertEqual(g.interp([['3', '+', ['5', '*', '2']]]), 13)
self.assertEqual(g.interp([[u'3', u'+', [u'5', u'*', u'2']]]), 13)
def test_string(self):
"""
Strings in double quotes match string objects.
"""
g = self.compile("""
interp ::= ["Foo" 1 2] => 3
""")
self.assertEqual(g.interp([["Foo", 1, 2]]), 3)
def test_argEscape(self):
"""
Regression test for bug #239344.
"""
g = self.compile("""
memo_arg :arg ::= <anything> ?(False)
trick ::= <letter> <memo_arg 'c'>
broken ::= <trick> | <anything>*
""")
self.assertEqual(g.broken('ab'), 'ab')
def test_comments(self):
"""
Comments in grammars are accepted and ignored.
"""
g = self.compile("""
#comment here
digit ::= ( '0' #second comment
| '1') #another one
#comments after rules are cool too
bits ::= <digit>+ #last one
""")
self.assertEqual(g.bits('0110110'), '0110110')
def test_accidental_bareword(self):
"""
Accidental barewords are treated as syntax errors in the grammar.
"""
self.assertRaises(ParseError,
self.compile, """
atom ::= ~('|') :a => Regex_Atom(a)
| ' ' atom:a
""")
class OMetaTestCase(unittest.TestCase):
"""
Tests of OMeta grammar compilation.
"""
classTested = OMeta
def compile(self, grammar, globals=None):
"""
Produce an object capable of parsing via this grammar.
@param grammar: A string containing an OMeta grammar.
"""
g = self.classTested.makeGrammar(grammar, 'TestGrammar').createParserClass(OMetaBase, globals or {})
return HandyWrapper(g)
def test_literals(self):
"""
Input matches can be made on literal characters.
"""
g = self.compile("digit = '1'")
self.assertEqual(g.digit("1"), "1")
self.assertRaises(ParseError, g.digit, "4")
def test_escaped_char(self):
"""
Hex escapes are supported in strings in grammars.
"""
g = self.compile(r"bel = '\x07'")
self.assertEqual(g.bel("\x07"), "\x07")
def test_literals_multi(self):
"""
Input matches can be made on multiple literal characters at
once.
"""
g = self.compile("foo = 'foo'")
self.assertEqual(g.foo("foo"), "foo")
self.assertRaises(ParseError, g.foo, "for")
def test_token(self):
"""
Input matches can be made on tokens, which default to
consuming leading whitespace.
"""
g = self.compile('foo = "foo"')
self.assertEqual(g.foo(" foo"), "foo")
self.assertRaises(ParseError, g.foo, "fog")
def test_multipleRules(self):
"""
Grammars with more than one rule work properly.
"""
g = self.compile("""
digit = '1'
aLetter = 'a'
""")
self.assertEqual(g.digit("1"), "1")
self.assertRaises(ParseError, g.digit, "4")
def test_escapedLiterals(self):
"""
Input matches can be made on escaped literal characters.
"""
g = self.compile(r"newline = '\n'")
self.assertEqual(g.newline("\n"), "\n")
def test_integers(self):
"""
Input matches can be made on literal integers.
"""
g = self.compile("stuff = 17 0x1F -2 0177")
self.assertEqual(g.stuff([17, 0x1f, -2, 0177]), 0177)
self.assertRaises(ParseError, g.stuff, [1, 2, 3])
def test_star(self):
"""
Input matches can be made on zero or more repetitions of a pattern.
"""
g = self.compile("xs = 'x'*")
self.assertEqual(g.xs(""), "")
self.assertEqual(g.xs("x"), "x")
self.assertEqual(g.xs("xxxx"), "xxxx")
self.assertRaises(ParseError, g.xs, "xy")
def test_plus(self):
"""
Input matches can be made on one or more repetitions of a pattern.
"""
g = self.compile("xs = 'x'+")
self.assertEqual(g.xs("x"), "x")
self.assertEqual(g.xs("xxxx"), "xxxx")
self.assertRaises(ParseError, g.xs, "xy")
self.assertRaises(ParseError, g.xs, "")
def test_repeat(self):
"""
Match repetitions can be specifically numbered.
"""
g = self.compile("xs = 'x'{2, 4}:n 'x'* -> n")
self.assertEqual(g.xs("xx"), "xx")
self.assertEqual(g.xs("xxxx"), "xxxx")
self.assertEqual(g.xs("xxxxxx"), "xxxx")
self.assertRaises(ParseError, g.xs, "x")
self.assertRaises(ParseError, g.xs, "")
def test_repeat_single(self):
"""
Match repetitions can be specifically numbered.
"""
g = self.compile("xs = 'x'{3}:n 'x'* -> n")
self.assertEqual(g.xs("xxx"), "xxx")
self.assertEqual(g.xs("xxxxxx"), "xxx")
self.assertRaises(ParseError, g.xs, "xx")
def test_repeat_zero(self):
"""
Match repetitions can be specifically numbered.
"""
g = self.compile("xs = 'x'{0}:n 'y' -> n")
self.assertEqual(g.xs("y"), "")
self.assertRaises(ParseError, g.xs, "xy")
def test_repeat_zero_n(self):
"""
Match repetitions can be specifically numbered.
"""
g = self.compile("""
xs :n = 'x'{n}:a 'y' -> a
start = xs(0)
""")
self.assertEqual(g.start("y"), "")
self.assertRaises(ParseError, g.start, "xy")
def test_repeat_var(self):
"""
Match repetitions can be variables.
"""
g = self.compile("xs = (:v -> int(v)):n 'x'{n}:xs 'x'* -> xs")
self.assertEqual(g.xs("2xx"), "xx")
self.assertEqual(g.xs("4xxxx"), "xxxx")
self.assertEqual(g.xs("3xxxxxx"), "xxx")
self.assertRaises(ParseError, g.xs, "2x")
self.assertRaises(ParseError, g.xs, "1")
def test_sequencing(self):
"""
Input matches can be made on a sequence of patterns.
"""
g = self.compile("twelve = '1' '2'")
        self.assertEqual(g.twelve("12"), "2")
self.assertRaises(ParseError, g.twelve, "1")
def test_alternatives(self):
"""
Input matches can be made on one of a set of alternatives.
"""
g = self.compile("digit = '0' | '1' | '2'")
self.assertEqual(g.digit("0"), "0")
self.assertEqual(g.digit("1"), "1")
self.assertEqual(g.digit("2"), "2")
self.assertRaises(ParseError, g.digit, "3")
def test_optional(self):
"""
Subpatterns can be made optional.
"""
g = self.compile("foo = 'x' 'y'? 'z'")
self.assertEqual(g.foo("xyz"), 'z')
self.assertEqual(g.foo("xz"), 'z')
def test_apply(self):
"""
Other productions can be invoked from within a production.
"""
g = self.compile("""
digit = '0' | '1'
bits = digit+
""")
self.assertEqual(g.bits('0110110'), '0110110')
def test_negate(self):
"""
Input can be matched based on its failure to match a pattern.
"""
g = self.compile("foo = ~'0' anything")
self.assertEqual(g.foo("1"), "1")
self.assertRaises(ParseError, g.foo, "0")
def test_ruleValue(self):
"""
Productions can specify a Python expression that provides the result
of the parse.
"""
g = self.compile("foo = '1' -> 7")
self.assertEqual(g.foo('1'), 7)
def test_lookahead(self):
"""
Doubled negation does lookahead.
"""
g = self.compile("""
foo = ~~(:x) bar(x)
bar :x = :a :b ?(x == a == b) -> x
""")
self.assertEqual(g.foo("11"), '1')
self.assertEqual(g.foo("22"), '2')
def test_binding(self):
"""
The result of a parsing expression can be bound to a name.
"""
g = self.compile("foo = '1':x -> int(x) * 2")
self.assertEqual(g.foo("1"), 2)
def test_bindingAccess(self):
"""
Bound names in a rule can be accessed on the grammar's "locals" dict.
"""
G = self.classTested.makeGrammar(
"stuff = '1':a ('2':b | '3':c)", 'TestGrammar').createParserClass(OMetaBase, {})
g = G("12")
self.assertEqual(g.apply("stuff")[0], '2')
self.assertEqual(g.locals['stuff']['a'], '1')
self.assertEqual(g.locals['stuff']['b'], '2')
g = G("13")
self.assertEqual(g.apply("stuff")[0], '3')
self.assertEqual(g.locals['stuff']['a'], '1')
self.assertEqual(g.locals['stuff']['c'], '3')
def test_predicate(self):
"""
Python expressions can be used to determine the success or
failure of a parse.
"""
g = self.compile("""
digit = '0' | '1'
double_bits = digit:a digit:b ?(a == b) -> int(b)
""")
self.assertEqual(g.double_bits("00"), 0)
self.assertEqual(g.double_bits("11"), 1)
self.assertRaises(ParseError, g.double_bits, "10")
self.assertRaises(ParseError, g.double_bits, "01")
def test_parens(self):
"""
Parens can be used to group subpatterns.
"""
g = self.compile("foo = 'a' ('b' | 'c')")
self.assertEqual(g.foo("ab"), "b")
self.assertEqual(g.foo("ac"), "c")
def test_action(self):
"""
Python expressions can be run as actions with no effect on the
result of the parse.
"""
g = self.compile("""foo = ('1'*:ones !(False) !(ones.insert(0, '0')) -> ''.join(ones))""")
self.assertEqual(g.foo("111"), "0111")
def test_bindNameOnly(self):
"""
A pattern consisting of only a bind name matches a single element and
binds it to that name.
"""
g = self.compile("foo = '1' :x '2' -> x")
self.assertEqual(g.foo("132"), "3")
def test_args(self):
"""
Productions can take arguments.
"""
g = self.compile("""
digit = ('0' | '1' | '2'):d -> int(d)
foo :x = (?(x > 1) '9' | ?(x <= 1) '8'):d -> int(d)
baz = digit:a foo(a):b -> [a, b]
""")
self.assertEqual(g.baz("18"), [1, 8])
self.assertEqual(g.baz("08"), [0, 8])
self.assertEqual(g.baz("29"), [2, 9])
self.assertRaises(ParseError, g.foo, "28")
def test_patternMatch(self):
"""
Productions can pattern-match on arguments.
Also, multiple definitions of a rule can be done in sequence.
"""
g = self.compile("""
fact 0 -> 1
fact :n = fact((n - 1)):m -> n * m
""")
self.assertEqual(g.fact([3]), 6)
def test_listpattern(self):
"""
Brackets can be used to match contents of lists.
"""
g = self.compile("""
digit = :x ?(x.isdigit()) -> int(x)
interp = [digit:x '+' digit:y] -> x + y
""")
self.assertEqual(g.interp([['3', '+', '5']]), 8)
def test_listpatternresult(self):
"""
The result of a list pattern is the entire list.
"""
g = self.compile("""
digit = :x ?(x.isdigit()) -> int(x)
interp = [digit:x '+' digit:y]:z -> (z, x + y)
""")
e = ['3', '+', '5']
self.assertEqual(g.interp([e]), (e, 8))
def test_recursion(self):
"""
Rules can call themselves.
"""
g = self.compile("""
interp = (['+' interp:x interp:y] -> x + y
| ['*' interp:x interp:y] -> x * y
| :x ?(isinstance(x, str) and x.isdigit()) -> int(x))
""")
self.assertEqual(g.interp([['+', '3', ['*', '5', '2']]]), 13)
def test_leftrecursion(self):
"""
Left-recursion is detected and compiled appropriately.
"""
g = self.compile("""
num = (num:n digit:d -> n * 10 + d
| digit)
digit = :x ?(x.isdigit()) -> int(x)
""")
self.assertEqual(g.num("3"), 3)
self.assertEqual(g.num("32767"), 32767)
def test_characterVsSequence(self):
"""
Characters (in single-quotes) are not regarded as sequences.
"""
g = self.compile("""
interp = ([interp:x '+' interp:y] -> x + y
| [interp:x '*' interp:y] -> x * y
| :x ?(isinstance(x, basestring) and x.isdigit()) -> int(x))
""")
self.assertEqual(g.interp([['3', '+', ['5', '*', '2']]]), 13)
self.assertEqual(g.interp([[u'3', u'+', [u'5', u'*', u'2']]]), 13)
def test_stringConsumedBy(self):
"""
OMeta2's "consumed-by" operator works on strings.
"""
g = self.compile("""
ident = <letter (letter | digit)*>
""")
self.assertEqual(g.ident("a"), "a")
self.assertEqual(g.ident("abc"), "abc")
self.assertEqual(g.ident("a1z"), "a1z")
self.assertRaises(ParseError, g.ident, "1a")
def test_listConsumedBy(self):
"""
OMeta2's "consumed-by" operator works on lists.
"""
g = self.compile("""
ands = [<"And" (ors | vals)*>:x] -> x
ors = [<"Or" vals*:x>] -> x
vals = 1 | 0
""")
self.assertEqual(g.ands([["And", ["Or", 1, 0], 1]]),
["And", ["Or", 1, 0], 1])
def test_string(self):
"""
Strings in double quotes match string objects.
"""
g = self.compile("""
interp = ["Foo" 1 2] -> 3
""")
self.assertEqual(g.interp([["Foo", 1, 2]]), 3)
def test_argEscape(self):
"""
Regression test for bug #239344.
"""
g = self.compile("""
memo_arg :arg = anything ?(False)
trick = letter memo_arg('c')
broken = trick | anything*
""")
self.assertEqual(g.broken('ab'), 'ab')
class TermActionGrammarTests(OMetaTestCase):
classTested = TermOMeta
def test_binding(self):
"""
The result of a parsing expression can be bound to a name.
"""
g = self.compile("foo = '1':x -> mul(int(x), 2)",
{"mul": operator.mul})
self.assertEqual(g.foo("1"), 2)
def test_bindingAccess(self):
"""
Bound names in a rule can be accessed on the grammar's "locals" dict.
"""
G = self.classTested.makeGrammar(
"stuff = '1':a ('2':b | '3':c)", 'TestGrammar').createParserClass(OMetaBase, {})
g = G("12")
self.assertEqual(g.apply("stuff")[0], '2')
self.assertEqual(g.locals['stuff']['a'], '1')
self.assertEqual(g.locals['stuff']['b'], '2')
g = G("13")
self.assertEqual(g.apply("stuff")[0], '3')
self.assertEqual(g.locals['stuff']['a'], '1')
self.assertEqual(g.locals['stuff']['c'], '3')
def test_predicate(self):
"""
Term actions can be used to determine the success or
failure of a parse.
"""
g = self.compile("""
digit = '0' | '1'
double_bits = digit:a digit:b ?(equal(a, b)) -> int(b)
""", {"equal": operator.eq})
self.assertEqual(g.double_bits("00"), 0)
self.assertEqual(g.double_bits("11"), 1)
self.assertRaises(ParseError, g.double_bits, "10")
self.assertRaises(ParseError, g.double_bits, "01")
def test_action(self):
"""
Term actions can be run as actions with no effect on the
result of the parse.
"""
g = self.compile(
"""foo = ('1'*:ones !(False)
!(nconc(ones, '0')) -> join(ones))""",
{"nconc": lambda lst, val: lst.insert(0, val),
"join": ''.join})
self.assertEqual(g.foo("111"), "0111")
def test_patternMatch(self):
"""
Productions can pattern-match on arguments.
Also, multiple definitions of a rule can be done in sequence.
"""
g = self.compile("""
fact 0 -> 1
fact :n = fact(decr(n)):m -> mul(n, m)
""", {"mul": operator.mul, "decr": lambda x: x -1})
self.assertEqual(g.fact([3]), 6)
def test_listpattern(self):
"""
Brackets can be used to match contents of lists.
"""
g = self.compile("""
digit = :x ?(x.isdigit()) -> int(x)
interp = [digit:x '+' digit:y] -> add(x, y)
""", {"add": operator.add})
self.assertEqual(g.interp([['3', '+', '5']]), 8)
def test_listpatternresult(self):
"""
The result of a list pattern is the entire list.
"""
g = self.compile("""
digit = :x ?(x.isdigit()) -> int(x)
interp = [digit:x '+' digit:y]:z -> [z, plus(x, y)]
""", {"plus": operator.add})
e = ['3', '+', '5']
self.assertEqual(g.interp([e]), [e, 8])
def test_recursion(self):
"""
Rules can call themselves.
"""
g = self.compile("""
interp = (['+' interp:x interp:y] -> add(x, y)
| ['*' interp:x interp:y] -> mul(x, y)
| :x ?(isdigit(x)) -> int(x))
""", {"mul": operator.mul,
"add": operator.add,
"isdigit": lambda x: str(x).isdigit()})
self.assertEqual(g.interp([['+', '3', ['*', '5', '2']]]), 13)
def test_leftrecursion(self):
"""
Left-recursion is detected and compiled appropriately.
"""
g = self.compile("""
num = (num:n digit:d -> makeInt(n, d)
| digit)
digit = :x ?(isdigit(x)) -> int(x)
""", {"makeInt": lambda x, y: x * 10 + y,
"isdigit": lambda x: x.isdigit()})
self.assertEqual(g.num("3"), 3)
self.assertEqual(g.num("32767"), 32767)
def test_characterVsSequence(self):
"""
Characters (in single-quotes) are not regarded as sequences.
"""
g = self.compile(
"""
interp = ([interp:x '+' interp:y] -> add(x, y)
| [interp:x '*' interp:y] -> mul(x, y)
| :x ?(isdigit(x)) -> int(x))
""",
{"add": operator.add, "mul": operator.mul,
"isdigit": lambda x: isinstance(x, basestring) and x.isdigit()})
self.assertEqual(g.interp([['3', '+', ['5', '*', '2']]]), 13)
self.assertEqual(g.interp([[u'3', u'+', [u'5', u'*', u'2']]]), 13)
def test_string(self):
"""
Strings in double quotes match string objects.
"""
g = self.compile("""
interp = ["Foo" 1 2] -> 3
""")
self.assertEqual(g.interp([["Foo", 1, 2]]), 3)
def test_argEscape(self):
"""
Regression test for bug #239344.
"""
g = self.compile("""
memo_arg :arg = anything ?(False)
trick = letter memo_arg('c')
broken = trick | anything*
""")
self.assertEqual(g.broken('ab'), 'ab')
def test_lookahead(self):
"""
Doubled negation does lookahead.
"""
g = self.compile("""
foo = ~~(:x) bar(x)
bar :x = :a :b ?(equal(x, a, b)) -> x
""",
{"equal": lambda i, j, k: i == j == k})
self.assertEqual(g.foo("11"), '1')
self.assertEqual(g.foo("22"), '2')
def test_args(self):
"""
Productions can take arguments.
"""
g = self.compile("""
digit = ('0' | '1' | '2'):d -> int(d)
foo :x = (?(gt(x, 1)) '9' | ?(lte(x, 1)) '8'):d -> int(d)
baz = digit:a foo(a):b -> [a, b]
""", {"lte": operator.le, "gt": operator.gt})
self.assertEqual(g.baz("18"), [1, 8])
self.assertEqual(g.baz("08"), [0, 8])
self.assertEqual(g.baz("29"), [2, 9])
self.assertRaises(ParseError, g.foo, "28")
class PyExtractorTest(unittest.TestCase):
"""
Tests for finding Python expressions in OMeta grammars.
"""
def findInGrammar(self, expr):
"""
L{OMeta.pythonExpr()} can extract a single Python expression from a
string, ignoring the text following it.
"""
o = OMetaGrammarBase(expr + "\nbaz = ...\n")
self.assertEqual(o.pythonExpr()[0][0], expr)
def test_expressions(self):
"""
L{OMeta.pythonExpr()} can recognize various paired delimiters properly
and include newlines in expressions where appropriate.
"""
self.findInGrammar("x")
self.findInGrammar("(x + 1)")
self.findInGrammar("{x: (y)}")
self.findInGrammar("x, '('")
self.findInGrammar('x, "("')
self.findInGrammar('x, """("""')
self.findInGrammar('(x +\n 1)')
self.findInGrammar('[x, "]",\n 1]')
self.findInGrammar('{x: "]",\ny: "["}')
o = OMetaGrammarBase("foo(x[1]])\nbaz = ...\n")
self.assertRaises(ParseError, o.pythonExpr)
o = OMetaGrammarBase("foo(x[1]\nbaz = ...\n")
self.assertRaises(ParseError, o.pythonExpr)
class MakeGrammarTest(unittest.TestCase):
"""
Test the definition of grammars via the 'makeGrammar' method.
"""
def test_makeGrammar(self):
results = []
grammar = """
digit = :x ?('0' <= x <= '9') -> int(x)
num = (num:n digit:d !(results.append(True)) -> n * 10 + d
| digit)
"""
TestGrammar = OMeta.makeGrammar(grammar, "G").createParserClass(OMetaBase, {'results':results})
g = TestGrammar("314159")
self.assertEqual(g.apply("num")[0], 314159)
self.assertNotEqual(len(results), 0)
def test_brokenGrammar(self):
grammar = """
andHandler = handler:h1 'and handler:h2 -> And(h1, h2)
"""
e = self.assertRaises(ParseError, OMeta.makeGrammar, grammar,
"Foo")
self.assertEquals(e.position, 56)
self.assertEquals(e.error, [("expected", None, "\r\n"), ("message", "end of input")])
def test_subclassing(self):
"""
A subclass of an OMeta subclass should be able to call rules on its
parent, and access variables in its scope.
"""
grammar1 = """
dig = :x ?(a <= x <= b) -> int(x)
"""
TestGrammar1 = OMeta.makeGrammar(grammar1, "G").createParserClass(OMetaBase, {'a':'0', 'b':'9'})
grammar2 = """
num = (num:n dig:d -> n * base + d
| dig)
"""
TestGrammar2 = OMeta.makeGrammar(grammar2, "G2").createParserClass(TestGrammar1, {'base':10})
g = TestGrammar2("314159")
self.assertEqual(g.apply("num")[0], 314159)
grammar3 = """
dig = :x ?(a <= x <= b or c <= x <= d) -> int(x, base)
"""
TestGrammar3 = OMeta.makeGrammar(grammar3, "G3").createParserClass(
TestGrammar2, {'c':'a', 'd':'f', 'base':16})
g = TestGrammar3("abc123")
self.assertEqual(g.apply("num")[0], 11256099)
def test_super(self):
"""
Rules can call the implementation in a superclass.
"""
grammar1 = "expr = letter"
TestGrammar1 = OMeta.makeGrammar(grammar1, "G").createParserClass(OMetaBase, {})
grammar2 = "expr = super | digit"
TestGrammar2 = OMeta.makeGrammar(grammar2, "G2").createParserClass(TestGrammar1, {})
self.assertEqual(TestGrammar2("x").apply("expr")[0], "x")
self.assertEqual(TestGrammar2("3").apply("expr")[0], "3")
def test_foreign(self):
"""
        Rules can invoke rules defined in other (foreign) grammars.
"""
grammar_letter = "expr = letter"
GrammarLetter = OMeta.makeGrammar(grammar_letter, "G").createParserClass(OMetaBase, {})
grammar_digit = "expr '5' = digit"
GrammarDigit = OMeta.makeGrammar(grammar_digit, "H").createParserClass(OMetaBase, {})
grammar = ("expr = !(grammar_digit_global):grammar_digit "
"grammar_letter.expr | grammar_digit.expr('5')")
TestGrammar = OMeta.makeGrammar(grammar, "I").createParserClass(
OMetaBase,
{"grammar_letter": GrammarLetter,
"grammar_digit_global": GrammarDigit
})
self.assertEqual(TestGrammar("x").apply("expr")[0], "x")
self.assertEqual(TestGrammar("3").apply("expr")[0], "3")
class HandyInterpWrapper(object):
"""
Convenient grammar wrapper for parsing strings.
"""
def __init__(self, interp):
self._interp = interp
def __getattr__(self, name):
"""
Return a function that will instantiate a grammar and invoke the named
rule.
        @param name: Rule name.
"""
def doIt(s):
"""
@param s: The string to be parsed by the wrapped grammar.
"""
# totally cheating
tree = not isinstance(s, basestring)
input, ret, err = self._interp.apply(s, name, tree)
try:
extra, _ = input.head()
except EOFError:
try:
return ''.join(ret)
except TypeError:
return ret
else:
raise err
return doIt
class InterpTestCase(OMetaTestCase):
def compile(self, grammar, globals=None):
"""
Produce an object capable of parsing via this grammar.
@param grammar: A string containing an OMeta grammar.
"""
g = OMeta(grammar)
tree = g.parseGrammar('TestGrammar')
g = GrammarInterpreter(tree, OMetaBase, globals)
return HandyInterpWrapper(g)
class TrampolinedInterpWrapper(object):
"""
Convenient grammar wrapper for parsing strings.
"""
def __init__(self, tree, globals):
self._tree = tree
self._globals = globals
def __getattr__(self, name):
"""
Return a function that will instantiate a grammar and invoke the named
rule.
        @param name: Rule name.
"""
def doIt(s):
"""
@param s: The string to be parsed by the wrapped grammar.
"""
tree = not isinstance(s, basestring)
if tree:
raise unittest.SkipTest("Not applicable for push parsing")
results = []
def whenDone(val, err):
results.append(val)
parser = TrampolinedGrammarInterpreter(self._tree, name, whenDone,
self._globals)
for i, c in enumerate(s):
assert len(results) == 0
parser.receive(c)
parser.end()
if results and parser.input.position == len(parser.input.data):
try:
return ''.join(results[0])
except TypeError:
return results[0]
else:
raise parser.currentError
return doIt
class TrampolinedInterpreterTestCase(OMetaTestCase):
def compile(self, grammar, globals=None):
g = OMeta(grammar)
tree = g.parseGrammar('TestGrammar')
return TrampolinedInterpWrapper(tree, globals)
def test_failure(self):
g = OMeta("""
foo = 'a':one baz:two 'd'+ 'e' -> (one, two)
baz = 'b' | 'c'
""", {})
tree = g.parseGrammar('TestGrammar')
i = TrampolinedGrammarInterpreter(
tree, 'foo', callback=lambda x: setattr(self, 'result', x))
e = self.assertRaises(ParseError, i.receive, 'foobar')
self.assertEqual(str(e),
"\nfoobar\n^\nParse error at line 2, column 0:"
" expected the character 'a'. trail: []\n")
def test_stringConsumedBy(self):
called = []
grammarSource = "rule = <'x'+>:y -> y"
grammar = OMeta(grammarSource).parseGrammar("Parser")
def interp(result, error):
called.append(result)
trampoline = TrampolinedGrammarInterpreter(grammar, "rule", interp)
trampoline.receive("xxxxx")
trampoline.end()
self.assertEqual(called, ["xxxxx"])
class TreeTransformerTestCase(unittest.TestCase):
def compile(self, grammar, namespace=None):
"""
Produce an object capable of parsing via this grammar.
@param grammar: A string containing an OMeta grammar.
"""
if namespace is None:
namespace = globals()
g = TreeTransformerGrammar.makeGrammar(
dedent(grammar), 'TestGrammar').createParserClass(
TreeTransformerBase, namespace)
return g
def test_termForm(self):
g = self.compile("Foo(:left :right) -> left.data + right.data")
self.assertEqual(g.transform(term("Foo(1, 2)"))[0], 3)
def test_termFormNest(self):
g = self.compile("Foo(:left Baz(:right)) -> left.data + right.data")
self.assertEqual(g.transform(term("Foo(1, Baz(2))"))[0], 3)
def test_listForm(self):
g = self.compile("Foo(:left [:first :second]) -> left.data + first.data + second.data")
self.assertEqual(g.transform(term("Foo(1, [2, 3])"))[0], 6)
def test_emptyList(self):
g = self.compile("Foo([]) -> 6")
self.assertEqual(g.transform(term("Foo([])"))[0], 6)
def test_emptyArgs(self):
g = self.compile("Foo() -> 6")
self.assertEqual(g.transform(term("Foo()"))[0], 6)
def test_emptyArgsMeansEmpty(self):
g = self.compile("""
Foo() -> 6
Foo(:x) -> x
""")
self.assertEqual(g.transform(term("Foo(3)"))[0].data, 3)
def test_subTransform(self):
g = self.compile("""
Foo(:left @right) -> left.data + right
Baz(:front :back) -> front.data * back.data
""")
self.assertEqual(g.transform(term("Foo(1, Baz(2, 3))"))[0], 7)
def test_defaultExpand(self):
g = self.compile("""
Foo(:left @right) -> left.data + right
Baz(:front :back) -> front.data * back.data
""")
self.assertEqual(g.transform(term("Blee(Foo(1, 2), Baz(2, 3))"))[0],
term("Blee(3, 6)"))
def test_wide_template(self):
g = self.compile(
"""
Pair(@left @right) --> $left, $right
Name(@n) = ?(n == "a") --> foo
| --> baz
""")
self.assertEqual(g.transform(term('Pair(Name("a"), Name("b"))'))[0],
"foo, baz")
def test_tall_template(self):
g = self.compile(
"""
Name(@n) = ?(n == "a") --> foo
| --> baz
Pair(@left @right) {{{
$left
also, $right
}}}
""")
self.assertEqual(g.transform(term('Pair(Name("a"), Name("b"))'))[0],
"foo\nalso, baz")
def test_tall_template_suite(self):
g = self.compile(
"""
Name(@n) -> n
If(@test @suite) {{{
if $test:
$suite
}}}
""")
self.assertEqual(g.transform(term('If(Name("a"), [Name("foo"), Name("baz")])'))[0],
"if a:\n foo\n baz")
def test_foreign(self):
"""
        Rules can invoke rules defined in other (foreign) grammars.
"""
grammar_letter = "expr = letter"
GrammarLetter = self.compile(grammar_letter, {})
grammar_digit = "expr '5' = digit"
GrammarDigit = self.compile(grammar_digit, {})
grammar = ("expr = !(grammar_digit_global):grammar_digit "
"GrammarLetter.expr | grammar_digit.expr('5')")
TestGrammar = self.compile(grammar, {
"GrammarLetter": GrammarLetter,
"grammar_digit_global": GrammarDigit
})
self.assertEqual(TestGrammar("x").apply("expr")[0], "x")
self.assertEqual(TestGrammar("3").apply("expr")[0], "3")
class ErrorReportingTests(unittest.TestCase):
def compile(self, grammar):
"""
Produce an object capable of parsing via this grammar.
@param grammar: A string containing an OMeta grammar.
"""
g = OMeta.makeGrammar(grammar, 'TestGrammar').createParserClass(OMetaBase, {})
return HandyWrapper(g)
def test_rawReporting(self):
"""
Errors from parsing contain enough info to figure out what was
expected and where.
"""
g = self.compile("""
start = ( (person feeling target)
| (adjective animal feeling token("some") target))
adjective = token("crazy") | token("clever") | token("awesome")
feeling = token("likes") | token("loves") | token("hates")
animal = token("monkey") | token("horse") | token("unicorn")
person = token("crazy horse") | token("hacker")
target = (token("bananas") | token("robots") | token("americans")
| token("bacon"))
""")
#some warmup
g.start("clever monkey hates some robots")
g.start("awesome unicorn loves some bacon")
g.start("crazy horse hates americans")
g.start("hacker likes robots")
e = self.assertRaises(ParseError, g.start,
"clever hacker likes bacon")
self.assertEqual(e.position, 8)
self.assertEqual(e.error, [('expected', "token", "horse")])
e = self.assertRaises(ParseError, g.start,
"crazy horse likes some grass")
#matching "some" means second branch of 'start' is taken
self.assertEqual(e.position, 23)
self.assertEqual(e.error, [('expected', "token", "bananas"),
('expected', 'token', "bacon"),
('expected', "token", "robots"),
('expected', "token", "americans")])
e = self.assertRaises(ParseError, g.start,
"crazy horse likes mountains")
#no "some" means first branch of 'start' is taken...
#but second is also viable
self.assertEqual(e.position, 18)
self.assertEqual(e.error, [('expected', "token", "some"),
('expected', "token", "bananas"),
('expected', 'token', "bacon"),
('expected', "token", "robots"),
('expected', "token", "americans")])
def test_formattedReporting(self):
"""
Parse errors can be formatted into a nice human-readable view
containing the erroneous input and possible fixes.
"""
g = self.compile("""
dig = '1' | '2' | '3'
bits = <dig>+
""")
input = "123x321"
e = self.assertRaises(ParseError, g.bits, input)
self.assertEqual(e.formatError(),
dedent("""
123x321
^
Parse error at line 1, column 3: expected one of '1', '2', or '3'. trail: [dig]
"""))
input = "foo\nbaz\nboz\ncharlie\nbuz"
e = ParseError(input, 12, expected('token', 'foo') + expected(None, 'b'))
self.assertEqual(e.formatError(),
dedent("""
charlie
^
Parse error at line 4, column 0: expected one of 'b', or token 'foo'. trail: []
"""))
input = '123x321'
e = ParseError(input, 3, expected('digit'))
self.assertEqual(e.formatError(),
dedent("""
123x321
^
Parse error at line 1, column 3: expected a digit. trail: []
"""))
|
pde/torbrowser-launcher
|
lib/Parsley-1.1/ometa/test/test_pymeta.py
|
Python
|
mit
| 48,024
|
#! /usr/bin/env python
#-*- coding: utf-8 -*-
# ***** BEGIN LICENSE BLOCK *****
# This file is part of Shelter Database.
# Copyright (c) 2016 Luxembourg Institute of Science and Technology.
# All rights reserved.
#
#
#
# ***** END LICENSE BLOCK *****
__author__ = "Cedric Bonhomme"
__version__ = "$Revision: 0.2 $"
__date__ = "$Date: 2016/05/31$"
__revision__ = "$Date: 2016/06/11 $"
__copyright__ = "Copyright 2016 Luxembourg Institute of Science and Technology"
__license__ = ""
import logging
import datetime
from werkzeug import generate_password_hash
from flask import (render_template, flash, session, request,
url_for, redirect, current_app, g)
from flask_login import LoginManager, logout_user, \
login_required, current_user
from flask_principal import (Principal, AnonymousIdentity, UserNeed,
identity_changed, identity_loaded,
session_identity_loader)
import conf
from bootstrap import db
from web.views.common import admin_role, login_user_bundle
from web.models import User
from web.forms import LoginForm #, SignupForm
from web.lib.utils import HumanitarianId
#from notifications import notifications
Principal(current_app)
# Create a permission with a single Need, in this case a RoleNeed.
login_manager = LoginManager()
login_manager.init_app(current_app)
login_manager.login_message = u"Please log in to access this page."
login_manager.login_message_category = "warning"
login_manager.login_view = 'login'
logger = logging.getLogger(__name__)
@identity_loaded.connect_via(current_app._get_current_object())
def on_identity_loaded(sender, identity):
# Set the identity user object
identity.user = current_user
# Add the UserNeed to the identity
if current_user.is_authenticated:
identity.provides.add(UserNeed(current_user.id))
if current_user.is_admin:
identity.provides.add(admin_role)
@login_manager.user_loader
def load_user(user_id):
return User.query.filter(User.id==user_id, User.is_active==True).first()
@current_app.before_request
def before_request():
g.user = current_user
if g.user.is_authenticated:
g.user.last_seen = datetime.datetime.now()
db.session.commit()
@current_app.route('/login', methods=['GET'])
def join():
if current_user.is_authenticated or HumanitarianId().login():
return redirect(url_for('index'))
form = LoginForm()
#signup = SignupForm()
return render_template(
'login.html',
humanitarian_id_auth_uri=conf.HUMANITARIAN_ID_AUTH_URI,
client_id=conf.HUMANITARIAN_ID_CLIENT_ID,
redirect_uri=conf.HUMANITARIAN_ID_REDIRECT_URI,
loginForm=form #, signupForm=signup
)
@current_app.route('/login', methods=['POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
flash('You are logged in', 'info')
login_user_bundle(form.user)
return form.redirect('index')
#signup = SignupForm()
return render_template(
'login.html',
humanitarian_id_auth_uri=conf.HUMANITARIAN_ID_AUTH_URI,
client_id=conf.HUMANITARIAN_ID_CLIENT_ID,
redirect_uri=conf.HUMANITARIAN_ID_REDIRECT_URI,
loginForm=form #, signupForm=signup
)
@current_app.route('/callback/humanitarianid', methods=['GET'])
def login_humanitarianid():
if current_user.is_authenticated:
return redirect(url_for('index'))
access_token = request.values.get('access_token', None)
if access_token:
session['hid_access_token'] = access_token
return redirect(url_for('join'))
return render_template('humanitarianid_login.html')
@current_app.route('/logout')
@login_required
def logout():
# Remove the user information from the session
logout_user()
flash('You are logged out', 'warning')
# Remove session keys set by Flask-Principal
for key in ('identity.name', 'identity.auth_type', 'hid_access_token'):
session.pop(key, None)
# Tell Flask-Principal the user is anonymous
identity_changed.send(current_app, identity=AnonymousIdentity())
session_identity_loader()
if request.values.get('hid_logout'):
return redirect(conf.HUMANITARIAN_ID_AUTH_URI+'/logout')
return redirect(url_for('index'))
#@current_app.route('/signup', methods=['POST'])
#def signup():
# """if not conf.SELF_REGISTRATION:
# flash("Self-registration is disabled.", 'warning')
# return redirect(url_for('index'))"""
# if current_user.is_authenticated:
# return redirect(url_for('index'))#
#
# form = SignupForm()
# if form.validate_on_submit():
# user = User(name=form.name.data,
# email=form.email.data,
# pwdhash=generate_password_hash(form.password.data),
# is_active=True)
# db.session.add(user)
# db.session.commit()
# flash('Your account has been created. ', 'success')
# login_user_bundle(user) # automatically log the user
#
# return form.redirect('index')
#
# loginForm = LoginForm()
# return render_template(
# 'join.html',
# loginForm=loginForm, signupForm=form
# )
|
rodekruis/shelter-database
|
src/web/views/session_mgmt.py
|
Python
|
mit
| 5,400
|
from image_mat_util import *
from mat import Mat
from vec import Vec
import matutil
from solver import solve
## Task 1
def move2board(v):
'''
Input:
- v: a vector with domain {'y1','y2','y3'}, the coordinate representation of a point q.
Output:
- A {'y1','y2','y3'}-vector z, the coordinate representation
in whiteboard coordinates of the point p such that the line through the
origin and q intersects the whiteboard plane at p.
'''
return Vec({'y1','y2','y3'}, { key:val/v.f['y3'] for key, val in v.f.items() })
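# A minimal sketch of move2board on made-up coordinates (not from the lab
# data): dividing every entry by the 'y3' entry projects q onto the y3 = 1
# whiteboard plane.
#
# >>> q = Vec({'y1','y2','y3'}, {'y1': 2, 'y2': 4, 'y3': 8})
# >>> move2board(q).f
# {'y1': 0.25, 'y2': 0.5, 'y3': 1.0}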
## Task 2
def make_equations(x1, x2, w1, w2):
'''
Input:
- x1 & x2: photo coordinates of a point on the board
        - w1 & w2: whiteboard coordinates of the same point on the board
Output:
- List [u,v] where u*h = 0 and v*h = 0
'''
domain = {(a, b) for a in {'y1', 'y2', 'y3'} for b in {'x1', 'x2', 'x3'}}
u = Vec(domain, {('y3','x1'):w1*x1,('y3','x2'):w1*x2,('y3','x3'):w1,('y1','x1'):-x1,('y1','x2'):-x2,('y1','x3'):-1})
v = Vec(domain, {('y3','x1'):w2*x1,('y3','x2'):w2*x2,('y3','x3'):w2,('y2','x1'):-x1,('y2','x2'):-x2,('y2','x3'):-1})
return [u, v]
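# Derivation sketch (assumed camera model for this lab): with h the unknown
# homography, the whiteboard coordinates satisfy w1 = (h_y1 . x)/(h_y3 . x)
# and w2 = (h_y2 . x)/(h_y3 . x), where x = (x1, x2, 1). Cross-multiplying
# gives w1*(h_y3 . x) - (h_y1 . x) = 0, which is the constraint u*h = 0
# built above, and the analogous v*h = 0 for w2.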
## Task 3
H = Mat(({'y1', 'y3', 'y2'}, {'x2', 'x3', 'x1'}), {('y3', 'x1'): -0.7219356810710031, ('y2', 'x1'): -0.3815213180054361, ('y2', 'x2'): 0.7378180860600992, ('y1', 'x1'): 1.0, ('y2', 'x3'): 110.0231807477826, ('y3', 'x3'): 669.4762699006177, ('y1', 'x3'): -359.86096256684493, ('y3', 'x2'): -0.011690730864965311, ('y1', 'x2'): 0.05169340463458105})
## Task 4
def mat_move2board(Y):
'''
Input:
- Y: Mat instance, each column of which is a 'y1', 'y2', 'y3' vector
giving the whiteboard coordinates of a point q.
Output:
- Mat instance, each column of which is the corresponding point in the
whiteboard plane (the point of intersection with the whiteboard plane
of the line through the origin and q).
'''
col_dict = matutil.mat2coldict(Y)
new_col_dic = {}
for key, val in col_dict.items():
new_col_dic[key] = Vec(val.D, { k:v/val.f['y3'] for k, v in val.f.items() })
return matutil.coldict2mat(new_col_dic)
# import perspective_lab
# from mat import Mat
# import vecutil
# import matutil
# import image_mat_util
# from vec import Vec
# from GF2 import one
# from solver import solve
# row_dict = {}
# row_dict[0] = perspective_lab.make_equations(358, 36, 0, 0)[0]
# row_dict[1] = perspective_lab.make_equations(358, 36, 0, 0)[1]
# row_dict[2] = perspective_lab.make_equations(329, 597, 0, 1)[0]
# row_dict[3] = perspective_lab.make_equations(329, 597, 0, 1)[1]
# row_dict[4] = perspective_lab.make_equations(592, 157, 1, 0)[0]
# row_dict[5] = perspective_lab.make_equations(592, 157, 1, 0)[1]
# row_dict[6] = perspective_lab.make_equations(580, 483, 1, 1)[0]
# row_dict[7] = perspective_lab.make_equations(580, 483, 1, 1)[1]
# foo = perspective_lab.make_equations(0, 0, 0, 0)[0]
# foo[('y1', 'x1')] = 1
# foo[('y1', 'x3')] = 0
# row_dict[8] = foo
# M = matutil.rowdict2mat(row_dict)
# print(M)
# solve(M, vecutil.list2vec([0, 0, 0, 0, 0, 0, 0, 0, 1]))
# Y_in = Mat(({'y1', 'y2', 'y3'}, {0,1,2,3}),
# {('y1',0):2, ('y2',0):4, ('y3',0):8,
# ('y1',1):10, ('y2',1):5, ('y3',1):5,
# ('y1',2):4, ('y2',2):25, ('y3',2):2,
# ('y1',3):5, ('y2',3):10, ('y3',3):4})
# print(Y_in)
# print(perspective_lab.mat_move2board(Y_in))
# (X_pts, colors) = image_mat_util.file2mat('board.png', ('x1','x2','x3'))
# H = Mat(({'y1', 'y3', 'y2'}, {'x2', 'x3', 'x1'}), {('y3', 'x1'): -0.7219356810710031, ('y2', 'x1'): -0.3815213180054361, ('y2', 'x2'): 0.7378180860600992, ('y1', 'x1'): 1.0, ('y2', 'x3'): 110.0231807477826, ('y3', 'x3'): 669.4762699006177, ('y1', 'x3'): -359.86096256684493, ('y3', 'x2'): -0.011690730864965311, ('y1', 'x2'): 0.05169340463458105})
# Y_pts = H * X_pts
# Y_board = perspective_lab.mat_move2board(Y_pts)
# image_mat_util.mat2display(Y_board, colors, ('y1', 'y2', 'y3'),
# scale=100, xmin=None, ymin=None)
|
vvw/linearAlgebra-coursera
|
assignment 5/perspective_lab/perspective_lab.py
|
Python
|
mit
| 4,055
|
# -*- coding: utf-8 -*-
# Authors: Y. Jia <ytjia.zju@gmail.com>
import unittest
from common.BinaryTree import BinaryTree
from .. import BinaryTreeMaximumPathSum
class test_BinaryTreeMaximumPathSum(unittest.TestCase):
solution = BinaryTreeMaximumPathSum.Solution()
def test_maxPathSum(self):
self.assertEqual(self.solution.maxPathSum(BinaryTree.create_tree([1, 2, 3])[0]), 6)
if __name__ == '__main__':
unittest.main()
|
ytjia/coding-practice
|
algorithms/python/leetcode/tests/test_BinaryTreeMaximumPathSum.py
|
Python
|
mit
| 446
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2011,2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Wire Data Helper"""
import dns.exception
from ._compat import binary_type, string_types, PY2
# Figure out what constant python passes for an unspecified slice bound.
# It's supposed to be sys.maxint, yet on 64-bit windows sys.maxint is 2^31 - 1
# but Python uses 2^63 - 1 as the constant. Rather than making pointless
# extra comparisons, duplicating code, or weakening WireData, we just figure
# out what constant Python will use.
class _SliceUnspecifiedBound(binary_type):
def __getitem__(self, key):
return key.stop
if PY2:
def __getslice__(self, i, j): # pylint: disable=getslice-method
return self.__getitem__(slice(i, j))
_unspecified_bound = _SliceUnspecifiedBound()[1:]
class WireData(binary_type):
# WireData is a binary type with stricter slicing
def __getitem__(self, key):
try:
if isinstance(key, slice):
# make sure we are not going outside of valid ranges,
# do stricter control of boundaries than python does
# by default
start = key.start
stop = key.stop
if PY2:
if stop == _unspecified_bound:
# handle the case where the right bound is unspecified
stop = len(self)
if start < 0 or stop < 0:
raise dns.exception.FormError
# If it's not an empty slice, access left and right bounds
# to make sure they're valid
if start != stop:
super(WireData, self).__getitem__(start)
super(WireData, self).__getitem__(stop - 1)
else:
for index in (start, stop):
if index is None:
continue
elif abs(index) > len(self):
raise dns.exception.FormError
return WireData(super(WireData, self).__getitem__(
slice(start, stop)))
return bytearray(self.unwrap())[key]
except IndexError:
raise dns.exception.FormError
if PY2:
def __getslice__(self, i, j): # pylint: disable=getslice-method
return self.__getitem__(slice(i, j))
def __iter__(self):
i = 0
while 1:
try:
yield self[i]
i += 1
except dns.exception.FormError:
raise StopIteration
def unwrap(self):
return binary_type(self)
def maybe_wrap(wire):
if isinstance(wire, WireData):
return wire
elif isinstance(wire, binary_type):
return WireData(wire)
elif isinstance(wire, string_types):
return WireData(wire.encode())
raise ValueError("unhandled type %s" % type(wire))
|
waynechu/PythonProject
|
dns/wiredata.py
|
Python
|
mit
| 3,751
|
"""
WSGI config for firstapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "firstapp.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
nickromano/django-slow-tests
|
_examples/django17/firstapp/wsgi.py
|
Python
|
mit
| 389
|
from cupy import elementwise
_id = 'out0 = in0'
# TODO(okuta): Implement convolve
_clip = elementwise.create_ufunc(
'cupy_clip',
('???->?', 'bbb->b', 'BBB->B', 'hhh->h', 'HHH->H', 'iii->i', 'III->I',
'lll->l', 'LLL->L', 'qqq->q', 'QQQ->Q', 'eee->e', 'fff->f', 'ddd->d'),
'out0 = min(in2, max(in1, in0))')
def clip(a, a_min, a_max, out=None):
'''Clips the values of an array to a given interval.
This is equivalent to ``maximum(minimum(a, a_max), a_min)``, while this
function is more efficient.
Args:
a (cupy.ndarray): The source array.
a_min (scalar or cupy.ndarray): The left side of the interval.
a_max (scalar or cupy.ndarray): The right side of the interval.
out (cupy.ndarray): Output array.
Returns:
cupy.ndarray: Clipped array.
.. seealso:: :func:`numpy.clip`
'''
return _clip(a, a_min, a_max, out=out)
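# A minimal usage sketch of the clip ufunc defined above (hypothetical
# values; assumes a working CuPy installation):
#
# >>> import cupy
# >>> cupy.clip(cupy.array([1, 5, 10]), 2, 8)
# array([2, 5, 8])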
sqrt = elementwise.create_ufunc(
'cupy_sqrt',
    # This ordering looks like a bug in NumPy, but we keep the same "buggy"
    # behavior for compatibility with NumPy.
('f->f', 'd->d', 'e->e'),
'out0 = sqrt(in0)',
doc='''Elementwise positive square-root function.
.. note::
    This ufunc outputs float32 arrays for float16 array inputs by default,
    matching NumPy 1.9. If you want to override this behavior, specify the
    dtype argument explicitly, or use ``cupy.math.misc.sqrt_fixed`` instead.
.. seealso:: :data:`numpy.sqrt`
''')
# Fixed version of sqrt
sqrt_fixed = elementwise.create_ufunc(
'cupy_sqrt',
('e->e', 'f->f', 'd->d'),
'out0 = sqrt(in0)')
square = elementwise.create_ufunc(
'cupy_square',
('b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L', 'q->q',
'Q->Q', 'e->e', 'f->f', 'd->d'),
'out0 = in0 * in0',
doc='''Elementwise square function.
.. seealso:: :data:`numpy.square`
''')
absolute = elementwise.create_ufunc(
'cupy_absolute',
(('?->?', _id), 'b->b', ('B->B', _id), 'h->h', ('H->H', _id), 'i->i',
('I->I', _id), 'l->l', ('L->L', _id), 'q->q', ('Q->Q', _id),
('e->e', 'out0 = fabsf(in0)'),
('f->f', 'out0 = fabsf(in0)'),
('d->d', 'out0 = fabs(in0)')),
'out0 = in0 > 0 ? in0 : -in0',
doc='''Elementwise absolute value function.
.. seealso:: :data:`numpy.absolute`
''')
# TODO(beam2d): Implement it
# fabs
_unsigned_sign = 'out0 = in0 > 0'
sign = elementwise.create_ufunc(
'cupy_sign',
('b->b', ('B->B', _unsigned_sign), 'h->h', ('H->H', _unsigned_sign),
'i->i', ('I->I', _unsigned_sign), 'l->l', ('L->L', _unsigned_sign),
'q->q', ('Q->Q', _unsigned_sign), 'e->e', 'f->f', 'd->d'),
'out0 = (in0 > 0) - (in0 < 0)',
doc='''Elementwise sign function.
It returns -1, 0, or 1 depending on the sign of the input.
.. seealso:: :data:`numpy.sign`
''')
_float_maximum = \
'out0 = isnan(in0) ? in0 : isnan(in1) ? in1 : max(in0, in1)'
maximum = elementwise.create_ufunc(
'cupy_maximum',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', _float_maximum),
('ff->f', _float_maximum),
('dd->d', _float_maximum)),
'out0 = max(in0, in1)',
doc='''Takes the maximum of two arrays elementwise.
If NaN appears, it returns the NaN.
.. seealso:: :data:`numpy.maximum`
''')
_float_minimum = \
'out0 = isnan(in0) ? in0 : isnan(in1) ? in1 : min(in0, in1)'
minimum = elementwise.create_ufunc(
'cupy_minimum',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', _float_minimum),
('ff->f', _float_minimum),
('dd->d', _float_minimum)),
'out0 = min(in0, in1)',
doc='''Takes the minimum of two arrays elementwise.
If NaN appears, it returns the NaN.
.. seealso:: :data:`numpy.minimum`
''')
fmax = elementwise.create_ufunc(
'cupy_fmax',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),
'out0 = max(in0, in1)',
doc='''Takes the maximum of two arrays elementwise.
If NaN appears, it returns the other operand.
.. seealso:: :data:`numpy.fmax`
''')
fmin = elementwise.create_ufunc(
'cupy_fmin',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d'),
'out0 = min(in0, in1)',
doc='''Takes the minimum of two arrays elementwise.
    If NaN appears, it returns the other operand.
.. seealso:: :data:`numpy.fmin`
''')
# TODO(okuta): Implement nan_to_num
# TODO(okuta): Implement real_if_close
# TODO(okuta): Implement interp
|
tscohen/chainer
|
cupy/math/misc.py
|
Python
|
mit
| 4,772
|
from common2 import *
# NAME IDEA -> pooling/random/sparse/distributed hebbian/horde/crowd/fragment/sample memory
# FEATURES:
# + boost -- neurons with empty mem slots learn faster
# + noise -- randomize scores to prevent snowballing
# + dropout -- temporal disabling of neurons
# + decay -- remove from mem
# + negatives -- learning to avoid detecting some patterns
# + fatigue -- winner has lower score for some time
# ~ sklearn -- compatible api
# - prune -- if input < mem shrink mem ? (problem with m > input len)
# - weights -- sample weights for imbalanced classes
# - popularity -- most popular neuron is cloned / killed
# NEXT VERSION:
# - attention
# - https://towardsdatascience.com/the-fall-of-rnn-lstm-2d1594c74ce0
# - https://towardsdatascience.com/memory-attention-sequences-37456d271992
# - https://medium.com/breathe-publication/neural-networks-building-blocks-a5c47bcd7c8d
# - https://distill.pub/2016/augmented-rnns/
# - http://akosiorek.github.io/ml/2017/10/14/visual-attention.html
# + IDEA:
# append activated neurons indexes to queue available as input
# queue ages at constant rate and drops oldest values
# - IDEA:
# each neuron has small memory of activation prior to winning
# this memory is compared to ctx and intersection added to score
# winner updated this memory
# OPTION: several memories with diferent time frames
# NEXT VERSION:
# - layers -- rsm stacking
# NEXT VERSIONS:
# - numpy -- faster version
# - cython -- faster version
# - gpu -- faster version
# - distributed
class rsm:
def __init__(self,n,m,c=0,**kw):
"""Random Sample Memory
n -- number of neurons
m -- max connections per neuron (memory)
"""
self.mem = {j:set() for j in range(n)}
self.win = {j:0 for j in range(n)}
self.tow = {j:-42000 for j in range(n)} # time of win
self.t = 0
self.ctx = deque(maxlen=c) # context queue
# cfg
cfg = {}
cfg['n'] = n
cfg['m'] = m
cfg['c'] = c
cfg['k'] = kw.get('k',1)
cfg['method'] = kw.get('method',1)
cfg['cutoff'] = kw.get('cutoff',0.5)
cfg['decay'] = kw.get('decay',0.0)
cfg['dropout'] = kw.get('dropout',0.0)
cfg['fatigue'] = kw.get('fatigue',0)
cfg['boost'] = kw.get('boost',True)
cfg['noise'] = kw.get('noise',True)
cfg['sequence'] = kw.get('sequence',False)
cfg.update(kw)
self.cfg = cfg
# ---[ core ]---------------------------------------------------------------
def new_ctx(self):
self.ctx.clear()
# TODO -- input length vs mem length
# TODO -- args from cfg
def scores(self, input, raw=False, boost=False, noise=False, fatigue=0, dropout=0.0, **ignore): # -> dict[i] -> scores
"""
input -- sparse binary features
raw -- disable all postprocessing
boost -- improve scores based on number of unconnected synapses (TODO)
noise -- randomize scores to prevent snowballing
dropout -- temporal disabling of neurons
"""
mem = self.mem
tow = self.tow
N = self.cfg['n']
M = self.cfg['m']
t = self.t
scores = {}
for j in mem:
scores[j] = len(set(input) & mem[j])
if raw:
return scores
if noise:
for j in mem:
scores[j] += 0.9*random()
if boost:
for j in mem:
scores[j] += 1+2*(M-len(mem[j])) if len(mem[j])<M else 0
# TODO boost also based on low win ratio / low tow
if fatigue:
for j in mem:
dt = 1.0*min(fatigue,t - tow[j])
factor = dt / fatigue
scores[j] *= factor
if dropout:
k = int(round(float(dropout)*N))
            # disable k randomly sampled neurons (assumes 'pick' samples k
            # items from a collection, as used in learn_ below)
            for j in pick(range(N), k):
scores[j] = -1
return scores
def learn(self, input, negative=False, **ignore):
for i in range(0,len(input),10):
self.learn_(set(input[i:i+10]),negative=negative)
def learn_(self, input, negative=False, **ignore):
"""
input -- sparse binary features
k -- number of winning neurons
"""
mem = self.mem
win = self.win
tow = self.tow
ctx = self.ctx
t = self.t
cfg = self.cfg
M = self.cfg['m']
N = self.cfg['n']
k = self.cfg['k']
decay = self.cfg['decay']
sequence = self.cfg['sequence']
known_inputs = set()
for j in mem:
known_inputs.update(mem[j])
# context
input = input | set(ctx)
# scoring
scores = self.scores(input, **cfg)
winners = top(k,scores)
for j in winners:
# negative learning
if negative:
mem[j].difference_update(input)
continue
# positive learning
unknown_inputs = input - known_inputs
mem[j].update(pick(unknown_inputs, M-len(mem[j])))
known_inputs.update(mem[j])
# handle decay
if decay:
decay_candidates = mem[j] - input
if decay_candidates:
for d in decay_candidates:
if random() < decay:
mem[j].remove(d)
# handle popularity
win[j] += 1
# handle fatigue
tow[j] = t
# handle context
if sequence:
for i in range(len(ctx)):
ctx[i] -= N
for j in winners:
ctx.append(-j-1)
self.t += 1
# ---[ auxiliary ]----------------------------------------------------------
def fit(self, X, Y):
cfg = self.cfg
for x,y in zip(X,Y):
negative = not y
self.learn(x,negative=negative,**cfg)
def fit2(self, X1, X0):
cfg = self.cfg
# TODO - unbalanced
for x1,x0 in zip(X1,X0):
self.learn(x1,negative=False,**cfg)
self.learn(x0,negative=True,**cfg)
def transform(self, X):
cutoff = self.cfg['cutoff']
out = []
for s in self.score_many(X):
y = 1 if s>=cutoff else 0
out += [y]
return out
def fit_transform(self, X, Y):
self.fit(X,Y)
return self.transform(X)
def score(self, X, Y, kind='acc'):
c = self.confusion(X,Y)
p = float(c['p'])
n = float(c['n'])
tp = float(c['tp'])
tn = float(c['tn'])
fp = float(c['fp'])
fn = float(c['fn'])
try:
if kind=='acc':
return (tp + tn) / (p + n)
elif kind=='f1':
return (2*tp) / (2*tp + fp + fn)
elif kind=='prec':
return tp / (tp + fp)
elif kind=='sens':
return tp / (tp + fn)
elif kind=='spec':
return tn / (tn + fp)
except ZeroDivisionError:
return float('nan')
def confusion(self, X, Y):
PY = self.transform(X)
p = 0
n = 0
tp = 0
tn = 0
fp = 0
fn = 0
for y,py in zip(Y,PY):
if y: p+=1
else: n+=1
if y:
if py: tp+=1
else: fn+=1
else:
if py: fp+=1
else: tn+=1
return dict(p=p,n=n,tp=tp,tn=tn,fp=fp,fn=fn)
def score_many(self, X):
out = []
for x in X:
s = self.score_one(x)
out += [s]
return out
# TODO
def calibrate(self, X, Y, kind='f1'):
for i in range(1,20):
c = 0.05*i
self.set_params(cutoff=c)
s = self.score(X,Y,kind)
            print('{} {:.3} -> {:.3}'.format(kind, c, s))
def score_one(self, input):
"aggregate scores to scalar"
k = self.cfg['k']
method = self.cfg['method']
scores = self.scores(input)
M = self.cfg['m']
if method==0:
return top(k, scores, values=True)
elif method==1:
score = 1.0*sum(top(k, scores, values=True))/(k*(M+1))
return score
elif method==2:
score = 1.0*sum(top(k, scores, values=True))/(k*M)
return min(1.0,score)
if method==3:
score = 1.0*min(top(k, scores, values=True))/(M+1)
return score
elif method==4:
score = 1.0*min(top(k, scores, values=True))/M
return min(1.0,score)
if method==5:
score = 1.0*max(top(k, scores, values=True))/(M+1)
return score
elif method==6:
score = 1.0*max(top(k, scores, values=True))/M
return min(1.0,score)
def stats(self,prefix=''):
N = self.cfg['n']
M = self.cfg['m']
mem_v = self.mem.values()
out = {}
# mem
out['mem_empty'] = sum([1.0 if len(x)==0 else 0.0 for x in mem_v])/N
out['mem_not_empty'] = sum([1.0 if len(x)>0 else 0.0 for x in mem_v])/N
out['mem_full'] = sum([1.0 if len(x)==M else 0.0 for x in mem_v])/N
out['mem_avg'] = sum([1.0*len(x) for x in mem_v])/(N*M)
# win
win = list(sorted(self.win.values()))
out['win_min'] = win[0]
out['win_max'] = win[-1]
gini = 0
for a in win:
for b in win:
gini += abs(a-b)
gini = float(gini)/(2.0*len(win)*sum(win))
out['win_gini'] = round(gini,3)
# ctx
out['ctx_mem_sum'] = sum([1 if x<0 else 0 for m in mem_v for x in m])
out['ctx_mem_cnt'] = sum([max([1 if x<0 else 0 for x in m]) for m in mem_v if m])
out['ctx_mem_max'] = max([sum([1 if x<0 else 0 for x in m]) for m in mem_v if m])
#
return {k:v for k,v in out.items() if k.startswith(prefix)}
def set_params(self,**kw):
self.cfg.update(kw)
# TODO: deep parameter
def get_params(self,deep=True):
return self.cfg # TODO copy ???
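# A minimal usage sketch (made-up data; assumes inputs are sparse binary
# features encoded as lists of active feature indexes, as the docstrings
# above describe):
#
# >>> model = rsm(n=100, m=8, k=2)
# >>> X = [[1, 5, 9], [2, 6, 8], [1, 5, 8]]
# >>> Y = [1, 0, 1]
# >>> model.fit(X, Y)
# >>> model.transform(X)   # -> list of 0/1 predictions, one per row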
|
mobarski/sandbox
|
rsm/v9le/v5.py
|
Python
|
mit
| 8,419
|
from setuptools import setup, find_packages
version = "6.3.0"
with open("requirements.txt", "r") as f:
install_requires = f.readlines()
setup(
name='frappe',
version=version,
description='Metadata driven, full-stack web framework',
author='Frappe Technologies',
author_email='info@frappe.io',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
|
indictranstech/omnitech-frappe
|
setup.py
|
Python
|
mit
| 438
|
#! /usr/bin/env python3
#
# importing_modules.py
#
# Author: Billy Wilson Arante
# Created: 2/24/2016 PHT
#
import fibo
def test():
"""Test cases."""
print('Example 1:')
fibo.fib(1000)
print('Example 2:')
print(fibo.fib1(1000))
print('Example 3:')
print(fibo.__name__)
# Assigning function a local name
fib = fibo.fib
print('Example 4:')
fib(1000)
if __name__ == '__main__':
test()
|
arantebillywilson/python-snippets
|
py3/py344-tutor/ch06-modules/importing_modules.py
|
Python
|
mit
| 444
|
import cv2
import os
import scipy.misc as misc
path = os.getcwd()
for x in xrange(15):
rgb = cv2.imread(path + '/output/rgb_img_' + str(x) + '.jpg')
depth = cv2.imread(path + '/output/depth_img_' + str(x) + '.jpg')
depth_inquiry = depth.copy()
# depth_inquiry[depth_inquiry > 180] = 0
# Depth threshold test - gets rid of near
depth_inquiry[depth_inquiry < 50] = 0
# depth_inquiry[depth_inquiry > 0] = 255
median = cv2.medianBlur(depth_inquiry,5)
median = cv2.cvtColor(median, cv2.COLOR_BGR2GRAY)
cv2.imshow('depth', median)
cv2.waitKey(0)
    # Sensor resolutions (likely Kinect v2): depth 424x512, color 1080x1920
    depth_params = (424, 512)
    rgb_params = (1080, 1920)
    # Scale the depth map up to the RGB height, then crop the RGB frame to
    # the matching width so both cover the same region
    depth_adjusted = (rgb_params[0], depth_params[1]*rgb_params[0]/depth_params[0])
    rgb_cropped = rgb[:, rgb_params[1]/2-depth_adjusted[1]/2:rgb_params[1]/2+depth_adjusted[1]/2]
    resized_median = cv2.resize(median, (depth_adjusted[1], depth_adjusted[0]), interpolation = cv2.INTER_AREA)
    # Mask the RGB image with the thresholded depth map
    and_result = cv2.bitwise_and(rgb_cropped,rgb_cropped,mask=resized_median)
cv2.imshow('and', and_result)
cv2.imwrite(path +'/output/rgb_depth_adjusted_' + str(x) + '.jpg', rgb_cropped )
|
J0Nreynolds/Spartifai
|
rgb_depth_exclusion.py
|
Python
|
mit
| 1,140
|
"""distutils.fancy_getopt
Wrapper around the standard getopt module that provides the following
additional features:
* short and long options are tied together
* options have help strings, so fancy_getopt could potentially
create a complete usage summary
* options set attributes of a passed-in object
"""
__revision__ = "$Id: fancy_getopt.py 58495 2007-10-16 18:12:55Z guido.van.rossum $"
import sys, string, re
import getopt
from distutils.errors import *
# Much like command_re in distutils.core, this is close to but not quite
# the same as a Python NAME -- except, in the spirit of most GNU
# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
# The similarities to NAME are again not a coincidence...
longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
longopt_re = re.compile(r'^%s$' % longopt_pat)
# For recognizing "negative alias" options, eg. "quiet=!verbose"
neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
# This is used to translate long options to legitimate Python identifiers
# (for use as attributes of some object).
longopt_xlate = lambda s: s.replace('-', '_')
class FancyGetopt:
"""Wrapper around the standard 'getopt()' module that provides some
handy extra functionality:
* short and long options are tied together
* options have help strings, and help text can be assembled
from them
* options set attributes of a passed-in object
* boolean options can have "negative aliases" -- eg. if
--quiet is the "negative alias" of --verbose, then "--quiet"
on the command line sets 'verbose' to false
"""
def __init__(self, option_table=None):
# The option table is (currently) a list of tuples. The
        # tuples may have 3 or 4 values:
# (long_option, short_option, help_string [, repeatable])
# if an option takes an argument, its long_option should have '='
# appended; short_option should just be a single character, no ':'
# in any case. If a long_option doesn't have a corresponding
# short_option, short_option should be None. All option tuples
# must have long options.
self.option_table = option_table
# 'option_index' maps long option names to entries in the option
# table (ie. those 3-tuples).
self.option_index = {}
if self.option_table:
self._build_index()
# 'alias' records (duh) alias options; {'foo': 'bar'} means
# --foo is an alias for --bar
self.alias = {}
# 'negative_alias' keeps track of options that are the boolean
# opposite of some other option
self.negative_alias = {}
# These keep track of the information in the option table. We
# don't actually populate these structures until we're ready to
# parse the command-line, since the 'option_table' passed in here
# isn't necessarily the final word.
self.short_opts = []
self.long_opts = []
self.short2long = {}
self.attr_name = {}
self.takes_arg = {}
# And 'option_order' is filled up in 'getopt()'; it records the
# original order of options (and their values) on the command-line,
# but expands short options, converts aliases, etc.
self.option_order = []
def _build_index(self):
self.option_index.clear()
for option in self.option_table:
self.option_index[option[0]] = option
def set_option_table(self, option_table):
self.option_table = option_table
self._build_index()
def add_option(self, long_option, short_option=None, help_string=None):
if long_option in self.option_index:
raise DistutilsGetoptError(
"option conflict: already an option '%s'" % long_option)
else:
option = (long_option, short_option, help_string)
self.option_table.append(option)
self.option_index[long_option] = option
def has_option(self, long_option):
"""Return true if the option table for this parser has an
option with long name 'long_option'."""
return long_option in self.option_index
def get_attr_name(self, long_option):
"""Translate long option name 'long_option' to the form it
has as an attribute of some object: ie., translate hyphens
to underscores."""
return longopt_xlate(long_option)
def _check_alias_dict(self, aliases, what):
assert isinstance(aliases, dict)
for (alias, opt) in aliases.items():
if alias not in self.option_index:
raise DistutilsGetoptError(("invalid %s '%s': "
"option '%s' not defined") % (what, alias, alias))
if opt not in self.option_index:
raise DistutilsGetoptError(("invalid %s '%s': "
"aliased option '%s' not defined") % (what, alias, opt))
def set_aliases(self, alias):
"""Set the aliases for this option parser."""
self._check_alias_dict(alias, "alias")
self.alias = alias
def set_negative_aliases(self, negative_alias):
"""Set the negative aliases for this option parser.
'negative_alias' should be a dictionary mapping option names to
option names, both the key and value must already be defined
in the option table."""
self._check_alias_dict(negative_alias, "negative alias")
self.negative_alias = negative_alias
def _grok_option_table(self):
"""Populate the various data structures that keep tabs on the
option table. Called by 'getopt()' before it can do anything
worthwhile.
"""
self.long_opts = []
self.short_opts = []
self.short2long.clear()
self.repeat = {}
for option in self.option_table:
if len(option) == 3:
long, short, help = option
repeat = 0
elif len(option) == 4:
long, short, help, repeat = option
else:
# the option table is part of the code, so simply
# assert that it is correct
raise ValueError("invalid option tuple: %r" % (option,))
# Type- and value-check the option names
if not isinstance(long, str) or len(long) < 2:
raise DistutilsGetoptError(("invalid long option '%s': "
"must be a string of length >= 2") % long)
if (not ((short is None) or
(isinstance(short, str) and len(short) == 1))):
raise DistutilsGetoptError("invalid short option '%s': "
"must a single character or None" % short)
self.repeat[long] = repeat
self.long_opts.append(long)
if long[-1] == '=': # option takes an argument?
if short: short = short + ':'
long = long[0:-1]
self.takes_arg[long] = 1
else:
                # Is the option a "negative alias" for some other option (eg.
# "quiet" == "!verbose")?
alias_to = self.negative_alias.get(long)
if alias_to is not None:
if self.takes_arg[alias_to]:
raise DistutilsGetoptError(
"invalid negative alias '%s': "
"aliased option '%s' takes a value"
% (long, alias_to))
self.long_opts[-1] = long # XXX redundant?!
self.takes_arg[long] = 0
# If this is an alias option, make sure its "takes arg" flag is
# the same as the option it's aliased to.
alias_to = self.alias.get(long)
if alias_to is not None:
if self.takes_arg[long] != self.takes_arg[alias_to]:
raise DistutilsGetoptError(
"invalid alias '%s': inconsistent with "
"aliased option '%s' (one of them takes a value, "
"the other doesn't"
% (long, alias_to))
# Now enforce some bondage on the long option name, so we can
# later translate it to an attribute name on some object. Have
# to do this a bit late to make sure we've removed any trailing
# '='.
if not longopt_re.match(long):
raise DistutilsGetoptError(
"invalid long option name '%s' "
"(must be letters, numbers, hyphens only" % long)
self.attr_name[long] = self.get_attr_name(long)
if short:
self.short_opts.append(short)
self.short2long[short[0]] = long
def getopt(self, args=None, object=None):
"""Parse command-line options in args. Store as attributes on object.
If 'args' is None or not supplied, uses 'sys.argv[1:]'. If
'object' is None or not supplied, creates a new OptionDummy
object, stores option values there, and returns a tuple (args,
object). If 'object' is supplied, it is modified in place and
'getopt()' just returns 'args'; in both cases, the returned
'args' is a modified copy of the passed-in 'args' list, which
is left untouched.
"""
if args is None:
args = sys.argv[1:]
if object is None:
object = OptionDummy()
created_object = True
else:
created_object = False
self._grok_option_table()
        short_opts = ''.join(self.short_opts)
try:
opts, args = getopt.getopt(args, short_opts, self.long_opts)
except getopt.error as msg:
raise DistutilsArgError(msg)
for opt, val in opts:
if len(opt) == 2 and opt[0] == '-': # it's a short option
opt = self.short2long[opt[1]]
else:
assert len(opt) > 2 and opt[:2] == '--'
opt = opt[2:]
alias = self.alias.get(opt)
if alias:
opt = alias
if not self.takes_arg[opt]: # boolean option?
assert val == '', "boolean option can't have value"
alias = self.negative_alias.get(opt)
if alias:
opt = alias
val = 0
else:
val = 1
attr = self.attr_name[opt]
# The only repeating option at the moment is 'verbose'.
# It has a negative option -q quiet, which should set verbose = 0.
if val and self.repeat.get(attr) is not None:
val = getattr(object, attr, 0) + 1
setattr(object, attr, val)
self.option_order.append((opt, val))
# for opts
if created_object:
return args, object
else:
return args
def get_option_order(self):
"""Returns the list of (option, value) tuples processed by the
previous run of 'getopt()'. Raises RuntimeError if
'getopt()' hasn't been called yet.
"""
if self.option_order is None:
raise RuntimeError("'getopt()' hasn't been called yet")
else:
return self.option_order
def generate_help(self, header=None):
"""Generate help text (a list of strings, one per suggested line of
output) from the option table for this FancyGetopt object.
"""
# Blithely assume the option table is good: probably wouldn't call
# 'generate_help()' unless you've already called 'getopt()'.
# First pass: determine maximum length of long option names
max_opt = 0
for option in self.option_table:
long = option[0]
short = option[1]
l = len(long)
if long[-1] == '=':
l = l - 1
if short is not None:
l = l + 5 # " (-x)" where short == 'x'
if l > max_opt:
max_opt = l
opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter
# Typical help block looks like this:
# --foo controls foonabulation
# Help block for longest option looks like this:
# --flimflam set the flim-flam level
# and with wrapped text:
# --flimflam set the flim-flam level (must be between
# 0 and 100, except on Tuesdays)
# Options with short names will have the short name shown (but
# it doesn't contribute to max_opt):
# --foo (-f) controls foonabulation
# If adding the short option would make the left column too wide,
# we push the explanation off to the next line
# --flimflam (-l)
# set the flim-flam level
# Important parameters:
# - 2 spaces before option block start lines
# - 2 dashes for each long option name
# - min. 2 spaces between option and explanation (gutter)
# - 5 characters (incl. space) for short option name
# Now generate lines of help text. (If 80 columns were good enough
# for Jesus, then 78 columns are good enough for me!)
line_width = 78
text_width = line_width - opt_width
big_indent = ' ' * opt_width
if header:
lines = [header]
else:
lines = ['Option summary:']
for option in self.option_table:
long, short, help = option[:3]
text = wrap_text(help, text_width)
if long[-1] == '=':
long = long[0:-1]
# Case 1: no short option at all (makes life easy)
if short is None:
if text:
lines.append(" --%-*s %s" % (max_opt, long, text[0]))
else:
lines.append(" --%-*s " % (max_opt, long))
# Case 2: we have a short option, so we have to include it
# just after the long option
else:
opt_names = "%s (-%s)" % (long, short)
if text:
lines.append(" --%-*s %s" %
(max_opt, opt_names, text[0]))
else:
lines.append(" --%-*s" % opt_names)
for l in text[1:]:
lines.append(big_indent + l)
return lines
def print_help(self, header=None, file=None):
if file is None:
file = sys.stdout
for line in self.generate_help(header):
file.write(line + "\n")
def fancy_getopt(options, negative_opt, object, args):
parser = FancyGetopt(options)
parser.set_negative_aliases(negative_opt)
return parser.getopt(args, object)
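# A minimal usage sketch (hypothetical option table): boolean options set
# their attribute to 1, '='-suffixed options consume a value, and leftover
# arguments are returned.
#
# >>> table = [('verbose', 'v', 'run verbosely'), ('output=', 'o', 'output file')]
# >>> args, values = FancyGetopt(table).getopt(['-v', '--output=out.txt', 'extra'])
# >>> (values.verbose, values.output, args)
# (1, 'out.txt', ['extra'])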
WS_TRANS = {ord(_wschar) : ' ' for _wschar in string.whitespace}
def wrap_text(text, width):
"""wrap_text(text : string, width : int) -> [string]
Split 'text' into multiple lines of no more than 'width' characters
each, and return the list of strings that results.
"""
if text is None:
return []
if len(text) <= width:
return [text]
text = text.expandtabs()
text = text.translate(WS_TRANS)
chunks = re.split(r'( +|-+)', text)
chunks = [ch for ch in chunks if ch] # ' - ' results in empty strings
lines = []
while chunks:
cur_line = [] # list of chunks (to-be-joined)
cur_len = 0 # length of current line
while chunks:
l = len(chunks[0])
if cur_len + l <= width: # can squeeze (at least) this chunk in
cur_line.append(chunks[0])
del chunks[0]
cur_len = cur_len + l
else: # this line is full
# drop last chunk if all space
if cur_line and cur_line[-1][0] == ' ':
del cur_line[-1]
break
if chunks: # any chunks left to process?
# if the current line is still empty, then we had a single
                # chunk that's too big to fit on a line -- so we break
# down and break it up at the line width
if cur_len == 0:
cur_line.append(chunks[0][0:width])
chunks[0] = chunks[0][width:]
# all-whitespace chunks at the end of a line can be discarded
# (and we know from the re.split above that if a chunk has
# *any* whitespace, it is *all* whitespace)
if chunks[0][0] == ' ':
del chunks[0]
# and store this line in the list-of-all-lines -- as a single
# string, of course!
lines.append(''.join(cur_line))
return lines
def translate_longopt(opt):
"""Convert a long option name to a valid Python identifier by
changing "-" to "_".
"""
return longopt_xlate(opt)
class OptionDummy:
"""Dummy class just used as a place to hold command-line option
values as instance attributes."""
def __init__(self, options=[]):
"""Create a new OptionDummy instance. The attributes listed in
'options' will be initialized to None."""
for opt in options:
setattr(self, opt, None)
if __name__ == "__main__":
text = """\
Tra-la-la, supercalifragilisticexpialidocious.
How *do* you spell that odd word, anyways?
(Someone ask Mary -- she'll know [or she'll
say, "How should I know?"].)"""
for w in (10, 20, 30, 40):
print("width: %d" % w)
print("\n".join(wrap_text(text, w)))
print()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.1/Lib/distutils/fancy_getopt.py
|
Python
|
mit
| 17,855
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/creature/npc/droid/shared_wed_treadwell_base.iff"
result.attribute_template_id = 3
result.stfName("droid_name","wed_treadwell_base")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/creature/npc/droid/shared_wed_treadwell_base.py
|
Python
|
mit
| 460
|
from decouple import Csv, config
from dj_database_url import parse as db_url
from .base import * # noqa
DEBUG = False
SECRET_KEY = config('SECRET_KEY')
DATABASES = {
'default': config('DATABASE_URL', cast=db_url),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
MEDIA_ROOT = 'mediafiles'
MEDIA_URL = '/media/'
SERVER_EMAIL = 'foo@example.com'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = config('SENDGRID_USERNAME')
EMAIL_HOST_PASSWORD = config('SENDGRID_PASSWORD')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Security
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 3600
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
CSRF_COOKIE_HTTPONLY = True
# Webpack
WEBPACK_LOADER['DEFAULT']['CACHE'] = True
# Celery
CELERY_BROKER_URL = config('REDIS_URL')
CELERY_RESULT_BACKEND = config('REDIS_URL')
CELERY_SEND_TASK_ERROR_EMAILS = True
# Whitenoise
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MIDDLEWARE.insert( # insert WhiteNoiseMiddleware right after SecurityMiddleware
MIDDLEWARE.index('django.middleware.security.SecurityMiddleware') + 1,
'whitenoise.middleware.WhiteNoiseMiddleware')
# django-log-request-id
MIDDLEWARE.insert( # insert RequestIDMiddleware on the top
0, 'log_request_id.middleware.RequestIDMiddleware')
LOG_REQUEST_ID_HEADER = 'HTTP_X_REQUEST_ID'
LOG_REQUESTS = True
# Opbeat
INSTALLED_APPS += ['opbeat.contrib.django']
MIDDLEWARE.insert( # insert OpbeatAPMMiddleware on the top
0, 'opbeat.contrib.django.middleware.OpbeatAPMMiddleware')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'request_id': {
'()': 'log_request_id.filters.RequestIDFilter'
},
},
'formatters': {
'standard': {
'format': '%(levelname)-8s [%(asctime)s] [%(request_id)s] %(name)s: %(message)s'
},
},
'handlers': {
'null': {
'class': 'logging.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'filters': ['require_debug_false'],
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'filters': ['request_id'],
'formatter': 'standard',
},
},
'loggers': {
'': {
'handlers': ['console'],
'level': 'INFO'
},
'django.security.DisallowedHost': {
'handlers': ['null'],
'propagate': False,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'log_request_id.middleware': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
}
}
|
camilaavilarinho/monitorador-twitter
|
monitortwitter/settings/production.py
|
Python
|
mit
| 3,261
|
from setuptools import setup, find_packages
DESCRIPTION = """
Send emails based on a Django template
See:
https://github.com/prestontimmons/django-email-template
"""
setup(
name="django-email-template",
version="1.0.2",
description="Send emails based on a Django template",
long_description=DESCRIPTION,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[],
)
|
funkybob/django-email-template
|
setup.py
|
Python
|
mit
| 433
|
import django_filters
from dal import autocomplete
from .models import SkosConcept, SkosConceptScheme
django_filters.filters.LOOKUP_TYPES = [
('', '---------'),
('exact', 'Is equal to'),
('iexact', 'Is equal to (case insensitive)'),
('not_exact', 'Is not equal to'),
('lt', 'Lesser than/before'),
('gt', 'Greater than/after'),
('gte', 'Greater than or equal to'),
('lte', 'Lesser than or equal to'),
('startswith', 'Starts with'),
('endswith', 'Ends with'),
('contains', 'Contains'),
('icontains', 'Contains (case insensitive)'),
('not_contains', 'Does not contain'),
]
class SkosConceptFilter(django_filters.FilterSet):
pref_label = django_filters.ModelMultipleChoiceFilter(
widget=autocomplete.Select2Multiple(url='vocabs-ac:skosconcept-autocomplete'),
queryset=SkosConcept.objects.all(),
lookup_expr='icontains',
label='PrefLabel',
help_text=False,
)
scheme = django_filters.ModelMultipleChoiceFilter(
queryset=SkosConceptScheme.objects.all(),
lookup_expr='icontains',
label='in SkosConceptScheme',
help_text=False,
)
class Meta:
model = SkosConcept
fields = '__all__'
|
vanyh/handkeinzungen-app
|
vocabs/filters.py
|
Python
|
mit
| 1,239
|
from django.test import override_settings, SimpleTestCase
from arcutils.settings import NO_DEFAULT, PrefixedSettings, get_setting
@override_settings(ARC={
'a': 'a',
'b': [0, 1],
'c': [{'c': 'c'}],
'd': 'd',
})
class TestGetSettings(SimpleTestCase):
def get_setting(self, key, default=NO_DEFAULT):
return get_setting(key, default=default)
def test_can_traverse_into_dict(self):
self.assertEqual(self.get_setting('ARC.a'), 'a')
def test_can_traverse_into_dict_then_list(self):
self.assertEqual(self.get_setting('ARC.b.0'), 0)
def test_can_traverse_into_list_then_dict(self):
self.assertEqual(self.get_setting('ARC.c.0.c'), 'c')
def test_returns_default_for_non_existent_root(self):
default = object()
self.assertIs(self.get_setting('NOPE', default), default)
def test_returns_default_for_non_existent_nested_setting(self):
default = object()
self.assertIs(self.get_setting('ARC.nope', default), default)
def test_raises_when_not_found_and_no_default(self):
self.assertRaises(KeyError, self.get_setting, 'NOPE')
def test_can_traverse_into_string_setting(self):
self.assertEqual(self.get_setting('ARC.d.0'), 'd')
def test_bad_index_causes_type_error(self):
self.assertRaises(TypeError, self.get_setting, 'ARC.b.nope')
@override_settings(CAS={
'extra': 'extra',
'overridden': 'overridden',
})
class TestGetPrefixedSettings(SimpleTestCase):
def setUp(self):
super().setUp()
defaults = {
'base_url': 'http://example.com/cas/',
'parent': {
'child': 'child',
},
'overridden': 'default',
}
self.settings = PrefixedSettings('CAS', defaults)
def test_get_from_defaults(self):
self.assertEqual(self.settings.get('base_url'), 'http://example.com/cas/')
def test_get_nested_from_defaults(self):
self.assertEqual(self.settings.get('parent.child'), 'child')
def test_get_from_project_settings(self):
self.assertEqual(self.settings.get('extra'), 'extra')
def test_get_setting_overridden_in_project_settings(self):
self.assertEqual(self.settings.get('overridden'), 'overridden')
def test_defaults_trump_passed_default(self):
self.assertEqual(
self.settings.get('base_url', 'http://example.com/other/'),
'http://example.com/cas/')
def test_passed_default_does_not_trump_project_setting(self):
self.assertEqual(self.settings.get('extra', 'default'), 'extra')
def test_get_default_for_nonexistent(self):
self.assertEqual(self.settings.get('pants', 'jeans'), 'jeans')
|
PSU-OIT-ARC/django-arcutils
|
arcutils/tests/test_settings.py
|
Python
|
mit
| 2,723
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "battleground.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
pguridi/pywars
|
manage.py
|
Python
|
mit
| 255
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Mission()
result.template = "object/mission/base/shared_base_mission.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/mission/base/shared_base_mission.py
|
Python
|
mit
| 435
|
import os
from collections import namedtuple
from eg import color
from eg import config
from mock import patch
# Some hardcoded real colors.
_YELLOW = '\x1b[33m'
_MAGENTA = '\x1b[35m'
_BLACK = '\x1b[30m'
_GREEN = '\x1b[32m'
# The flags in the test file marking where substitutions should/can occur.
SubFlags = namedtuple(
'SubFlags',
[
'pound',
'pound_reset',
'heading',
'heading_reset',
'code',
'code_reset',
'backticks',
'backticks_reset',
'prompt',
'prompt_reset'
]
)
SUB_FLAGS = SubFlags(
pound='{POUND}',
pound_reset='{POUND_RESET}',
heading='{HEADING}',
heading_reset='{HEADING_RESET}',
code='{CODE}',
code_reset='{CODE_RESET}',
backticks='{BACKTICKS}',
backticks_reset='{BACKTICKS_RESET}',
prompt='{PROMPT}',
prompt_reset='{PROMPT_RESET}'
)
FIND_FILE_WITH_SUBS = os.path.join(
os.path.dirname(__file__),
'assets',
'find_example_substitute.md'
)
def get_clean_find_file():
"""Get the test file for find as pure markdown."""
# Defaults are empty strings, so this works.
raw_file = get_raw_find_test_file()
cleaned_file = get_data_with_subs(raw_file)
return cleaned_file
def get_raw_find_test_file():
"""Read the test file in and return it as a string."""
with open(FIND_FILE_WITH_SUBS, 'r') as f:
data = f.read()
return data
def get_data_with_subs(
string,
pound='',
heading='',
code='',
backticks='',
pound_reset='',
heading_reset='',
code_reset='',
backticks_reset='',
prompt='',
prompt_reset=''
):
"""
Return string with substitutions made. By default, with no parameters, will
simply remove all substitution flags, replacing them all with the empty
string.
This substitutes things manually, without using regular expressions. The
reset_strings are provided to try and allow testing only some of the
colorizations at a time. For example, if you are just colorizing the
headings, you'll want the reset escape sequence there. You won't want them
for the block indents, however, or else you'd end up with things like:
code code RESET
code line two RESET
which obviously wouldn't make sense.
"""
data = string
data = data.replace(SUB_FLAGS.pound, pound)
data = data.replace(SUB_FLAGS.pound_reset, pound_reset)
data = data.replace(SUB_FLAGS.heading, heading)
data = data.replace(SUB_FLAGS.heading_reset, heading_reset)
data = data.replace(SUB_FLAGS.code, code)
data = data.replace(SUB_FLAGS.code_reset, code_reset)
data = data.replace(SUB_FLAGS.backticks, backticks)
data = data.replace(SUB_FLAGS.backticks_reset, backticks_reset)
data = data.replace(SUB_FLAGS.prompt, prompt)
data = data.replace(SUB_FLAGS.prompt_reset, prompt_reset)
return data
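# A minimal illustration (hypothetical input): unspecified flags collapse to
# the empty string, so only the requested substitutions remain.
#
# >>> get_data_with_subs('{POUND}# find{POUND_RESET}', pound='<P>', pound_reset='</P>')
# '<P># find</P>'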
def test_colorize_heading():
"""Makes sure we colorize things like '# find' correctly"""
color_config = config.ColorConfig(
'P',
'H',
_YELLOW,
_MAGENTA,
_BLACK,
'RES',
'RES',
'',
'',
''
)
clean = get_clean_find_file()
raw_file = get_raw_find_test_file()
target = get_data_with_subs(
raw_file,
pound=color_config.pound,
pound_reset=color_config.pound_reset,
heading=color_config.heading,
heading_reset=color_config.heading_reset
)
colorizer = color.EgColorizer(color_config)
actual = colorizer.colorize_heading(clean)
assert actual == target
def test_colorize_block_indents():
"""Makes sure we colorize block indents correctly."""
color_config = config.ColorConfig(
_BLACK,
_MAGENTA,
'C',
_YELLOW,
'P',
'',
'',
'res',
'',
'res'
)
clean = get_clean_find_file()
raw_file = get_raw_find_test_file()
target = get_data_with_subs(
raw_file,
code=color_config.code,
code_reset=color_config.code_reset,
prompt=color_config.prompt,
prompt_reset=color_config.prompt_reset
)
colorizer = color.EgColorizer(color_config)
actual = colorizer.colorize_block_indent(clean)
assert actual == target
def test_colorize_backticks():
"""Makes sure we colorize backticks correctly."""
color_config = config.ColorConfig(
_BLACK,
_MAGENTA,
_YELLOW,
'B',
_GREEN,
'',
'',
'',
'res',
''
)
clean = get_clean_find_file()
raw_file = get_raw_find_test_file()
target = get_data_with_subs(
raw_file,
backticks=color_config.backticks,
backticks_reset=color_config.backticks_reset,
)
colorizer = color.EgColorizer(color_config)
actual = colorizer.colorize_backticks(clean)
assert actual == target
@patch('eg.color.EgColorizer.colorize_backticks',
return_value='text-heading-indent-backticks')
@patch('eg.color.EgColorizer.colorize_block_indent',
return_value='text-heading-indent')
@patch('eg.color.EgColorizer.colorize_heading', return_value='text-heading')
def test_colorize_text_calls_all_sub_methods(heading, indent, backticks):
"""colorize_text should call all of the helper colorize methods."""
colorizer = color.EgColorizer(None)
text = 'text'
actual = colorizer.colorize_text(text)
heading.assert_called_once_with(text)
indent.assert_called_once_with('text-heading')
backticks.assert_called_once_with('text-heading-indent')
assert 'text-heading-indent-backticks' == actual
|
scorphus/eg
|
test/color_test.py
|
Python
|
mit
| 5,718
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/attachment/weapon/shared_blacksun_heavy_weapon2_s02.iff"
result.attribute_template_id = 8
result.stfName("item_n","ship_attachment")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/ship/attachment/weapon/shared_blacksun_heavy_weapon2_s02.py
|
Python
|
mit
| 474
|
"""Test kytos.core.buffers module."""
import asyncio
from unittest import TestCase
from unittest.mock import MagicMock, patch
from kytos.core.buffers import KytosBuffers, KytosEventBuffer
# pylint: disable=protected-access
class TestKytosEventBuffer(TestCase):
"""KytosEventBuffer tests."""
def setUp(self):
"""Instantiate a KytosEventBuffer."""
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.kytos_event_buffer = KytosEventBuffer('name', loop=self.loop)
@staticmethod
def create_event_mock(name='any'):
"""Create a new event mock."""
event = MagicMock()
event.name = name
return event
def test_put_get(self):
"""Test put and get methods."""
event = self.create_event_mock()
self.kytos_event_buffer.put(event)
queue_event = self.kytos_event_buffer.get()
self.assertEqual(queue_event, event)
def test_put__shutdown(self):
"""Test put method to shutdown event."""
event = self.create_event_mock('kytos/core.shutdown')
self.kytos_event_buffer.put(event)
self.assertTrue(self.kytos_event_buffer._reject_new_events)
def test_aput(self):
"""Test aput async method."""
event = MagicMock()
event.name = 'kytos/core.shutdown'
self.loop.run_until_complete(self.kytos_event_buffer.aput(event))
self.assertTrue(self.kytos_event_buffer._reject_new_events)
def test_aget(self):
"""Test aget async method."""
event = self.create_event_mock()
self.kytos_event_buffer._queue.sync_q.put(event)
expected = self.loop.run_until_complete(self.kytos_event_buffer.aget())
self.assertEqual(event, expected)
@patch('janus._SyncQueueProxy.task_done')
def test_task_done(self, mock_task_done):
"""Test task_done method."""
self.kytos_event_buffer.task_done()
mock_task_done.assert_called()
@patch('janus._SyncQueueProxy.join')
def test_join(self, mock_join):
"""Test join method."""
self.kytos_event_buffer.join()
mock_join.assert_called()
def test_qsize(self):
"""Test qsize method to empty and with one event in query."""
qsize_1 = self.kytos_event_buffer.qsize()
event = self.create_event_mock()
self.kytos_event_buffer._queue.sync_q.put(event)
qsize_2 = self.kytos_event_buffer.qsize()
self.assertEqual(qsize_1, 0)
self.assertEqual(qsize_2, 1)
def test_empty(self):
"""Test empty method to empty and with one event in query."""
empty_1 = self.kytos_event_buffer.empty()
event = self.create_event_mock()
self.kytos_event_buffer._queue.sync_q.put(event)
empty_2 = self.kytos_event_buffer.empty()
self.assertTrue(empty_1)
self.assertFalse(empty_2)
@patch('janus._SyncQueueProxy.full')
def test_full(self, mock_full):
"""Test full method to full and not full query."""
mock_full.side_effect = [False, True]
full_1 = self.kytos_event_buffer.full()
full_2 = self.kytos_event_buffer.full()
self.assertFalse(full_1)
self.assertTrue(full_2)
class TestKytosBuffers(TestCase):
"""KytosBuffers tests."""
def setUp(self):
"""Instantiate a KytosBuffers."""
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.kytos_buffers = KytosBuffers(loop=self.loop)
def test_send_stop_signal(self):
"""Test send_stop_signal method."""
self.kytos_buffers.send_stop_signal()
self.assertTrue(self.kytos_buffers.raw._reject_new_events)
self.assertTrue(self.kytos_buffers.msg_in._reject_new_events)
self.assertTrue(self.kytos_buffers.msg_out._reject_new_events)
self.assertTrue(self.kytos_buffers.app._reject_new_events)
|
kytos/kytos
|
tests/unit/test_core/test_buffers.py
|
Python
|
mit
| 3,932
|
import sys
import codecs
from textblob import Blobber
from textblob.wordnet import Synset
from textblob.en.np_extractors import ConllExtractor
from collections import Counter
import re
from nltk.corpus import wordnet as wn
from nltk.corpus.reader import NOUN
import os
import string
import itertools
from nltk.corpus import stopwords
stoplist = stopwords.words('english')
stoplist.extend(stopwords.words('french'))
stoplist.extend(["cette", "made", "works", "image", "images", "les", "comme"])
stoplist.extend(["photograph", "photographer", "film", "untitled", "series", "artist"])
stoplist.extend(["photographs", "other", "like", "also", "said", "work", "one", "two", "three"])
stoplist.extend(list(string.ascii_lowercase))
def wn_synonyms(ss):
return [l.name().decode('utf-8') for l in ss.lemmas()]
def wn_expand(ss):
x= [wn_getword(ss)]
b = tb(ss.definition())
x.extend([t[0] for t in b.tags if t[1] in ['JJ', 'NN', 'NNS']])
return x
def wn_getword(ss):
return ss if isinstance(ss, basestring) else ss.name().decode('utf-8').split('.')[0]
def wn_make_synset(word):
if '.' in word:
return wn.synset(word)
else:
ss = wn.synsets(word, NOUN)
if ss:
return ss[0]
else:
return None
def contains_number(word):
return re.search(r'[0-9]', word)
def bad(word):
return contains_number(word) or word.lower() in stoplist or len(word) < 3
def extract_capitalized(text):
return list(set(re.findall(r'([A-Z][a-z]+(?=\s[A-Z])(?:\s[A-Z][a-z]+)+)', re.sub(r'\n', ' _ ', text))))
tb = Blobber(np_extractor=ConllExtractor())
if __name__ == "__main__":
for arg in sys.argv[1:]:
with codecs.open(arg, 'r', encoding='utf-8') as f:
text = f.read()
b = tb(text)
step1 = [t[0] for t in b.tags if t[1] in ['JJ', 'NN', 'NNS'] and not bad(t[0])]
#step2 = [wn_make_synset(word) for word in step1 if wn_make_synset(word)]
#step3 = list(itertools.chain.from_iterable([wn_expand(ss) for ss in step2]))
print "\n"
print '=' *60
print arg
print '=' *60
print ' *', Counter(step1)
print ' *', extract_capitalized(text)
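# Hedged usage sketch (editor's addition, Python 2 like the module above):
# expanding a noun via its WordNet gloss; requires the NLTK wordnet corpus
# to be downloaded. The word 'photograph' is illustrative.
#
#   ss = wn_make_synset('photograph')
#   if ss:
#       print wn_expand(ss)  # the word plus JJ/NN/NNS tokens from its definition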
|
darenr/MOMA-Art
|
extract_concepts/concepts.py
|
Python
|
mit
| 2,123
|
from decimal import Decimal
from electrum.util import (format_satoshis, format_fee_satoshis, parse_URI,
is_hash256_str, chunks)
from . import SequentialTestCase
class TestUtil(SequentialTestCase):
def test_format_satoshis(self):
self.assertEqual("0.00001234", format_satoshis(1234))
def test_format_satoshis_negative(self):
self.assertEqual("-0.00001234", format_satoshis(-1234))
def test_format_fee_float(self):
self.assertEqual("1.7", format_fee_satoshis(1700/1000))
def test_format_fee_decimal(self):
self.assertEqual("1.7", format_fee_satoshis(Decimal("1.7")))
def test_format_fee_precision(self):
self.assertEqual("1.666",
format_fee_satoshis(1666/1000, precision=6))
self.assertEqual("1.7",
format_fee_satoshis(1666/1000, precision=1))
def test_format_satoshis_whitespaces(self):
self.assertEqual(" 0.0001234 ",
format_satoshis(12340, whitespaces=True))
self.assertEqual(" 0.00001234",
format_satoshis(1234, whitespaces=True))
def test_format_satoshis_whitespaces_negative(self):
self.assertEqual(" -0.0001234 ",
format_satoshis(-12340, whitespaces=True))
self.assertEqual(" -0.00001234",
format_satoshis(-1234, whitespaces=True))
def test_format_satoshis_diff_positive(self):
self.assertEqual("+0.00001234",
format_satoshis(1234, is_diff=True))
def test_format_satoshis_diff_negative(self):
self.assertEqual("-0.00001234", format_satoshis(-1234, is_diff=True))
def _do_test_parse_URI(self, uri, expected):
result = parse_URI(uri)
self.assertEqual(expected, result)
def test_parse_URI_address(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma',
{'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma'})
def test_parse_URI_only_address(self):
self._do_test_parse_URI('15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma',
{'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma'})
def test_parse_URI_address_label(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?label=electrum%20test',
{'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'label': 'electrum test'})
def test_parse_URI_address_message(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?message=electrum%20test',
{'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'message': 'electrum test', 'memo': 'electrum test'})
def test_parse_URI_address_amount(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?amount=0.0003',
{'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'amount': 30000})
def test_parse_URI_address_request_url(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?r=http://domain.tld/page?h%3D2a8628fc2fbe',
{'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'r': 'http://domain.tld/page?h=2a8628fc2fbe'})
def test_parse_URI_ignore_args(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?test=test',
{'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'test': 'test'})
def test_parse_URI_multiple_args(self):
self._do_test_parse_URI('bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?amount=0.00004&label=electrum-test&message=electrum%20test&test=none&r=http://domain.tld/page',
{'address': '15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma', 'amount': 4000, 'label': 'electrum-test', 'message': u'electrum test', 'memo': u'electrum test', 'r': 'http://domain.tld/page', 'test': 'none'})
def test_parse_URI_no_address_request_url(self):
self._do_test_parse_URI('bitcoin:?r=http://domain.tld/page?h%3D2a8628fc2fbe',
{'r': 'http://domain.tld/page?h=2a8628fc2fbe'})
def test_parse_URI_invalid_address(self):
self.assertRaises(BaseException, parse_URI, 'bitcoin:invalidaddress')
def test_parse_URI_invalid(self):
self.assertRaises(BaseException, parse_URI, 'notbitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma')
    def test_parse_URI_parameter_pollution(self):
self.assertRaises(Exception, parse_URI, 'bitcoin:15mKKb2eos1hWa6tisdPwwDC1a5J1y9nma?amount=0.0003&label=test&amount=30.0')
def test_is_hash256_str(self):
self.assertTrue(is_hash256_str('09a4c03e3bdf83bbe3955f907ee52da4fc12f4813d459bc75228b64ad08617c7'))
self.assertTrue(is_hash256_str('2A5C3F4062E4F2FCCE7A1C7B4310CB647B327409F580F4ED72CB8FC0B1804DFA'))
self.assertTrue(is_hash256_str('00' * 32))
self.assertFalse(is_hash256_str('00' * 33))
self.assertFalse(is_hash256_str('qweqwe'))
self.assertFalse(is_hash256_str(None))
self.assertFalse(is_hash256_str(7))
def test_chunks(self):
self.assertEqual([[1, 2], [3, 4], [5]],
list(chunks([1, 2, 3, 4, 5], 2)))
with self.assertRaises(ValueError):
list(chunks([1, 2, 3], 0))
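# Editor's note (hedged): the amount assertions above follow from parse_URI
# converting BTC to satoshis, e.g. 0.0003 BTC * 1e8 = 30000 sat and
# 0.00004 BTC * 1e8 = 4000 sat.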
|
fujicoin/electrum-fjc
|
electrum/tests/test_util.py
|
Python
|
mit
| 5,385
|
###########################################################
#
# Copyright (c) 2010, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
__all__ = [ 'SystemInfoWdg', 'LinkLoadTestWdg' ,'ClearSideBarCache']
import os, platform, sys
from pyasm.common import Environment, Config, Common
from pyasm.security import Login
from tactic.ui.common import BaseRefreshWdg
from pyasm.web import DivWdg, Table, WebContainer, Widget, SpanWdg
from pyasm.search import Search
from pyasm.biz import Project
from pyasm.widget import CheckboxWdg, TextWdg
from pyasm.command import Command
from tactic.ui.widget import ActionButtonWdg
class SystemInfoWdg(BaseRefreshWdg):
def get_display(self):
top = DivWdg()
top.add_color("background", "background")
top.add_color("color", "color")
top.add_style("min-width: 600px")
os_name = os.name
top.set_unique_id()
top.add_smart_style("spt_info_title", "background", self.top.get_color("background3"))
top.add_smart_style("spt_info_title", "padding", "3px")
top.add_smart_style("spt_info_title", "font-weight", "bold")
# server
title_div = DivWdg()
top.add(title_div)
title_div.add("Server")
title_div.add_class("spt_info_title")
os_div = DivWdg()
top.add(os_div)
os_info = platform.uname()
try:
os_login = os.getlogin()
except Exception:
os_login = os.environ.get("LOGNAME")
table = Table()
table.add_color("color", "color")
table.add_style("margin: 10px")
os_div.add(table)
for i, title in enumerate(['OS','Node Name','Release','Version','Machine']):
table.add_row()
td = table.add_cell("%s: " % title)
td.add_style("width: 150px")
table.add_cell( os_info[i] )
table.add_row()
table.add_cell("CPU Count: ")
try :
import multiprocessing
table.add_cell( multiprocessing.cpu_count() )
except (ImportError, NotImplementedError):
table.add_cell( "n/a" )
table.add_row()
table.add_cell("Login: ")
table.add_cell( os_login )
# python
title_div = DivWdg()
top.add(title_div)
title_div.add("Python")
title_div.add_class("spt_info_title")
table = Table()
table.add_color("color", "color")
table.add_style("margin: 10px")
top.add(table)
table.add_row()
td = table.add_cell("Version: ")
td.add_style("width: 150px")
table.add_cell( sys.version )
# client
title_div = DivWdg()
top.add(title_div)
title_div.add("Client")
title_div.add_class("spt_info_title")
web = WebContainer.get_web()
user_agent = web.get_env("HTTP_USER_AGENT")
table = Table()
table.add_color("color", "color")
table.add_style("margin: 10px")
top.add(table)
table.add_row()
td = table.add_cell("User Agent: ")
td.add_style("width: 150px")
table.add_cell( user_agent )
table.add_row()
td = table.add_cell("TACTIC User: ")
table.add_cell( web.get_user_name() )
top.add('<br/>')
self.handle_load_balancing(top)
# performance test
top.add('<br/>')
title_div = DivWdg()
top.add(title_div)
title_div.add("Performance Test")
title_div.add_class("spt_info_title")
performance_wdg = PerformanceWdg()
top.add(performance_wdg)
top.add('<br/>')
# mail server
title_div = DivWdg()
top.add(title_div)
title_div.add("Mail Server")
title_div.add_class("spt_info_title")
table = Table(css='email_server')
table.add_color("color", "color")
table.add_style("margin: 10px")
top.add(table)
table.add_row()
td = table.add_cell("Server: ")
td.add_style("width: 150px")
mailserver = Config.get_value("services", "mailserver")
has_mailserver = True
if mailserver:
table.add_cell( mailserver )
else:
table.add_cell("None configured")
has_mailserver = False
login = Login.get_by_login('admin')
login_email = login.get_value('email')
table.add_row()
td = table.add_cell("From: ")
td.add_style("width: 150px")
text = TextWdg('email_from')
text.set_attr('size', '40')
text.set_value(login_email)
text.add_class('email_from')
table.add_cell(text)
table.add_row()
td = table.add_cell("To: ")
td.add_style("width: 150px")
text = TextWdg('email_to')
text.set_attr('size', '40')
text.add_class('email_to')
text.set_value(login_email)
table.add_cell(text)
button = ActionButtonWdg(title='Email Send Test')
table.add_row_cell('<br/>')
table.add_row()
table.add_cell(button)
button.add_style("float: right")
button.add_behavior( {
'type': 'click_up',
'has_mailserver': has_mailserver,
'cbjs_action': '''
if (!bvr.has_mailserver) {
spt.alert('You have to fill in mailserver and possibly other mail related options in the TACTIC config file to send email.');
return;
}
var s = TacticServerStub.get();
try {
spt.app_busy.show('Sending email');
var from_txt = bvr.src_el.getParent('.email_server').getElement('.email_from');
var to_txt = bvr.src_el.getParent('.email_server').getElement('.email_to');
var rtn = s.execute_cmd('pyasm.command.EmailTriggerTestCmd',
{'sender_email': from_txt.value,
'recipient_emails': to_txt.value.split(','),
'msg': 'Simple Email Test by TACTIC'}
);
if (rtn.status == 'OK') {
spt.info("Email sent successfully to " + to_txt.value)
}
} catch(e) {
spt.alert(spt.exception.handler(e));
}
spt.app_busy.hide();
'''
})
top.add('<br/>')
self.handle_directories(top)
#table.add_row()
#td = table.add_cell("TACTIC User: ")
#table.add_cell( web.get_user_name() )
top.add('<br/>')
top.add(DivWdg('Link Test', css='spt_info_title'))
top.add('<br/>')
top.add(LinkLoadTestWdg())
top.add('<br/>')
self.handle_python_script_test(top)
top.add('<br/>')
self.handle_sidebar_clear(top)
return top
def handle_directories(self, top):
# deal with asset directories
top.add(DivWdg('Asset Folders', css='spt_info_title'))
mailserver = Config.get_value("services", "mailserver")
table = Table()
table.add_color("color", "color")
table.add_style("margin: 10px")
top.add(table)
table.add_row()
td = table.add_cell("asset_base_dir: ")
td.add_style("width: 150px")
asset_base_dir = Config.get_value("checkin", "asset_base_dir")
if asset_base_dir:
table.add_cell( asset_base_dir )
tr = table.add_row()
tr.add_style('border-bottom: 1px #bbb solid')
# check if it is writable
is_writable = os.access(asset_base_dir, os.W_OK)
span = SpanWdg("writable:")
span.add_style('padding-left: 20px')
td = table.add_cell(span)
td = table.add_cell(str(is_writable))
else:
table.add_cell( "None configured")
client_os = Environment.get_env_object().get_client_os()
if os.name == 'nt':
os_name = 'win32'
else:
os_name = 'linux'
if client_os == 'nt':
client_os_name = 'win32'
else:
client_os_name = 'linux'
env = Environment.get()
client_handoff_dir = env.get_client_handoff_dir(include_ticket=False, no_exception=True)
client_asset_dir = env.get_client_repo_dir()
table.add_row()
td = table.add_cell("%s_server_handoff_dir: " % os_name)
td.add_style("width: 150px")
handoff_dir = Config.get_value("checkin", "%s_server_handoff_dir" % os_name)
if handoff_dir:
table.add_cell( handoff_dir )
table.add_row()
# check if it is writable
is_writable = os.access(handoff_dir, os.W_OK)
span = SpanWdg("writable:")
span.add_style('padding-left: 20px')
td = table.add_cell(span)
td = table.add_cell(str(is_writable))
else:
table.add_cell( "None configured")
table.add_row()
td = table.add_cell("%s hand-off test: " % client_os_name)
td.add_style("width: 150px")
button = ActionButtonWdg(title='Test')
button.add_behavior( {
'type': 'click_up',
'handoff_dir': client_handoff_dir,
'asset_dir': client_asset_dir,
'cbjs_action': '''
var env = spt.Environment.get();
var applet = spt.Applet.get();
var handoff_state = applet.exists(bvr.handoff_dir);
var asset_state = applet.exists(bvr.asset_dir);
if (asset_state == false) {
env.set_transfer_mode("web");
spt.error('client repo directory is not accessible: ' + bvr.asset_dir);
}
else if (handoff_state == false) {
env.set_transfer_mode("web");
spt.error('client handoff directory is not accessible: ' + bvr.handoff_dir);
}
else {
env.set_transfer_mode("copy");
spt.info('<div>client handoff directory: ' + bvr.handoff_dir + '</div><br/><div>client repo directory :' + bvr.asset_dir + '</div><br/><div> can be successfully accessed.</div>', {type:'html'});
}
'''
} )
table.add_cell( button )
def handle_python_script_test(self, top):
top.add(DivWdg('Python Script Test', css='spt_info_title'))
table = Table(css='script')
table.add_color("color", "color")
table.add_style("margin: 10px")
table.add_style("width: 100%")
top.add(table)
table.add_row()
td = table.add_cell("Script Path: ")
td.add_style("width: 150px")
text = TextWdg('script_path')
td = table.add_cell(text)
button = ActionButtonWdg(title='Run')
table.add_cell(button)
button.add_style("float: right")
button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var s = TacticServerStub.get();
try {
var path = bvr.src_el.getParent('.script').getElement('.spt_input').value;
if (! path)
throw('Please enter a valid script path');
s.execute_cmd('tactic.command.PythonCmd', {script_path: path});
} catch(e) {
spt.alert(spt.exception.handler(e));
}
'''
})
def handle_load_balancing(self, top):
# deal with asset directories
top.add(DivWdg('Load Balancing', css='spt_info_title'))
table = Table()
table.add_class("spt_loadbalance")
table.add_color("color", "color")
table.add_style("margin: 10px")
top.add(table)
table.add_row()
td = table.add_cell("Load Balancing: ")
td.add_style("width: 150px")
button = ActionButtonWdg(title='Test')
td = table.add_cell(button)
message_div = DivWdg()
message_div.add_class("spt_loadbalance_message")
table.add_cell(message_div)
button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var server = TacticServerStub.get()
var ports = {};
var count = 0;
for (var i = 0; i < 50; i++) {
var info = server.get_connection_info();
var port = info.port;
var num = ports[port];
if (!num) {
ports[port] = 1;
count += 1;
}
else {
ports[port] += 1;
}
// if there are 10 requests and still only one, then break
if (i == 10 && count == 1)
break;
}
// build the ports string
x = [];
for (i in ports) {
x.push(i);
}
x.sort();
x = x.join(", ");
var loadbalance_el = bvr.src_el.getParent(".spt_loadbalance");
var message_el = loadbalance_el.getElement(".spt_loadbalance_message");
if (count > 1) {
var message = "Yes (found " + count + " ports: "+x+")";
}
else {
var message = "<blink style='background: red; padding: 3px'>Not enabled (found only port " + x + ")</blink>";
}
message_el.innerHTML = message
'''
} )
def handle_sidebar_clear(self, top):
top.add(DivWdg('Clear Side Bar Cache ', css='spt_info_title'))
table = Table()
table.add_color("color", "color")
table.add_style("margin: 10px")
top.add(table)
table.add_row()
td = table.add_cell("Clear the Side Bar Cache for all users")
td.add_style("width: 250px")
button = ActionButtonWdg(title='Run')
table.add_cell(button)
button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
try {
var s = TacticServerStub.get();
s.execute_cmd('tactic.ui.app.ClearSideBarCache');
} catch(e) {
spt.alert(spt.exception.handler(e));
}
spt.info('Side Bar cache cleared.')
'''
})
class ClearSideBarCache(Command):
def execute(self):
tmp_dir = Environment.get_tmp_dir()
# remove the sidebar cache
sidebar_cache_dir = "%s/cache/side_bar" % tmp_dir
if os.path.exists(sidebar_cache_dir):
import shutil
shutil.rmtree(sidebar_cache_dir)
class LinkLoadTestWdg(BaseRefreshWdg):
'''Load Pages in popup as part of a testing process'''
def get_display(self):
config_search_type = "config/widget_config"
configs = []
all_element_names = []
from tactic.ui.panel import SideBarBookmarkMenuWdg
SideBarBookmarkMenuWdg.add_internal_config(configs, ['definition'])
for internal_config in configs:
all_element_names = internal_config.get_element_names()
search = Search(config_search_type)
search.add_filter("search_type", 'SideBarWdg')
search.add_filter("view", 'definition')
search.add_filter("login", None)
config = search.get_sobject()
element_names = []
if config:
element_names = config.get_element_names()
for name in element_names:
if 'separator' in name:
element_names.remove(name)
all_element_names.extend(element_names)
all_element_names = [str(name) for name in all_element_names]
all_element_names = Common.get_unique_list(all_element_names)
widget = DivWdg(css='spt_load_test_top')
span = SpanWdg('This loads all the pages defined in the Project views in popups. It will take a few minutes.')
widget.add(span)
widget.add('<br/>')
div = ActionButtonWdg(title='Run')
web = WebContainer.get_web()
base_url = web.get_base_url().to_string()
base_url = '%s/tactic/%s' %(base_url, Project.get_project_code())
div.add_behavior({'type': 'click_up',
'cbjs_action': '''
var element_names = eval(%s);
var all_element_names = eval(%s);
var top = spt.get_parent(bvr.src_el, '.spt_load_test_top');
var cb = spt.get_element(top, '.spt_input')
if (cb.checked)
element_list = all_element_names;
else
element_list = element_names
for (var k=0; k < element_list.length; k++) {
var name = element_list[k];
//if (k > 3) break;
var url = '%s/#/link/' + name;
var bvr2 = {
title: name,
target_id: 'TEST',
options: {'link': name,
'title': name,
'path': '/Link Test/' + name
},
is_popup: true};
spt.side_bar.display_link_cbk(null, bvr2);
}
''' %(element_names, all_element_names, base_url)})
widget.add('<br/>')
cb = CheckboxWdg('include_internal', label='include built-in pages')
span = SpanWdg(cb, css='med')
span.add_color('color','color')
widget.add(span)
widget.add(div)
widget.add('<br/>')
widget.add('<br/>')
return widget
class PerformanceWdg(BaseRefreshWdg):
def get_display(self):
top = self.top
top.add("<br/>")
top.add_style("margin-left: 10px")
try:
import multiprocessing
cpu_count = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
cpu_count = 'n/a'
title = DivWdg()
title.add("Click to start performance test: ")
title.add_style("float: left")
top.add(title)
title.add_style("margin-top: 5px")
button = ActionButtonWdg(title='Test')
top.add(button)
button.add_behavior( {
'type': 'click_up',
'cpu_count': cpu_count,
'cbjs_action': '''
var iterations = bvr.cpu_count;
if (iterations == 'n/a')
iterations = 1;
var server = TacticServerStub.get();
var class_name = 'tactic.ui.panel.ViewPanelWdg';
var kwargs = {
'search_type': 'sthpw/login',
'view': 'table'
};
var args = {
'args': kwargs,
'cbjs_action': function() {
spt.app_busy.show("Asyncronous Test", "Running Test ["+(count+1)+" / "+iterations+"]");
count += 1;
var time = new Date().getTime() - start;
if (time > async_avg) {
async_avg = time;
}
if (count == iterations) {
spt.app_busy.hide();
async_avg = async_avg / iterations;
alert("async: "+ async_avg + " ms");
}
}
};
var sync_avg = 0.0;
for (var i = 0; i < iterations; i++) {
spt.app_busy.show("Syncronous Requests", "Running Test ["+(i+1)+" / "+iterations+"]");
var start = new Date().getTime();
server.get_widget(class_name, args);
var time = new Date().getTime() - start;
sync_avg += time;
}
sync_avg = sync_avg / iterations;
spt.app_busy.hide();
alert("sync: " + sync_avg + " ms");
var async_avg = 0.0;
var count = 0;
spt.app_busy.show("Asyncronous Requests", "Running Test ["+(count+1)+" / "+iterations+"]");
var start = new Date().getTime();
for (var i = 0; i < iterations; i++) {
server.async_get_widget(class_name, args);
}
'''
} )
return top
|
Southpaw-TACTIC/TACTIC
|
src/tactic/ui/app/system_info_wdg.py
|
Python
|
epl-1.0
| 19,736
|
#
# Copyright (C) 2004 SIPfoundry Inc.
# Licensed by SIPfoundry under the GPL license.
#
# Copyright (C) 2004 SIP Forum
# Licensed to SIPfoundry under a Contributor Agreement.
#
#
# This file is part of SIP Forum User Agent Basic Test Suite which
# belongs to the SIP Forum Test Framework.
#
# SIP Forum User Agent Basic Test Suite is free software; you can
# redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# SIP Forum User Agent Basic Test Suite is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SIP Forum User Agent Basic Test Suite; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
# $Id: case901.py,v 1.2 2004/05/02 18:57:36 lando Exp $
#
from TestCase import TestCase
import NetworkEventHandler as NEH
import Log
class case901 (TestCase):
def config(self):
self.name = "Case 901"
self.description = "Presence of RFC3261 branch ID in Via"
self.isClient = False
self.transport = "UDP"
self.interactRequired = True
def run(self):
self.neh = NEH.NetworkEventHandler(self.transport)
#if not self.userInteraction("case901: proceed when ready to send INVITE"):
# neh.closeSock()
# return
print " !!!! PLEASE CALL ANY NUMBER/USER WITHIN 1 MINUTE !!!!"
req = self.readRequestFromNetwork(self.neh, 60)
if req is None:
self.addResult(TestCase.TC_ERROR, "missing INVITE request")
else:
if req.hasHeaderField("Via"):
if req.hasParsedHeaderField("Via"):
via = req.getParsedHeaderValue("Via")
if via.branch is None:
self.addResult(TestCase.TC_WARN, "missing branch in Via")
else:
if via.branch.startswith("z9hG4bK"):
self.addResult(TestCase.TC_PASSED, "branch value begins with magic cookie \'z9hG4bK\'")
else:
self.addResult(TestCase.TC_WARN, "Via branch does not begin with magic cookie \'z9hG4bK\'")
else:
self.addResult(TestCase.TC_ERROR, "missing parsed Via header")
else:
self.addResult(TestCase.TC_ERROR, "missing Via header in request")
self.neh.closeSock()
def onINVITE(self, message):
Log.logTest("rejecteing received INVITE with 603")
repl = self.createReply(603, "Decline")
self.writeMessageToNetwork(self.neh, repl)
ack = self.readRequestFromNetwork(self.neh)
if ack is None:
self.addResult(TestCase.TC_ERROR, "missing ACK on negative reply")
|
VoIP-co-uk/sftf
|
UserAgentBasicTestSuite/case901.py
|
Python
|
gpl-2.0
| 2,768
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# This script is called by snmptrapd and sends
# all traps to the mkeventd
#
# Bastian Kuhn, bk@mathias-kettner.de
# If you use this script please keep in mind that this script is called
# for every trap the server receives.
# To use this script, you have to configure your snmptrapd.conf like this:
# authCommunity execute public
# traphandle default /path/to/this/script
# Define the Hostname patterns here:
hostname_patterns = [
'SMI::enterprises.2349.2.2.2.5 = "(.*)"'
]
import time
import sys
import re
# Insert here the name of your omd site
site_name = "TESTSITE"
deamon_path = "/omd/sites/%s/tmp/run/mkeventd/events" % site_name
data = []
match_host = False
for line in sys.stdin:
line = line.strip()
if hostname_patterns:
for pattern in hostname_patterns:
e = re.search(pattern, line)
if e:
match_host = e.group(1)
data.append(line)
msg = " ".join(data[2:])
host, ip = data[:2]
if match_host:
host = match_host.strip()
#Write to mkevent Socket
out = open(deamon_path, "w")
timestamp = time.strftime("%b %d %H:%M:%S", time.localtime(time.time()))
out.write("<5>%s %s trap: %s\n" % (timestamp, host, msg))
out.close()
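# Hedged example (editor's addition): snmptrapd hands traphandle scripts the
# hostname and transport address on the first two stdin lines, followed by
# one "OID = value" line per varbind, e.g.
#   myhost
#   UDP: [10.0.0.5]:161
#   SNMPv2-MIB::sysUpTime.0 = 1234
# which the code above turns into a pipe write such as
#   <5>Jan 01 12:00:00 myhost trap: SNMPv2-MIB::sysUpTime.0 = 1234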
|
huiyiqun/check_mk
|
doc/treasures/Event_Console/snmptd_mkevent.py
|
Python
|
gpl-2.0
| 2,674
|
from cities_light.models import Country, City
from django.test import TestCase
from django.contrib.auth.models import User
from blog.models import Tag, ResourceType, News, Resource
from community.models import Community
from users.models import SystersUser
class TagModelTestCase(TestCase):
def test_str(self):
"""Test Tag object string representation"""
tag = Tag.objects.create(name="Foo")
self.assertEqual(str(tag), "Foo")
class ResourceTypeModelTestCase(TestCase):
def test_str(self):
"""Test ResourceType object string representation"""
resource_type = ResourceType.objects.create(name="Foo")
self.assertEqual(str(resource_type), "Foo")
class NewsModelTestCase(TestCase):
def setUp(self):
self.user = User.objects.create(username='foo', password='foobar')
self.systers_user = SystersUser.objects.get(user=self.user)
country = Country.objects.create(name='Bar', continent='AS')
location = City.objects.create(name='Foo', display_name='Foo',
country=country)
self.community = Community.objects.create(name="Foo", slug="foo",
order=1, location=location,
admin=self.systers_user)
def test_str(self):
"""Test News object string representation"""
news = News.objects.create(slug="foonews", title="Bar",
author=self.systers_user,
content="Hi there!",
community=self.community)
self.assertEqual(str(news), "Bar of Foo Community")
class ResourceModelTestCase(TestCase):
def setUp(self):
self.user = User.objects.create(username='foo', password='foobar')
self.systers_user = SystersUser.objects.get(user=self.user)
country = Country.objects.create(name='Bar', continent='AS')
location = City.objects.create(name='Foo', display_name='Foo',
country=country)
self.community = Community.objects.create(name="Foo", slug="foo",
order=1, location=location,
admin=self.systers_user)
def test_unicode(self):
"""Test Resource object string representation"""
resource = Resource.objects.create(slug="fooresource", title="Bar",
author=self.systers_user,
content="Hi there!",
community=self.community)
self.assertEqual(str(resource), "Bar of Foo Community")
|
payal97/portal
|
systers_portal/blog/tests/test_models.py
|
Python
|
gpl-2.0
| 2,763
|
#!/usr/bin/env python
# encoding: utf-8
import mistune
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import html
from flask import current_app, render_template
from application.models.system import site
from application.services.system import has
base_env = {
'site': site,
'has': has,
'tags': {
'join': lambda x: '',
},
'category': {
'join': lambda x: '',
},
'_': lambda x: x,
'i18n': lambda x, y, z: y,
'mobile_meta': lambda: '',
'get_resource': lambda x: '',
}
class HighlightRenderer(mistune.Renderer):
def block_code(self, code, lang_desc):
if not lang_desc:
return '\n<pre><code>%s</code></pre>\n' % \
mistune.escape(code)
lang_split = lang_desc.split(":")
lang = lang_split[0]
lexer = get_lexer_by_name(lang, stripall=True)
formatter = html.HtmlFormatter(
linenos=True,
style=current_app.config.get("HIGHLIGHT_STYLE", "default"))
return highlight(code, lexer, formatter)
def render_theme_template(template, **kwargs):
kwargs.update(base_env)
return render_template(template, **kwargs)
def format_markdown(mdstr):
render = HighlightRenderer()
markdown = mistune.Markdown(renderer=render)
return markdown(mdstr)
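# Hedged usage sketch (editor's addition): rendering a fenced code block; the
# exact HTML depends on the installed mistune and pygments versions.
#
#   html_out = format_markdown("```python\nprint('hi')\n```")
#   # -> a pygments-highlighted, line-numbered HTML block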
|
luke0922/MarkdownEditor
|
application/utils/template.py
|
Python
|
gpl-2.0
| 1,363
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 6 15:32:36 2014
@author: commun
Converts filtered data sets to tx2 format used in Aarhusinv.
CHANGELOG:
"""
#import of needed modules
import numpy as np
#~ def to_tx2(path_filt, path_tx2, electrode_spacing, ngates=20, pulselength=2):
#~ path_filt = '../shiprock_filt/v3/l6sk0n_1_1b.dat'
path_filt = '../shiprock_filt/v4/l8_1_mg_4b.dat'
#~ path_tx2 = '../shiprock_tx2/'
path_tx2 = '../'
electrode_spacing = 2
pulselength=2
ngates=20
frags = path_filt.split('/')
lid = frags[-1][:-4]
ngates = ngates
elec_sp = electrode_spacing
pl = pulselength
path_filt = path_filt
path_tx2 = path_tx2
filt = np.genfromtxt(path_filt, delimiter='\t', skip_header=1)
########################################################################
# DATA WRANGLING #
########################################################################
out = np.zeros((len(filt), 28+4*ngates))
#x-electrode position along measuring tape (m)
out[:,0:4] = (filt[:,0:4]-1)*elec_sp
#x-position of electrodes
#eg use of gps coordinates if available
out[:,4:8] = (filt[:,0:4]-1)*elec_sp
#y-position of electrodes
#~ out[:,8:12]
#z-position of electrodes
#~ out[:,12:16]
#apparent resistivity
a = (filt[:,0]-1)*elec_sp
b = (filt[:,1]-1)*elec_sp
m = (filt[:,2]-1)*elec_sp
n = (filt[:,3]-1)*elec_sp
am = np.sqrt((m-a)**2)
an = np.sqrt((n-a)**2)
bm = np.sqrt((m-b)**2)
bn = np.sqrt((n-b)**2)
k = 2*np.pi*(1/(1/am-1/an-1/bm+1/bn))
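# editor's note (hedged worked example): for a Wenner spread with 2 m
# spacing, e.g. a=0, m=2, n=4, b=6, the bracket evaluates to
# 1/2 - 1/4 - 1/4 + 1/2 = 0.5, so k = 2*pi/0.5 = 4*pi ~ 12.566,
# matching the textbook Wenner factor k = 2*pi*a with a = 2 m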
#~ out[:,16] = filt[:,4]
out[:,16] = k[:]*filt[:,4]
#deviation resistance
out[:,17] = filt[:,6]/10
#resistance flag (0 keeps data, 1 removes data)
#~ out[:,18]
#number of ip gates
out[:,19] = ngates
#ip values [mV/V] per gate
out[:,20:20+ngates] = filt[:,9:29]
#mdelay [ms]
#~ out[:,20+ngates] = filt[0,29]
out[:,20+ngates] = filt[0,29]
#gate lengths [ms]
#~ out[:,21+ngates:21+ngates*2] = filt[:,30:50]
out[:,21+ngates:21+ngates*2] = filt[:,30:50]
#deviation of every window
#for syscal files put 0.0 until proper error model is introduced
#for ares: use values given by device
#check for device
c, d = np.shape(filt)
if d>51:
out[:,21+ngates*2:21+ngates*3] = filt[:,51:]
else:
out[:,21+ngates*2:21+ngates*3] = 0.1
#ip flag
#~ out[:,21+ngates*3:21+ngates*4]
#stacking
out[:,21+ngates*4] = filt[:,50]
#current = (50% of pulse length)
out[:,22+ngates*4] = pl/2
#wave type
out[:,23+ngates*4] = pl/2
#Ton = pulse length [s]
out[:,24+ngates*4] = pl*1000
#Toff = pulse length [s]
out[:,25+ngates*4] = pl*1000
#Tend = time used to collect decay curve [s]
out[:,26+ngates*4] = (np.sum(filt[0,30:50])+filt[0,29])
#Tstart = time at which the decay curve is starting to be measured
out[:,27+ngates*4] = filt[0,29]
#######################################################################
# WRITE TO FILE #
#######################################################################
M = []
G = []
STD = []
IP_flag = []
Mfmt = []
Gfmt = []
STDfmt = []
IP_flagfmt = []
for num in range(ngates):
M.append('M' + str(num+1))
Mfmt.append('%.3f\t')
G.append('Gate' + str(num+1))
Gfmt.append('%.3f\t')
STD.append('Std' + str(num+1))
STDfmt.append('%.2f\t')
IP_flag.append('IP_Flag' + str(num+1))
IP_flagfmt.append('%d\t')
#~ print(M)
M = '\t'.join(M)
G = '\t'.join(G)
STD = '\t'.join(STD)
IP_flag = '\t'.join(IP_flag)
Mfmt = ''.join(Mfmt)
Gfmt = ''.join(Gfmt)
STDfmt = ''.join(STDfmt)
IP_flagfmt = ''.join(IP_flagfmt)
np.savetxt(path_tx2 + lid + '.tx2',
out,
#~ fmt='%3.6f',
fmt='%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t' + #electrode positions
'%f\t%.3f\t%d\t%d\t' + #resistance, devR, ResFlag, Ngates
Mfmt + '%.3f\t' + Gfmt + STDfmt + IP_flagfmt +
'%d\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f',
delimiter='\t',
header='xA xB xM xN UTMxA UTMxB UTMxM UTMxN UTMyA UTMyB\t' +
'UTMyM UTMyN zA zB zM zN Res Dev ResFlag Ngates\t' + M + '\tMdly\t' + G + '\t' + STD +
'\t' + IP_flag + '\tStack Current\tWaveType Ton Toff Tend Tstart',
comments='')
|
commun108/dca_testing
|
single_file_gen/scripts/to_tx2.py
|
Python
|
gpl-2.0
| 4,114
|
from sqlobject.dbconnection import registerConnection
def builder():
import mysqlconnection
return mysqlconnection.MySQLConnection
def isSupported():
try:
import MySQLdb
except ImportError:
return False
return True
registerConnection(['mysql'], builder, isSupported)
|
pacoqueen/bbinn
|
SQLObject/SQLObject-0.6.1/sqlobject/mysql/__init__.py
|
Python
|
gpl-2.0
| 306
|
# -*- coding: utf-8 -*-
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# ##########################################################################
from time import sleep
vtysh_cr = "\r\n"
route_max_wait_time = 300
class SwitchVtyshUtils():
@staticmethod
def vtysh_cfg_cmd(switch, cfg_array, show_running_cfg=False,
show_results=False):
switch("configure terminal")
for cmd in cfg_array:
result = switch(cmd)
if show_results:
print("### Config results: %s ###\n" % result)
switch("end")
@staticmethod
def wait_for_route(switch, network, next_hop, condition=True,
print_routes=False):
for i in range(route_max_wait_time):
attempt = i + 1
found = SwitchVtyshUtils.verify_bgp_route(switch, network,
next_hop, attempt,
print_routes)
if found == condition:
if condition:
result = "Route was found"
else:
result = "Route was not found"
print("### %s ###\n" % result)
return found
sleep(1)
print("### Condition not met after %s seconds ###\n" %
route_max_wait_time)
return found
@staticmethod
def verify_bgp_route(switch, network, next_hop, attempt=1,
print_routes=False):
print("### Verifying route on switch %s [attempt #%d] - Network: %s, "
"Next-Hop: %s ###\n" %
(switch.name, attempt, network, next_hop))
routes = switch("show ip bgp")
if print_routes:
print("### Routes for switch %s ###\n" % switch.name)
print("%s\n" % routes)
routes = routes.split(vtysh_cr)
for rte in routes:
if (network in rte) and (next_hop in rte):
return True
routes = switch("show ipv6 bgp")
if print_routes:
print("### Routes for switch %s ###\n" % switch.name)
print("%s\n" % routes)
routes = routes.split(vtysh_cr)
for rte in routes:
if (network in rte) and (next_hop in rte):
return True
return False
@staticmethod
def verify_cfg_exist(switch, cfg_array):
return SwitchVtyshUtils.verify_cfg_value(switch, cfg_array, '')
@staticmethod
def verify_cfg_value(switch, cfg_array, value):
running_cfg = SwitchVtyshUtils.vtysh_get_running_cfg(switch)
running_cfg = running_cfg.split(vtysh_cr)
for rc in running_cfg:
for c in cfg_array:
if (c in rc) and (str(value) in rc):
return True
return False
@staticmethod
def vtysh_get_running_cfg(switch):
return switch("show running-config")
__all__ = ["SwitchVtyshUtils"]
|
johcheun/ops-pmd
|
ops-tests/feature/bgp/vtysh_utils.py
|
Python
|
gpl-2.0
| 3,609
|
import networkx as nx
import re
def get_graph_info(file_path):
def extract_first_two(collection):
return [int(collection[0]), int(collection[1])]
with open(file_path) as ifs:
lines = map(lambda ele: ele.strip(), ifs.readlines())
lines = filter(lambda ele: not ele.startswith('#') and re.match('.*[0-9]+.*[0-9]+', ele), lines)
pair_list = map(lambda ele: extract_first_two(map(lambda ele2: ele2.strip(), ele.split())), lines)
return nx.Graph(pair_list)
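# Hedged usage sketch (editor's addition): for an edge-list file such as
#   # a comment line
#   1 2
#   2 3 0.5
# get_graph_info builds a networkx Graph from the first two whitespace-
# separated integers of every non-comment line, here edges (1, 2) and (2, 3).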
|
GraphProcessor/CommunityDetectionCodes
|
Prensentation/metrics/util.py
|
Python
|
gpl-2.0
| 504
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
SumLines.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import QgsFeature, QgsGeometry, QgsFeatureRequest, QgsDistanceArea
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterString
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class SumLines(GeoAlgorithm):
LINES = 'LINES'
POLYGONS = 'POLYGONS'
LEN_FIELD = 'LEN_FIELD'
COUNT_FIELD = 'COUNT_FIELD'
OUTPUT = 'OUTPUT'
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'ftools', 'sum_lines.png'))
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Sum line lengths')
self.group, self.i18n_group = self.trAlgorithm('Vector analysis tools')
self.addParameter(ParameterVector(self.LINES,
self.tr('Lines'), [ParameterVector.VECTOR_TYPE_LINE]))
self.addParameter(ParameterVector(self.POLYGONS,
self.tr('Polygons'), [ParameterVector.VECTOR_TYPE_POLYGON]))
self.addParameter(ParameterString(self.LEN_FIELD,
self.tr('Lines length field name', 'LENGTH')))
self.addParameter(ParameterString(self.COUNT_FIELD,
self.tr('Lines count field name', 'COUNT')))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Line length')))
def processAlgorithm(self, progress):
lineLayer = dataobjects.getObjectFromUri(self.getParameterValue(self.LINES))
polyLayer = dataobjects.getObjectFromUri(self.getParameterValue(self.POLYGONS))
lengthFieldName = self.getParameterValue(self.LEN_FIELD)
countFieldName = self.getParameterValue(self.COUNT_FIELD)
polyProvider = polyLayer.dataProvider()
(idxLength, fieldList) = vector.findOrCreateField(polyLayer,
polyLayer.pendingFields(), lengthFieldName)
(idxCount, fieldList) = vector.findOrCreateField(polyLayer, fieldList,
countFieldName)
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
fieldList.toList(), polyProvider.geometryType(), polyProvider.crs())
spatialIndex = vector.spatialindex(lineLayer)
ftLine = QgsFeature()
ftPoly = QgsFeature()
outFeat = QgsFeature()
inGeom = QgsGeometry()
outGeom = QgsGeometry()
distArea = QgsDistanceArea()
features = vector.features(polyLayer)
total = 100.0 / len(features)
hasIntersections = False
for current, ftPoly in enumerate(features):
inGeom = QgsGeometry(ftPoly.geometry())
attrs = ftPoly.attributes()
count = 0
length = 0
hasIntersections = False
lines = spatialIndex.intersects(inGeom.boundingBox())
if len(lines) > 0:
hasIntersections = True
if hasIntersections:
for i in lines:
request = QgsFeatureRequest().setFilterFid(i)
ftLine = lineLayer.getFeatures(request).next()
tmpGeom = QgsGeometry(ftLine.geometry())
if inGeom.intersects(tmpGeom):
outGeom = inGeom.intersection(tmpGeom)
length += distArea.measure(outGeom)
count += 1
outFeat.setGeometry(inGeom)
if idxLength == len(attrs):
attrs.append(length)
else:
attrs[idxLength] = length
if idxCount == len(attrs):
attrs.append(count)
else:
attrs[idxCount] = count
outFeat.setAttributes(attrs)
writer.addFeature(outFeat)
progress.setPercentage(int(current * total))
del writer
|
AsgerPetersen/QGIS
|
python/plugins/processing/algs/qgis/SumLines.py
|
Python
|
gpl-2.0
| 5,242
|
import wx
class Choice(wx.Choice):
def GetValue(self):
return self.GetSelection()
|
onoga/toolib
|
toolib/wx/controls/Choice.py
|
Python
|
gpl-2.0
| 86
|
__author__ = 'Tom Schaul, tom@idsia.ch'
from pybrain.utilities import blockCombine
from scipy import mat, dot, outer
from scipy.linalg import inv, cholesky
def calcFisherInformation(sigma, invSigma=None, factorSigma=None):
""" Compute the exact Fisher Information Matrix of a Gaussian distribution,
given its covariance matrix.
Returns a list of the diagonal blocks. """
    if invSigma is None:
        invSigma = inv(sigma)
    if factorSigma is None:
        factorSigma = cholesky(sigma)
dim = sigma.shape[0]
fim = [invSigma]
for k in range(dim):
D = invSigma[k:, k:].copy()
D[0, 0] += factorSigma[k, k] ** -2
fim.append(D)
return fim
def calcInvFisher(sigma, invSigma=None, factorSigma=None):
""" Efficiently compute the exact inverse of the FIM of a Gaussian.
Returns a list of the diagonal blocks. """
    if invSigma is None:
        invSigma = inv(sigma)
    if factorSigma is None:
        factorSigma = cholesky(sigma)
dim = sigma.shape[0]
invF = [mat(1 / (invSigma[-1, -1] + factorSigma[-1, -1] ** -2))]
invD = 1 / invSigma[-1, -1]
for k in reversed(range(dim - 1)):
v = invSigma[k + 1:, k]
w = invSigma[k, k]
wr = w + factorSigma[k, k] ** -2
u = dot(invD, v)
s = dot(v, u)
q = 1 / (w - s)
qr = 1 / (wr - s)
t = -(1 + q * s) / w
tr = -(1 + qr * s) / wr
invF.append(blockCombine([[qr, tr * u], [mat(tr * u).T, invD + qr * outer(u, u)]]))
invD = blockCombine([[q , t * u], [mat(t * u).T, invD + q * outer(u, u)]])
invF.append(sigma)
invF.reverse()
return invF
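# Hedged usage sketch (editor's addition): cross-checking the two functions on
# a small SPD covariance. calcFisherInformation's first block is inv(sigma)
# and calcInvFisher's first block is sigma, so they should be exact inverses.
#
#   from scipy import array
#   from scipy.linalg import inv
#   sigma = array([[2.0, 0.3], [0.3, 1.0]])
#   fim = calcFisherInformation(sigma)
#   invfim = calcInvFisher(sigma)
#   assert abs(inv(fim[0]) - invfim[0]).max() < 1e-9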
|
iut-ibk/Calimero
|
site-packages/pybrain/tools/fisher.py
|
Python
|
gpl-2.0
| 1,728
|
class MastermindError(Exception):
@property
def code(self):
return MASTERMIND_ERROR_CODES[type(self)]
@staticmethod
def make_error(code, msg):
if code not in MASTERMIND_ERROR_CLS:
raise ValueError('Unknown error code {}'.format(code))
return MASTERMIND_ERROR_CLS[code](msg)
GENERAL_ERROR_CODE = 1024
MASTERMIND_ERROR_CODES = {
MastermindError: GENERAL_ERROR_CODE
}
MASTERMIND_ERROR_CLS = dict((v, k) for k, v in MASTERMIND_ERROR_CODES.iteritems())
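# Hedged usage sketch (editor's addition): round-tripping the generic error
# code (this module targets Python 2, hence iteritems above).
#
#   err = MastermindError.make_error(GENERAL_ERROR_CODE, 'backend failed')
#   assert isinstance(err, MastermindError)
#   assert err.code == GENERAL_ERROR_CODE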
|
yandex/mastermind
|
src/python-mastermind/src/mastermind/errors.py
|
Python
|
gpl-2.0
| 508
|
#!/usr/bin/env python
__author__ = "Dulip Withanage"
__email__ = "dulip.withanage@gmail.com"
import re
import string
import sys
import operator
import globals as gv
import os
import subprocess
import shutil
#from django.utils.encoding import smart_str
class FrontMatterParser:
def __init__(self, gv):
self.gv = gv
def parse_authors(self, filestring):
# this works for perception-monospace, equations tables, laddering, neoliberalism, snowball, valuechain, sodium
name = re.findall(r'(\n|<p>|<bold>|<italic>)(([A-Za-z\-\.]+)\*?\s){2,5}(&|and|et|und)\s(([A-Za-z\-\.]+)\*?\s?){2,5}(</p>|</bold>|</italic>|\n)',filestring)
if len(name) == 0:
# this works for racialprofiling, antiseptics, eeg_comicsans, leadership, systemsthinker
# this would work for science.doc but there are way too many authors and that affects the string
# would work for rating.doc but need to fix linebreak comments from output
name2 = re.findall(r'(<p>|<bold>|<italic>)(([A-Za-z\-\.]+)(,?\s)){1,20}([A-Za-z\-\.]+)?(</p>|</bold>|</italic>)',filestring)
# this loops through strings and prefers those that occur earlier + have more periods/commas
guess2score = {}
guess2number = 0
for g in name2:
guess2 =''.join(str(e) for e in g)
periods = re.findall(r'\.',guess2)
italics = re.findall(r'italic',guess2)
guess2score[guess2] = len(periods)
guess2score[guess2] += len(italics)
guess2score[guess2] -= guess2number
guess2number += 1
#print operator.itemgetter(1)
print(guess2score.items())
print(type(operator.itemgetter(1)))
            name.append(max(guess2score.items(), key=operator.itemgetter(1))[0])
        # re.findall with groups returns tuples, so normalise to a plain string
        first_match = ''.join(name[0]) if isinstance(name[0], tuple) else name[0]
        striptags_name = re.sub(r'<.*>', '', first_match)
        authorString = re.sub(r'[Bb][Yy]\s', '', striptags_name)
# this is the author string. could try sending to parscit to get individual author names.
return authorString
# entrepreneurship needs fixing, will be tough, has authors in multiple XML elements
def parse_title(self, filestring):
# need to anticipate which other special characters are allowable in titles
# first, check if a subtitle and title have wound up separated from one another
title = re.findall(r'(\n|<p>|<bold>|<italic>)(([A-Za-z\-\.]+)(,?\s)){1,20}([A-Za-z\-\.]+)?:(</p>|</bold>|</italic>|\n)(.|\s)*?(\n|<p>|<bold>|<italic>)(([A-Za-z\-\.]+)((:|,)?\s)){1,20}([A-Za-z\-\.]+)?\??(</p>|</bold>|</italic>|\n)',filestring)
if len(title) == 0:
# this works for antiseptics, eeg_comicsans, entrepreneurship, laddering, racialprofiling, snowball, sodium
title2 = re.findall(r'(\n|<p>|<bold>|<italic>)(([A-Za-z\-\.]+)((:|,)?\s)){1,20}([A-Za-z\-\.]+)?\??(</p>|</bold>|</italic>|\n)',filestring)
title = title2
#title0 = ''.join(title[0])
title_first= ''.join(title[0])
#remove <> tags
titleString = re.sub(r'<(.*)>','',re.sub(r'</(.*)>','',title_first))
return titleString
def get_file_text(self, filename):
f = open(filename)
text= f.read()
f.close()
return text
def update_tmp_file(self):
shutil.copy2(self.gv.NLM_FILE_PATH,self.gv.NLM_TEMP_FILE_PATH)
def write_output(self, text):
out = open(self.gv.NLM_FILE_PATH,'w')
out.write(text)
out.close()
def run(self):
text = self.get_file_text(self.gv.NLM_TEMP_FILE_PATH)
#self.parse_authors(text)
self.parse_title(text)
self.write_output(text)
self.update_tmp_file()
|
MartinPaulEve/meTypeset
|
bin/frontmatterparser.py
|
Python
|
gpl-2.0
| 3,339
|
# -*- coding: utf-8 -*-
import os
import grp
import tempfile
from django.conf import settings
from utilities import encoding
import shutil
import zipfile
gid = None
if (settings.USEPRAKTOMATTESTER):
gid = grp.getgrnam('praktomat').gr_gid
def makedirs(path):
if os.path.exists(path):
return
else:
(head, tail) = os.path.split(path)
makedirs(head)
os.mkdir(path)
if (gid):
os.chown(path, -1, gid)
os.chmod(path, 0o770)
def create_file(path, content, override=True, binary=False):
""" """
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
makedirs(dirname)
else:
if os.path.exists(path):
if override: # delete file
os.remove(path)
else: # throw exception
raise Exception('File already exists')
with open(path, 'wb') as fd:
if binary:
fd.write(content)
else:
fd.write(encoding.get_utf8(encoding.get_unicode(content)))
if (gid):
# chown :praktomat <path>
os.chown(path, -1, gid)
# rwxrwx--- access for praktomattester:praktomat
os.chmod(path, 0o770)
def copy_file(from_path, to_path, to_is_directory=False, override=True):
""" """
if to_is_directory:
to_path = os.path.join(to_path, os.path.basename(from_path))
with open(from_path, "rb") as fd:
create_file(to_path, fd.read(), override=override, binary=True)
def create_tempfolder(path):
makedirs(path)
tempfile.tempdir = path
new_tmpdir = tempfile.mkdtemp()
if (gid):
os.chown(new_tmpdir, -1, gid)
os.chmod(new_tmpdir, 0o770)
return new_tmpdir
class InvalidZipFile(Exception):
pass
def unpack_zipfile_to(zipfilename, to_path, override_cb=None, file_cb=None):
"""
Extracts a zipfile to the given location, trying to safeguard against wrong paths
The override_cb is called for every file that overwrites an existing file,
with the name of the file in the archive as the parameter.
The file_cb is called for every file, after extracting it.
"""
if not zipfile.is_zipfile(zipfilename):
raise InvalidZipFile("File %s is not a zipfile." % zipfilename)
zip = zipfile.ZipFile(zipfilename, 'r')
if zip.testzip():
raise InvalidZipFile("File %s is invalid." % zipfilename)
# zip.extractall would not protect against ..-paths,
# it would do so from python 2.7.4 on.
for finfo in zip.infolist():
dest = os.path.join(to_path, finfo.filename)
# This check is from http://stackoverflow.com/a/10077309/946226
if not os.path.realpath(os.path.abspath(dest)).startswith(to_path):
raise InvalidZipFile("File %s contains illegal path %s." % (zipfilename, finfo.filename))
if override_cb is not None and os.path.exists(dest):
override_cb(finfo.filename)
zip.extract(finfo, to_path)
if file_cb is not None and os.path.isfile(os.path.join(to_path, finfo.filename)):
file_cb(finfo.filename)
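# Hedged usage sketch (editor's addition): the paths and callback below are
# illustrative, not part of the original module.
#
#   def log_overwrite(name):
#       print('overwriting %s' % name)
#   unpack_zipfile_to('/tmp/upload.zip', '/tmp/dest',
#                     override_cb=log_overwrite)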
|
KITPraktomatTeam/Praktomat
|
src/utilities/file_operations.py
|
Python
|
gpl-2.0
| 3,099
|
from __future__ import print_function, unicode_literals
from datetime import datetime, timedelta
import time
import unittest
from DenyHosts.counter import Counter, CounterRecord
class CounterRecordTest(unittest.TestCase):
def test_init(self):
c = CounterRecord()
self.assertEqual(c.get_count(), 0)
# Counter.__date is initialized with time.asctime(), so there isn't
# much to test beyond the type
self.assertTrue(isinstance(c.get_date(), str))
def test_init_provided_date(self):
"""
CounterRecord.__date is intended to be a string (for some reason; a datetime
object would be more appropriate), but any object can be used. Verify that
what we pass to the constructor is accessible.
"""
date = object()
c = CounterRecord(date=date)
self.assertTrue(c.get_date() is date)
def test_init_provided_count(self):
"""
CounterRecord.__count is intended to be numeric, but any object can be used.
Verify that what we pass to the constructor is accessible.
"""
count = object()
c = CounterRecord(count=count)
self.assertTrue(c.get_count() is count)
def test_str(self):
"""
CounterRecord.__str__ is actually used in PurgeCounter.write_data, so it's
worth testing
"""
count = 1
date = object()
c = CounterRecord(count=count, date=date)
string = '%d:%s' % (count, date)
self.assertEqual(str(c), string)
def test_add(self):
"""
CounterRecord.__add__ is *horrible* design, but that's how it's been for a
very long time. I want test coverage for the current behavior before making
any changes.
"""
c = CounterRecord()
orig_date = c.get_date()
self.assertEqual(c.get_count(), 0)
increment = 4
# !
c + increment
self.assertEqual(c.get_count(), increment)
# Original attempt: self.assertNotEqual(c.get_date(), orig_date)
# time.asctime only provides seconds in that string representation of the
# date, though, so just verify that the two strings are different objects
# since they'll usually be equal
self.assertFalse(c.get_date() is orig_date)
def test_reset_count(self):
c = CounterRecord()
c + 1
orig_date = c.get_date()
c.reset_count()
self.assertEqual(c.get_count(), 0)
self.assertTrue(c.get_date() is orig_date)
def test_age_count_newer(self):
"""
Initialize a CounterRecord to one hour ago, then call age_count with 2 hours
to verify that the count won't reset. ("Reset if the stored date is older than
2 hours ago")
"""
one_hour_ago = datetime.now() - timedelta(hours=1)
date_str = time.asctime(one_hour_ago.timetuple())
count = object()
c = CounterRecord(count=count, date=date_str)
c.age_count(2 * 60 * 60)
self.assertEqual(c.get_count(), count)
def test_age_count_older(self):
"""
Initialize a CounterRecord to one hour ago, then reset the count by passing 0
to age_count (i.e. "reset if the stored date is older than now")
"""
one_hour_ago = datetime.now() - timedelta(hours=1)
date_str = time.asctime(one_hour_ago.timetuple())
count = object()
c = CounterRecord(count=count, date=date_str)
c.age_count(0)
self.assertEqual(c.get_count(), 0)
def test_counter_repr(self):
one_hour_ago = datetime.now() - timedelta(hours=1)
date_str = time.asctime(one_hour_ago.timetuple())
count = object()
c = CounterRecord(count=count, date=date_str)
c.age_count(0)
self.assertEqual(c.__repr__(), 'CountRecord <{} - {}>'.format(0, date_str))
class CounterTest(unittest.TestCase):
def test_init(self):
c = Counter()
self.assertEqual(len(c), 0)
def test_missing_key(self):
c = Counter()
key = 'key'
value = c[key]
self.assertEqual(value.get_count(), 0)
self.assertTrue(key in c)
def test_existing_key(self):
key = 'key'
value = object()
c = Counter()
c[key] = value
self.assertTrue(c[key] is value)
|
HardLight/denyhosts
|
tests/test_counter.py
|
Python
|
gpl-2.0
| 4,362
|
#!/usr/bin/python -u
#
#
#
#################################################################################
# Start off by implementing a general purpose event loop for anyones use
#################################################################################
import sys
import getopt
import os
import libvirt
import select
import errno
import time
import threading
# For the sake of demonstration, this example program includes
# an implementation of a pure python event loop. Most applications
# would be better off just using the default libvirt event loop
# APIs, instead of implementing this in python. The exception is
# where an application wants to integrate with an existing 3rd
# party event loop impl
#
# Change this to 'False' to make the demo use the native
# libvirt event loop impl
use_pure_python_event_loop = True
do_debug = False
def debug(msg):
global do_debug
if do_debug:
print(msg)
#
# This general purpose event loop will support waiting for file handle
# I/O and errors events, as well as scheduling repeatable timers with
# a fixed interval.
#
# It is a pure python implementation based around the poll() API
#
class virEventLoopPure:
# This class contains the data we need to track for a
# single file handle
class virEventLoopPureHandle:
def __init__(self, handle, fd, events, cb, opaque):
self.handle = handle
self.fd = fd
self.events = events
self.cb = cb
self.opaque = opaque
def get_id(self):
return self.handle
def get_fd(self):
return self.fd
def get_events(self):
return self.events
def set_events(self, events):
self.events = events
def dispatch(self, events):
self.cb(self.handle,
self.fd,
events,
self.opaque)
# This class contains the data we need to track for a
# single periodic timer
class virEventLoopPureTimer:
def __init__(self, timer, interval, cb, opaque):
self.timer = timer
self.interval = interval
self.cb = cb
self.opaque = opaque
self.lastfired = 0
def get_id(self):
return self.timer
def get_interval(self):
return self.interval
def set_interval(self, interval):
self.interval = interval
def get_last_fired(self):
return self.lastfired
def set_last_fired(self, now):
self.lastfired = now
def dispatch(self):
self.cb(self.timer,
self.opaque)
def __init__(self):
self.poll = select.poll()
self.pipetrick = os.pipe()
self.pendingWakeup = False
self.runningPoll = False
self.nextHandleID = 1
self.nextTimerID = 1
self.handles = []
self.timers = []
self.quit = False
# The event loop can be used from multiple threads at once.
# Specifically while the main thread is sleeping in poll()
# waiting for events to occur, another thread may come along
# and add/update/remove a file handle, or timer. When this
# happens we need to interrupt the poll() sleep in the other
# thread, so that it'll see the file handle / timer changes.
#
# Using OS level signals for this is very unreliable and
# hard to implement correctly. Thus we use the real classic
# "self pipe" trick. A anonymous pipe, with one end registered
# with the event loop for input events. When we need to force
# the main thread out of a poll() sleep, we simple write a
# single byte of data to the other end of the pipe.
debug("Self pipe watch %d write %d" %(self.pipetrick[0], self.pipetrick[1]))
self.poll.register(self.pipetrick[0], select.POLLIN)
# Calculate when the next timeout is due to occur, returning
# the absolute timestamp for the next timeout, or 0 if there is
# no timeout due
def next_timeout(self):
next = 0
for t in self.timers:
last = t.get_last_fired()
interval = t.get_interval()
if interval < 0:
continue
if next == 0 or (last + interval) < next:
next = last + interval
return next
# Lookup a virEventLoopPureHandle object based on file descriptor
def get_handle_by_fd(self, fd):
for h in self.handles:
if h.get_fd() == fd:
return h
return None
# Lookup a virEventLoopPureHandle object based on its event loop ID
def get_handle_by_id(self, handleID):
for h in self.handles:
if h.get_id() == handleID:
return h
return None
# This is the heart of the event loop, performing one single
# iteration. It asks when the next timeout is due, and then
    # calculates the maximum amount of time it is able to sleep
# for in poll() pending file handle events.
#
# It then goes into the poll() sleep.
#
    # When poll() returns, there will be zero or more file handle
# events which need to be dispatched to registered callbacks
# It may also be time to fire some periodic timers.
#
    # Due to the coarse granularity of scheduler timeslices, if
    # we ask for a sleep of 500ms in order to satisfy a timer, we
    # may return up to 1 scheduler timeslice early. So even though
# our sleep timeout was reached, the registered timer may not
# technically be at its expiry point. This leads to us going
# back around the loop with a crazy 5ms sleep. So when checking
# if timeouts are due, we allow a margin of 20ms, to avoid
# these pointless repeated tiny sleeps.
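    #
    # Worked example of that margin: a 500ms timer that last fired at
    # t=1000ms "wants" to fire again at t=1500ms. If poll() wakes us one
    # timeslice early at t=1495ms, then 1495 >= (1500 - 20) still holds,
    # so the timer fires now instead of forcing another pointless ~5ms sleep.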
def run_once(self):
sleep = -1
self.runningPoll = True
try:
next = self.next_timeout()
debug("Next timeout due at %d" % next)
if next > 0:
now = int(time.time() * 1000)
if now >= next:
sleep = 0
                else:
                    # poll() takes its timeout in milliseconds, and
                    # next/now are already expressed in milliseconds
                    sleep = next - now
debug("Poll with a sleep of %d" % sleep)
events = self.poll.poll(sleep)
# Dispatch any file handle events that occurred
for (fd, revents) in events:
                # See if the event was from the self-pipe
                # telling us to wake up. If so, then discard
                # the data and just continue
if fd == self.pipetrick[0]:
self.pendingWakeup = False
data = os.read(fd, 1)
continue
h = self.get_handle_by_fd(fd)
if h:
debug("Dispatch fd %d handle %d events %d" % (fd, h.get_id(), revents))
h.dispatch(self.events_from_poll(revents))
now = int(time.time() * 1000)
for t in self.timers:
interval = t.get_interval()
if interval < 0:
continue
want = t.get_last_fired() + interval
# Deduct 20ms, since scheduler timeslice
# means we could be ever so slightly early
if now >= (want-20):
debug("Dispatch timer %d now %s want %s" % (t.get_id(), str(now), str(want)))
t.set_last_fired(now)
t.dispatch()
        except (os.error, select.error) as e:
if e.args[0] != errno.EINTR:
raise
finally:
self.runningPoll = False
    # Actually run the event loop forever
def run_loop(self):
self.quit = False
while not self.quit:
self.run_once()
def interrupt(self):
if self.runningPoll and not self.pendingWakeup:
self.pendingWakeup = True
os.write(self.pipetrick[1], 'c'.encode("UTF-8"))
# Registers a new file handle 'fd', monitoring for 'events' (libvirt
# event constants), firing the callback cb() when an event occurs.
    # Returns a unique integer identifier for this handle, that should be
# used to later update/remove it
def add_handle(self, fd, events, cb, opaque):
handleID = self.nextHandleID + 1
self.nextHandleID = self.nextHandleID + 1
h = self.virEventLoopPureHandle(handleID, fd, events, cb, opaque)
self.handles.append(h)
self.poll.register(fd, self.events_to_poll(events))
self.interrupt()
debug("Add handle %d fd %d events %d" % (handleID, fd, events))
return handleID
# Registers a new timer with periodic expiry at 'interval' ms,
# firing cb() each time the timer expires. If 'interval' is -1,
# then the timer is registered, but not enabled
    # Returns a unique integer identifier for this timer, that should be
# used to later update/remove it
def add_timer(self, interval, cb, opaque):
timerID = self.nextTimerID + 1
self.nextTimerID = self.nextTimerID + 1
h = self.virEventLoopPureTimer(timerID, interval, cb, opaque)
self.timers.append(h)
self.interrupt()
debug("Add timer %d interval %d" % (timerID, interval))
return timerID
# Change the set of events to be monitored on the file handle
def update_handle(self, handleID, events):
h = self.get_handle_by_id(handleID)
if h:
h.set_events(events)
self.poll.unregister(h.get_fd())
self.poll.register(h.get_fd(), self.events_to_poll(events))
self.interrupt()
debug("Update handle %d fd %d events %d" % (handleID, h.get_fd(), events))
# Change the periodic frequency of the timer
def update_timer(self, timerID, interval):
for h in self.timers:
if h.get_id() == timerID:
h.set_interval(interval)
self.interrupt()
debug("Update timer %d interval %d" % (timerID, interval))
break
# Stop monitoring for events on the file handle
def remove_handle(self, handleID):
handles = []
for h in self.handles:
if h.get_id() == handleID:
self.poll.unregister(h.get_fd())
debug("Remove handle %d fd %d" % (handleID, h.get_fd()))
else:
handles.append(h)
self.handles = handles
self.interrupt()
# Stop firing the periodic timer
def remove_timer(self, timerID):
timers = []
for h in self.timers:
if h.get_id() != timerID:
timers.append(h)
debug("Remove timer %d" % timerID)
self.timers = timers
self.interrupt()
# Convert from libvirt event constants, to poll() events constants
def events_to_poll(self, events):
ret = 0
if events & libvirt.VIR_EVENT_HANDLE_READABLE:
ret |= select.POLLIN
if events & libvirt.VIR_EVENT_HANDLE_WRITABLE:
ret |= select.POLLOUT
if events & libvirt.VIR_EVENT_HANDLE_ERROR:
ret |= select.POLLERR
if events & libvirt.VIR_EVENT_HANDLE_HANGUP:
ret |= select.POLLHUP
return ret
# Convert from poll() event constants, to libvirt events constants
def events_from_poll(self, events):
ret = 0
if events & select.POLLIN:
ret |= libvirt.VIR_EVENT_HANDLE_READABLE
if events & select.POLLOUT:
ret |= libvirt.VIR_EVENT_HANDLE_WRITABLE
if events & select.POLLNVAL:
ret |= libvirt.VIR_EVENT_HANDLE_ERROR
if events & select.POLLERR:
ret |= libvirt.VIR_EVENT_HANDLE_ERROR
if events & select.POLLHUP:
ret |= libvirt.VIR_EVENT_HANDLE_HANGUP
return ret
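# The "self pipe" trick used by the constructor above is worth seeing in
# isolation. The helper below is an illustrative sketch only (nothing in
# this demo calls it): one thread writes a single byte to the pipe, which
# kicks the other thread out of its poll() sleep long before the timeout.
def selfPipeDemo():
    rfd, wfd = os.pipe()
    poller = select.poll()
    poller.register(rfd, select.POLLIN)
    # Arrange for a second thread to wake the poller after ~0.5 seconds
    threading.Timer(0.5, lambda: os.write(wfd, b'c')).start()
    events = poller.poll(10000)  # would block for 10s without the wakeup
    for fd, revents in events:
        os.read(fd, 1)           # drain the wakeup byte
    os.close(rfd)
    os.close(wfd)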
###########################################################################
# Now glue an instance of the general event loop into libvirt's event loop
###########################################################################
# This single global instance of the event loop will be used for
# monitoring libvirt events
eventLoop = virEventLoopPure()
# This keeps track of what thread is running the event loop,
# (if it is run in a background thread)
eventLoopThread = None
# This next set of 6 methods is the glue between the official
# libvirt events API, and our particular impl of the event loop
#
# There is no reason why the 'virEventLoopPure' has to be used.
# An application could easily make these 6 glue methods hook into
# another event loop such as GLib's, or something like the python
# Twisted event framework. See the sketch after these methods for
# a taste of such an integration.
def virEventAddHandleImpl(fd, events, cb, opaque):
global eventLoop
return eventLoop.add_handle(fd, events, cb, opaque)
def virEventUpdateHandleImpl(handleID, events):
global eventLoop
return eventLoop.update_handle(handleID, events)
def virEventRemoveHandleImpl(handleID):
global eventLoop
return eventLoop.remove_handle(handleID)
def virEventAddTimerImpl(interval, cb, opaque):
global eventLoop
return eventLoop.add_timer(interval, cb, opaque)
def virEventUpdateTimerImpl(timerID, interval):
global eventLoop
return eventLoop.update_timer(timerID, interval)
def virEventRemoveTimerImpl(timerID):
global eventLoop
return eventLoop.remove_timer(timerID)
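# As mentioned above, the same glue can target another event loop. Here is
# a partial sketch mapping the two "add" calls onto Python's asyncio. It is
# illustrative only and not used by this demo (all the asyncio* names below
# are invented); a complete impl must also cover update/remove, writable
# events and disabled (-1) timers -- the libvirtaio module shipped with
# libvirt-python is a real implementation of this idea.
import asyncio
asyncioLoop = asyncio.new_event_loop()
asyncioNextID = 1
def asyncioAddHandleImpl(fd, events, cb, opaque):
    global asyncioNextID
    watch = asyncioNextID
    asyncioNextID += 1
    if events & libvirt.VIR_EVENT_HANDLE_READABLE:
        # asyncio invokes cb(watch, fd, events, opaque) when fd is readable
        asyncioLoop.add_reader(fd, cb, watch, fd,
                               libvirt.VIR_EVENT_HANDLE_READABLE, opaque)
    return watch
def asyncioAddTimerImpl(interval, cb, opaque):
    global asyncioNextID
    timer = asyncioNextID
    asyncioNextID += 1
    def fire():
        cb(timer, opaque)
        # re-arm: libvirt timers fire repeatedly until removed
        asyncioLoop.call_later(interval / 1000.0, fire)
    if interval >= 0:
        asyncioLoop.call_later(interval / 1000.0, fire)
    return timer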
# This tells libvirt what event loop implementation it
# should use
def virEventLoopPureRegister():
libvirt.virEventRegisterImpl(virEventAddHandleImpl,
virEventUpdateHandleImpl,
virEventRemoveHandleImpl,
virEventAddTimerImpl,
virEventUpdateTimerImpl,
virEventRemoveTimerImpl)
# Directly run the event loop in the current thread
def virEventLoopPureRun():
global eventLoop
eventLoop.run_loop()
def virEventLoopNativeRun():
while True:
libvirt.virEventRunDefaultImpl()
# Spawn a background thread to run the event loop
def virEventLoopPureStart():
global eventLoopThread
virEventLoopPureRegister()
eventLoopThread = threading.Thread(target=virEventLoopPureRun, name="libvirtEventLoop")
    eventLoopThread.daemon = True
eventLoopThread.start()
def virEventLoopNativeStart():
global eventLoopThread
libvirt.virEventRegisterDefaultImpl()
eventLoopThread = threading.Thread(target=virEventLoopNativeRun, name="libvirtEventLoop")
    eventLoopThread.daemon = True
eventLoopThread.start()
##########################################################################
# Everything that now follows is a simple demo of domain lifecycle events
##########################################################################
def eventToString(event):
eventStrings = ( "Defined",
"Undefined",
"Started",
"Suspended",
"Resumed",
"Stopped",
"Shutdown",
"PMSuspended",
"Crashed" )
return eventStrings[event]
def detailToString(event, detail):
eventStrings = (
( "Added", "Updated" ),
( "Removed", ),
( "Booted", "Migrated", "Restored", "Snapshot", "Wakeup" ),
( "Paused", "Migrated", "IOError", "Watchdog", "Restored", "Snapshot", "API error" ),
( "Unpaused", "Migrated", "Snapshot" ),
( "Shutdown", "Destroyed", "Crashed", "Migrated", "Saved", "Failed", "Snapshot"),
( "Finished", ),
( "Memory", "Disk" ),
( "Panicked", )
)
return eventStrings[event][detail]
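# For example, assuming the libvirt lifecycle enums line up with the
# tuples above (VIR_DOMAIN_EVENT_STARTED == 2, with detail
# VIR_DOMAIN_EVENT_STARTED_BOOTED == 0), a freshly booted guest gives:
#   eventToString(2)     -> "Started"
#   detailToString(2, 0) -> "Booted"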
def myDomainEventCallback1 (conn, dom, event, detail, opaque):
print("myDomainEventCallback1 EVENT: Domain %s(%s) %s %s" % (dom.name(), dom.ID(),
eventToString(event),
detailToString(event, detail)))
def myDomainEventCallback2 (conn, dom, event, detail, opaque):
print("myDomainEventCallback2 EVENT: Domain %s(%s) %s %s" % (dom.name(), dom.ID(),
eventToString(event),
detailToString(event, detail)))
def myDomainEventRebootCallback(conn, dom, opaque):
print("myDomainEventRebootCallback: Domain %s(%s)" % (dom.name(), dom.ID()))
def myDomainEventRTCChangeCallback(conn, dom, utcoffset, opaque):
print("myDomainEventRTCChangeCallback: Domain %s(%s) %d" % (dom.name(), dom.ID(), utcoffset))
def myDomainEventWatchdogCallback(conn, dom, action, opaque):
print("myDomainEventWatchdogCallback: Domain %s(%s) %d" % (dom.name(), dom.ID(), action))
def myDomainEventIOErrorCallback(conn, dom, srcpath, devalias, action, opaque):
print("myDomainEventIOErrorCallback: Domain %s(%s) %s %s %d" % (dom.name(), dom.ID(), srcpath, devalias, action))
def myDomainEventGraphicsCallback(conn, dom, phase, localAddr, remoteAddr, authScheme, subject, opaque):
print("myDomainEventGraphicsCallback: Domain %s(%s) %d %s" % (dom.name(), dom.ID(), phase, authScheme))
def myDomainEventDiskChangeCallback(conn, dom, oldSrcPath, newSrcPath, devAlias, reason, opaque):
print("myDomainEventDiskChangeCallback: Domain %s(%s) disk change oldSrcPath: %s newSrcPath: %s devAlias: %s reason: %s" % (
dom.name(), dom.ID(), oldSrcPath, newSrcPath, devAlias, reason))
def myDomainEventTrayChangeCallback(conn, dom, devAlias, reason, opaque):
print("myDomainEventTrayChangeCallback: Domain %s(%s) tray change devAlias: %s reason: %s" % (
dom.name(), dom.ID(), devAlias, reason))
def myDomainEventPMWakeupCallback(conn, dom, reason, opaque):
print("myDomainEventPMWakeupCallback: Domain %s(%s) system pmwakeup" % (
dom.name(), dom.ID()))
def myDomainEventPMSuspendCallback(conn, dom, reason, opaque):
print("myDomainEventPMSuspendCallback: Domain %s(%s) system pmsuspend" % (
dom.name(), dom.ID()))
def myDomainEventBalloonChangeCallback(conn, dom, actual, opaque):
print("myDomainEventBalloonChangeCallback: Domain %s(%s) %d" % (dom.name(), dom.ID(), actual))
def myDomainEventPMSuspendDiskCallback(conn, dom, reason, opaque):
print("myDomainEventPMSuspendDiskCallback: Domain %s(%s) system pmsuspend_disk" % (
dom.name(), dom.ID()))
def myDomainEventDeviceRemovedCallback(conn, dom, dev, opaque):
print("myDomainEventDeviceRemovedCallback: Domain %s(%s) device removed: %s" % (
dom.name(), dom.ID(), dev))
run = True
def myConnectionCloseCallback(conn, reason, opaque):
    global run
    reasonStrings = (
        "Error", "End-of-file", "Keepalive", "Client",
    )
    print("myConnectionCloseCallback: %s: %s" % (conn.getURI(), reasonStrings[reason]))
    run = False
def usage():
print("usage: "+os.path.basename(sys.argv[0])+" [-hdl] [uri]")
print(" uri will default to qemu:///system")
print(" --help, -h Print(this help message")
print(" --debug, -d Print(debug output")
print(" --loop, -l Toggle event-loop-implementation")
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hdl", ["help", "debug", "loop"])
    except getopt.GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if o in ("-d", "--debug"):
global do_debug
do_debug = True
if o in ("-l", "--loop"):
global use_pure_python_event_loop
use_pure_python_event_loop ^= True
if len(args) >= 1:
uri = args[0]
else:
uri = "qemu:///system"
print("Using uri:" + uri)
# Run a background thread with the event loop
if use_pure_python_event_loop:
virEventLoopPureStart()
else:
virEventLoopNativeStart()
vc = libvirt.openReadOnly(uri)
# Close connection on exit (to test cleanup paths)
    def exit():
        print("Closing " + vc.getURI())
        vc.close()
    # sys.exitfunc is gone in Python 3, so register the hook via atexit
    atexit.register(exit)
vc.registerCloseCallback(myConnectionCloseCallback, None)
#Add 2 callbacks to prove this works with more than just one
vc.domainEventRegister(myDomainEventCallback1,None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, myDomainEventCallback2, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_REBOOT, myDomainEventRebootCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_RTC_CHANGE, myDomainEventRTCChangeCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_IO_ERROR, myDomainEventIOErrorCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_WATCHDOG, myDomainEventWatchdogCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_GRAPHICS, myDomainEventGraphicsCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_DISK_CHANGE, myDomainEventDiskChangeCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_TRAY_CHANGE, myDomainEventTrayChangeCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_PMWAKEUP, myDomainEventPMWakeupCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_PMSUSPEND, myDomainEventPMSuspendCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_BALLOON_CHANGE, myDomainEventBalloonChangeCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_PMSUSPEND_DISK, myDomainEventPMSuspendDiskCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_DEVICE_REMOVED, myDomainEventDeviceRemovedCallback, None)
vc.setKeepAlive(5, 3)
# The rest of your app would go here normally, but for sake
# of demo we'll just go to sleep. The other option is to
# run the event loop in your main thread if your app is
# totally event based.
while run:
time.sleep(1)
if __name__ == "__main__":
main()
|
utkarshsins/baadal-libvirt-python
|
examples/event-test.py
|
Python
|
gpl-2.0
| 22,504
|
#!/usr/bin/env python3
#
#
#
# This file is part of librix-thinclient.
#
# librix-thinclient is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# librix-thinclient is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with librix-thinclient. If not, see <http://www.gnu.org/licenses/>.
__all__ = [
'ui',
'lib',
'daemon',
'modules',
]
|
andrevmatos/Librix-ThinClient
|
src/__init__.py
|
Python
|
gpl-2.0
| 777
|
# $Id: uas-subscribe-terminated-retry.py 4188 2012-06-29 09:01:17Z nanang $
#
import inc_const as const
PJSUA = ["--null-audio --max-calls=1 --id sip:pjsua@localhost --add-buddy $SIPP_URI"]
PJSUA_EXPECTS = [[0, "", "s"],
[0, "Subscribe presence of:", "1"],
[0, "Presence subscription .* is TERMINATED", ""],
[0, "Resubscribing .* in 5000 ms", ""]
]
|
AlexanderVangelov/pjsip
|
tests/pjsua/scripts-sipp/uas-subscribe-terminated-retry.py
|
Python
|
gpl-2.0
| 374
|
#----------------------------------------------
# ir_ula.py
#
# Intermediate representation for the ula (unconventional language)
# By Mitch Myburgh (MYBMIT001)
# 24 09 2015
#----------------------------------------------
from llvmlite import ir
from ctypes import CFUNCTYPE, c_float
import llvmlite.binding as llvm
# code for the parser
from ply import yacc
from lex_ula import tokens
import os
import sys
start = "Start"
def p_start(p):
"""Start : Program"""
p[0] = p[1]
def p_program_statements(p):
"""Program : Statements"""
p[0] = ["Program", p[1]]
def p_statements(p):
"""Statements : Statements Statement
| Statement"""
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1] + [p[2]]
def p_statement(p):
"""Statement : ID '=' expression"""
p[0] = ["=", [p[1]], p[3]]
def p_expression_plus(p):
"""expression : expression '@' term"""
p[0] = ["@", p[1], p[3]]
def p_expression_minus(p):
"""expression : expression '$' term"""
p[0] = ["$", p[1], p[3]]
def p_expression_term(p):
"""expression : term"""
p[0] = p[1]
def p_term_multiply(p):
"""term : term '#' factor"""
p[0] = ["#", p[1], p[3]]
def p_term_divide(p):
"""term : term '&' factor"""
p[0] = ["&", p[1], p[3]]
def p_term_factor(p):
"""term : factor"""
p[0] = p[1]
def p_factor_expression(p):
"""factor : '(' expression ')'"""
p[0] = p[2]
def p_factor_float(p):
"""factor : FLOAT_LITERAL"""
p[0] = [p[1]]
def p_factor_id(p):
"""factor : ID"""
p[0] = [p[1]]
def p_error(p):
    # Silently swallow syntax errors (a real front end would report them)
    pass
def print_tree(tupletree, depth=0):
print("\t"*depth, tupletree[0])
for item in tupletree[1]:
if isinstance(item, tuple):
print_tree(item, depth + 1)
else:
print("\t"*(depth+1), item)
parser = yacc.yacc()
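# With the grammar above, ula's operators map onto conventional arithmetic
# as @ -> +, $ -> -, # -> *, & -> /, and the expression/term/factor levels
# give '#' and '&' the usual higher precedence. Assuming the lexer yields
# FLOAT_LITERAL values as strings, a hypothetical input like
#     a = 1.0 @ 2.0 # 3.0
# parses to the nested-list tree
#     ["Program", [["=", ["a"], ["@", ["1.0"], ["#", ["2.0"], ["3.0"]]]]]]
# i.e. a = 1.0 + (2.0 * 3.0).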
#main function for the parser
def main():
global infilename
if len(sys.argv) == 2:
infilename = sys.argv[1]
if os.path.isfile(infilename):
infile = open(infilename, "r")
syntree = parser.parse(infile.read())
# print_tree(syntree)
return syntree
        else:
            print("Not a valid file")
            sys.exit(1)
    else:
        print("Specify filename, e.g. python3 ir_ula.py my_program.ula")
        sys.exit(1)
##llvmlite stuff
last_var = "" # keeps track of the last var assigned
var_dict = {} # var names associated with memory location
def code_gen(tree): # traverse tree recursively to generate code
global last_var
if tree[0] == "Program":
for t in tree[1]:
code_gen(t)
elif tree[0] == "=":
last_var = tree[1][0]
var_dict[last_var] = builder.alloca(ir.FloatType())
builder.store(code_gen(tree[2]), var_dict[last_var])
elif tree[0] == "@":
return(builder.fadd(code_gen(tree[1]),code_gen(tree[2])))
elif tree[0] == "$":
return(builder.fsub(code_gen(tree[1]),code_gen(tree[2])))
elif tree[0] == "#":
return(builder.fmul(code_gen(tree[1]),code_gen(tree[2])))
elif tree[0] == "&":
return(builder.fdiv(code_gen(tree[1]),code_gen(tree[2])))
elif tree[0] in var_dict.keys():
return builder.load(var_dict[tree[0]])
elif isinstance(float(tree[0]), float):
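        # leaf node: a float literal (float() raises for an unknown name)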
return(ir.Constant(ir.FloatType(), float(tree[0])))
#main function for the ir generator
def run():
global builder
tree = main()
flttyp = ir.FloatType() # create float type
fnctyp = ir.FunctionType(flttyp, ()) # create function type to return a float
module = ir.Module(name="ula") # create module named "ula"
func = ir.Function(module, fnctyp, name="main") # create "main" function
block = func.append_basic_block(name="entry") # create block "entry" label
builder = ir.IRBuilder(block) # create irbuilder to generate code
code_gen(tree) # call code_gen() to traverse tree & generate code
builder.ret(builder.load(var_dict[last_var])) # specify return value
return module
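# The ctypes and llvmlite.binding imports at the top of this file are
# otherwise unused; the sketch below shows the usual way to combine them
# to JIT-compile and execute the module built by run(). It is illustrative
# only and is not called by this script ("main" matches the function name
# created in run() above).
def jit_main(module):
    llvm.initialize()
    llvm.initialize_native_target()
    llvm.initialize_native_asmprinter()
    target_machine = llvm.Target.from_default_triple().create_target_machine()
    backing_mod = llvm.parse_assembly(str(module))  # text IR -> in-memory module
    backing_mod.verify()
    engine = llvm.create_mcjit_compiler(backing_mod, target_machine)
    engine.finalize_object()
    func_ptr = engine.get_function_address("main")
    return CFUNCTYPE(c_float)(func_ptr)()           # call main() -> float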
if __name__ == "__main__":
module = run()
outfilename = os.path.splitext(infilename)[0]+".ir"
outfile = open(outfilename, "w")
print(str(module).strip())
print(str(module).strip(), file = outfile)
outfile.close()
|
mitchmyburgh/compilers_assignment
|
part2/ir_ula.py
|
Python
|
gpl-2.0
| 4,411
|