| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars; ⌀ = may be null) |
|---|---|---|---|---|
laiqiqi886/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/tkinter/colorchooser.py
|
128
|
# tk common colour chooser dialogue
#
# this module provides an interface to the native color dialogue
# available in Tk 4.2 and newer.
#
# written by Fredrik Lundh, May 1997
#
# fixed initialcolor handling in August 1998
#
#
# options (all have default values):
#
# - initialcolor: colour to mark as selected when dialog is displayed
# (given as an RGB triplet or a Tk color string)
#
# - parent: which window to place the dialog on top of
#
# - title: dialog title
#
from tkinter.commondialog import Dialog
#
# color chooser class
class Chooser(Dialog):
"Ask for a color"
command = "tk_chooseColor"
def _fixoptions(self):
try:
# make sure initialcolor is a tk color string
color = self.options["initialcolor"]
if isinstance(color, tuple):
# assume an RGB triplet
self.options["initialcolor"] = "#%02x%02x%02x" % color
except KeyError:
pass
def _fixresult(self, widget, result):
# result can be several things: an empty tuple, an empty string, or
# a Tcl_Obj, so this somewhat odd check handles all of those cases
if not result or not str(result):
return None, None # canceled
# to simplify application code, the color chooser returns
# an RGB tuple together with the Tk color string
r, g, b = widget.winfo_rgb(result)
return (r/256, g/256, b/256), str(result)
#
# convenience stuff
def askcolor(color = None, **options):
"Ask for a color"
if color:
options = options.copy()
options["initialcolor"] = color
return Chooser(**options).show()
# --------------------------------------------------------------------
# test stuff
if __name__ == "__main__":
print("color", askcolor())
|
fpytloun/waliki
|
refs/heads/master
|
waliki/views.py
|
1
|
import json
from django.http import HttpResponse, Http404, HttpResponseRedirect, HttpResponsePermanentRedirect
from django.shortcuts import render, redirect, get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
from django.core.urlresolvers import reverse
from .models import Page, Redirect
from .forms import PageForm, MovePageForm, DeleteForm, NewPageForm
from .signals import page_saved, page_preedit, page_moved
from ._markups import get_all_markups
from .acl import permission_required, check_perms
from . import settings
def home(request):
return detail(request, slug=settings.WALIKI_INDEX_SLUG)
def compile_breadcrumbs(slug):
breadcrumbs = [(reverse('waliki_home'), _('Home')),]
if slug == settings.WALIKI_INDEX_SLUG:
return breadcrumbs
slug_parts = slug.split('/')
url = ''
# for every string from start until the next slash (or end of string)
for part in slug_parts:
# if page exists, find url and title
# otherwise, grab url and title from slug
url = url + part
pages = Page.objects.filter(slug=url)
url = url + '/'
if pages:
title = pages[0].title
else:
title = part
breadcrumbs.append(('/'+url, title))
return breadcrumbs
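# Worked illustration (editor's note, hypothetical slugs): for slug
# "docs/api/intro" the loop visits "docs", "docs/api", "docs/api/intro",
# yielding Home plus ('/docs/', ...), ('/docs/api/', ...), ('/docs/api/intro/', ...),
# using each page's title when a Page with that slug exists, else the slug part.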
@permission_required('view_page')
def detail(request, slug, raw=False):
slug = slug.strip('/')
# handle redirects first
try:
redirect = Redirect.objects.get(old_slug=slug) # noqa
if redirect.status_code == 302:
return HttpResponseRedirect(redirect.get_absolute_url())
return HttpResponsePermanentRedirect(redirect.get_absolute_url())
except Redirect.DoesNotExist:
pass
try:
page = Page.objects.get(slug=slug)
except Page.DoesNotExist:
page = None
if not check_perms('add_page', request.user, slug):
raise Http404
if raw and page:
return HttpResponse(page.raw, content_type='text/plain; charset=utf-8')
elif raw:
raise Http404
context = {'page': page, 'slug': slug}
if settings.WALIKI_BREADCRUMBS:
context['breadcrumbs'] = compile_breadcrumbs(slug)
return render(request, 'waliki/detail.html', context)
@permission_required('change_page')
def move(request, slug):
page = get_object_or_404(Page, slug=slug)
data = request.POST if request.method == 'POST' else None
form = MovePageForm(data, instance=page)
if request.method == 'POST' and form.is_valid():
new_slug = form.cleaned_data['slug']
# remove redirections (now there is an actual page)
Redirect.objects.filter(old_slug=new_slug).delete()
# squash existing redirections so they point at the new destination
Redirect.objects.filter(new_slug=slug).update(new_slug=new_slug)
# create the new redirection
Redirect.objects.create(old_slug=slug, new_slug=new_slug)
if not form.cleaned_data['just_redirect']:
old_path = page.path
page.move(new_slug + page.markup_.file_extensions[0])
page.slug = new_slug
page.save()
msg = _("Page moved from %(old_slug)s") % {'old_slug': slug}
page_moved.send(sender=move,
page=page,
old_path=old_path,
author=request.user,
message=msg,
)
url = page.get_absolute_url()
else:
msg = _("A redirection from %(old_slug)s to this page was added") % {'old_slug': slug}
url = reverse('waliki_detail', args=[new_slug])
messages.success(request, msg)
if request.is_ajax():
return HttpResponse(json.dumps({'redirect': url}), content_type="application/json")
return redirect(url)
if request.is_ajax():
data = render_to_string('waliki/generic_form.html', {'page': page, 'form': form},
context_instance=RequestContext(request))
return HttpResponse(json.dumps({'data': data}), content_type="application/json")
return render(request, 'waliki/generic_form.html', {'page': page, 'form': form})
@permission_required('change_page')
def edit(request, slug):
slug = slug.strip('/')
just_created = False
try:
page = Page.objects.get(slug=slug)
except Page.DoesNotExist:
if request.method == 'POST':
page = Page.objects.create(slug=slug)
page.raw = ""
page_saved.send(sender=edit,
page=page,
author=request.user,
message=_("Page created"),
form_extra_data={})
just_created = True
else:
return redirect('waliki_detail', slug)
original_markup = page.markup
data = request.POST if request.method == 'POST' and not just_created else None
form_extra_data = {}
receivers_responses = page_preedit.send(sender=edit, page=page)
for r in receivers_responses:
if isinstance(r[1], dict) and 'form_extra_data' in r[1]:
form_extra_data.update(r[1]['form_extra_data'])
form = PageForm(
data, instance=page, initial={'extra_data': json.dumps(form_extra_data)})
if form.is_valid():
page = form.save(commit=False)
if page.markup != original_markup:
old_path = page.path
page.update_extension()
msg = _("The markup was changed from {original} to {new}").format(original=original_markup,
new=page.markup)
page_moved.send(sender=edit,
page=page,
old_path=old_path,
author=request.user,
message=msg,
commit=False
)
was_moved = True
messages.warning(request, msg)
else:
was_moved = False
page.raw = form.cleaned_data['raw']
page.save()
try:
receivers_responses = page_saved.send(sender=edit,
page=page,
author=request.user,
message=form.cleaned_data[
"message"],
form_extra_data=json.loads(form.cleaned_data["extra_data"] or "{}"),
was_moved=was_moved)
except Page.EditionConflict as e:
messages.warning(request, e)
return redirect('waliki_edit', slug=page.slug)
for r in receivers_responses:
if isinstance(r[1], dict) and 'messages' in r[1]:
for key, value in r[1]['messages'].items():
getattr(messages, key)(request, value)
if 'next' in request.GET:
return redirect(request.GET['next'])
return redirect('waliki_detail', slug=page.slug)
cm_modes = [(m.name, m.codemirror_mode_name) for m in get_all_markups()]
cm_settings = settings.WALIKI_CODEMIRROR_SETTINGS
cm_settings.update({'mode': dict(cm_modes)[page.markup]})
return render(request, 'waliki/edit.html', {'page': page,
'form': form,
'slug': slug,
'cm_modes': cm_modes,
'cm_settings': json.dumps(cm_settings)})
def preview(request):
data = {}
if request.is_ajax() and request.method == "POST":
data['html'] = Page.preview(
request.POST['markup'], request.POST['text'])
return HttpResponse(json.dumps(data), content_type="application/json")
@permission_required('delete_page')
def delete(request, slug):
page = get_object_or_404(Page, slug=slug)
data = request.POST if request.method == 'POST' else None
form = DeleteForm(data)
if form.is_valid():
if form.cleaned_data['what'] == 'this':
msg = _("The page %(slug)s was deleted") % {'slug': slug}
page.delete()
else:
Page.objects.filter(slug__startswith=slug).delete()
msg = _("The page %(slug)s and all its namespace was deleted") % {
'slug': slug}
messages.warning(request, msg)
if request.is_ajax():
return HttpResponse(json.dumps({'redirect': reverse('waliki_home')}), content_type="application/json")
return redirect('waliki_home')
if request.is_ajax():
data = render_to_string('waliki/delete.html', {'page': page, 'form': form},
context_instance=RequestContext(request))
return HttpResponse(json.dumps({'data': data}), content_type="application/json")
return render(request, 'waliki/delete.html', {'page': page, 'form': form})
def new(request):
data = request.POST if request.method == 'POST' else None
form = NewPageForm(data, user=request.user)
if request.method == 'POST' and form.is_valid():
page = form.save()
page.raw = ""
page_saved.send(sender=new,
page=page,
author=request.user,
message=_("Page created"),
form_extra_data={})
if request.is_ajax():
return HttpResponse(json.dumps({'redirect': page.get_edit_url()}), content_type="application/json")
return redirect(page.get_edit_url())
if request.is_ajax():
data = render_to_string('waliki/generic_form.html', {'form': form},
context_instance=RequestContext(request))
return HttpResponse(json.dumps({'data': data}), content_type="application/json")
return render(request, 'waliki/generic_form.html', {'form': form})
def get_slug(request):
slug = settings.get_slug(request.GET.get('title', ''))
return HttpResponse(json.dumps({'slug': slug}), content_type="application/json")
|
south-coast-science/scs_core
|
refs/heads/develop
|
src/scs_core/osio/client/__init__.py
|
12133432
| |
akokai/chemex
|
refs/heads/master
|
chemex/tests/__init__.py
|
12133432
| |
JianyuWang/neutron
|
refs/heads/master
|
neutron/cmd/__init__.py
|
12133432
| |
fabeschan/midiutil
|
refs/heads/master
|
build/lib/midiutil/MidiFile3.py
|
2
|
#-----------------------------------------------------------------------------
# Name: MidiFile.py
# Purpose: MIDI file manipulation utilities
#
# Author: Mark Conway Wirt <emergentmusics) at (gmail . com>
#
# Created: 2008/04/17
# Copyright: (c) 2009 Mark Conway Wirt
# License: Please see License.txt for the terms under which this
# software is distributed.
#-----------------------------------------------------------------------------
import struct, sys, math
# TICKSPERBEAT is the number of "ticks" (time measurement in the MIDI file) that
# corresponds to one beat. This number is somewhat arbitrary, but should be chosen
# to provide adequate temporal resolution.
TICKSPERBEAT = 960
controllerEventTypes = {
'pan' : 0x0a
}
class MIDIEvent:
'''
The class to contain the MIDI Event (placed on the MIDIEventList).
'''
def __init__(self):
self.type='unknown'
self.time=0
self.ord = 0
def __lt__(self, other):
''' Sorting function for events.'''
if self.time < other.time:
return True
elif self.time > other.time:
return False
else:
if self.ord < other.ord:
return True
elif self.ord > other.ord:
return False
else:
return False
def __cmp__(self, other):
''' Sorting function for events.'''
if self.time < other.time:
return -1
elif self.time > other.time:
return 1
else:
if self.ord < other.ord:
return -1
elif self.ord > other.ord:
return 1
else:
return 0
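# Editor's note: 'ord' breaks ties between events at the same time -- track
# names (0) sort first, then program/controller/SysEx events (1), then
# NoteOffs (2), then NoteOns and tempo events (3) -- so a note released at
# time t is written before one struck at time t.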
class GenericEvent():
'''The event class from which specific events are derived
'''
def __init__(self,time):
self.time = time
self.type = 'Unknown'
def __eq__(self, other):
'''
Equality operator for Generic Events and derived classes.
In the processing of the event list, we have need to remove duplicates. To do this
we rely on the fact that the classes are hashable, and must therefore have an
equality operator (__hash__() and __eq__() must both be defined).
This is the most embarrassing portion of the code, and anyone who knows about OO
programming would find this almost unbelievable. Here we have a base class that
knows specifics about derived classes, thus breaking the very spirit of
OO programming.
I suppose I should go back and restructure the code, perhaps removing the derived
classes altogether. At some point perhaps I will.
'''
if self.time != other.time or self.type != other.type:
return False
# What follows is code that encodes the concept of equality for each derived
# class. Believe it if you dare.
if self.type == 'note':
if self.pitch != other.pitch or self.channel != other.channel:
return False
if self.type == 'tempo':
if self.tempo != other.tempo:
return False
if self.type == 'programChange':
if self.programNumber != other.programNumber or self.channel != other.channel:
return False
if self.type == 'trackName':
if self.trackName != other.trackName:
return False
if self.type == 'controllerEvent':
if self.parameter1 != other.parameter1 or \
self.channel != other.channel or \
self.eventType != other.eventType:
return False
if self.type == 'SysEx':
if self.manID != other.manID:
return False
if self.type == 'UniversalSysEx':
if self.code != other.code or\
self.subcode != other.subcode or \
self.sysExChannel != other.sysExChannel:
return False
return True
def __hash__(self):
'''
Return a hash code for the object.
This is needed for the removal of duplicate objects from the event list. The only
real requirement for the algorithm is that the hash of equal objects must be equal.
There is probably great opportunity for improvements in the hashing function.
'''
# Robert Jenkins' 32-bit integer hash.
a = int(self.time)
a = (a+0x7ed55d16) + (a<<12)
a = (a^0xc761c23c) ^ (a>>19)
a = (a+0x165667b1) + (a<<5)
a = (a+0xd3a2646c) ^ (a<<9)
a = (a+0xfd7046c5) + (a<<3)
a = (a^0xb55a4f09) ^ (a>>16)
return a
class MIDITrack:
'''A class that encapsulates a MIDI track
'''
# Nested class definitions.
class note(GenericEvent):
'''A class that encapsulates a note
'''
def __init__(self,channel, pitch,time,duration,volume):
GenericEvent.__init__(self,time)
self.pitch = pitch
self.duration = duration
self.volume = volume
self.type = 'note'
self.channel = channel
def compare(self, other):
'''Compare two notes for equality.
'''
if self.pitch == other.pitch and \
self.time == other.time and \
self.duration == other.duration and \
self.volume == other.volume and \
self.type == other.type and \
self.channel == other.channel:
return True
else:
return False
class tempo(GenericEvent):
'''A class that encapsulates a tempo meta-event
'''
def __init__(self,time,tempo):
GenericEvent.__init__(self,time)
self.type = 'tempo'
self.tempo = int(60000000 / tempo)
class programChange(GenericEvent):
'''A class that encapsulates a program change event.
'''
def __init__(self, channel, time, programNumber):
GenericEvent.__init__(self, time,)
self.type = 'programChange'
self.programNumber = programNumber
self.channel = channel
class SysExEvent(GenericEvent):
'''A class that encapsulates a System Exclusive event.
'''
def __init__(self, time, manID, payload):
GenericEvent.__init__(self, time,)
self.type = 'SysEx'
self.manID = manID
self.payload = payload
class UniversalSysExEvent(GenericEvent):
'''A class that encapsulates a Universal System Exclusive event.
'''
def __init__(self, time, realTime, sysExChannel, code, subcode, payload):
GenericEvent.__init__(self, time,)
self.type = 'UniversalSysEx'
self.realTime = realTime
self.sysExChannel = sysExChannel
self.code = code
self.subcode = subcode
self.payload = payload
class ControllerEvent(GenericEvent):
'''A class that encapsulates a controller event.
'''
def __init__(self, channel, time, eventType, parameter1,):
GenericEvent.__init__(self, time,)
self.type = 'controllerEvent'
self.parameter1 = parameter1
self.channel = channel
self.eventType = eventType
class trackName(GenericEvent):
'''A class that encapsulates a track name event.
'''
def __init__(self, time, trackName):
GenericEvent.__init__(self, time,)
self.type = 'trackName'
self.trackName = trackName
def __init__(self, removeDuplicates, deinterleave):
'''Initialize the MIDITrack object.
'''
self.headerString = struct.pack('cccc',b'M',b'T',b'r',b'k')
self.dataLength = 0 # Is calculated after the data is in place
self.MIDIdata = b""
self.closed = False
self.eventList = []
self.MIDIEventList = []
self.remdep = removeDuplicates
self.deinterleave = deinterleave
def addNoteByNumber(self,channel, pitch,time,duration,volume):
'''Add a note by chromatic MIDI number
'''
self.eventList.append(MIDITrack.note(channel, pitch,time,duration,volume))
def addControllerEvent(self,channel,time,eventType, parameter1):
'''
Add a controller event.
'''
self.eventList.append(MIDITrack.ControllerEvent(channel,time,eventType, \
parameter1))
def addTempo(self,time,tempo):
'''
Add a tempo change (or set) event.
'''
self.eventList.append(MIDITrack.tempo(time,tempo))
def addSysEx(self,time,manID, payload):
'''
Add a SysEx event.
'''
self.eventList.append(MIDITrack.SysExEvent(time, manID, payload))
def addUniversalSysEx(self,time,code, subcode, payload, sysExChannel=0x7F, \
realTime=False):
'''
Add a Universal SysEx event.
'''
self.eventList.append(MIDITrack.UniversalSysExEvent(time, realTime, \
sysExChannel, code, subcode, payload))
def addProgramChange(self,channel, time, program):
'''
Add a program change event.
'''
self.eventList.append(MIDITrack.programChange(channel, time, program))
def addTrackName(self,time,trackName):
'''
Add a track name event.
'''
self.eventList.append(MIDITrack.trackName(time,trackName))
def changeNoteTuning(self, tunings, sysExChannel=0x7F, realTime=False, \
tuningProgram=0):
'''Change the tuning of MIDI notes
'''
payload = struct.pack('>B', tuningProgram)
payload = payload + struct.pack('>B', len(tunings))
for (noteNumber, frequency) in tunings:
payload = payload + struct.pack('>B', noteNumber)
MIDIFrequency = frequencyTransform(frequency)
for byte in MIDIFrequency:
payload = payload + struct.pack('>B', byte)
self.eventList.append(MIDITrack.UniversalSysExEvent(0, realTime, sysExChannel,\
8, 2, payload))
def processEventList(self):
'''
Process the event list, creating a MIDIEventList
For each item in the event list, one or more events in the MIDIEvent
list are created.
'''
# Loop over all items in the eventList
for thing in self.eventList:
if thing.type == 'note':
event = MIDIEvent()
event.type = "NoteOn"
event.time = thing.time * TICKSPERBEAT
event.pitch = thing.pitch
event.volume = thing.volume
event.channel = thing.channel
event.ord = 3
self.MIDIEventList.append(event)
event = MIDIEvent()
event.type = "NoteOff"
event.time = (thing.time + thing.duration) * TICKSPERBEAT
event.pitch = thing.pitch
event.volume = thing.volume
event.channel = thing.channel
event.ord = 2
self.MIDIEventList.append(event)
elif thing.type == 'tempo':
event = MIDIEvent()
event.type = "Tempo"
event.time = thing.time * TICKSPERBEAT
event.tempo = thing.tempo
event.ord = 3
self.MIDIEventList.append(event)
elif thing.type == 'programChange':
event = MIDIEvent()
event.type = "ProgramChange"
event.time = thing.time * TICKSPERBEAT
event.programNumber = thing.programNumber
event.channel = thing.channel
event.ord = 1
self.MIDIEventList.append(event)
elif thing.type == 'trackName':
event = MIDIEvent()
event.type = "TrackName"
event.time = thing.time * TICKSPERBEAT
event.trackName = thing.trackName
event.ord = 0
self.MIDIEventList.append(event)
elif thing.type == 'controllerEvent':
event = MIDIEvent()
event.type = "ControllerEvent"
event.time = thing.time * TICKSPERBEAT
event.eventType = thing.eventType
event.channel = thing.channel
event.parameter1 = thing.parameter1
event.ord = 1
self.MIDIEventList.append(event)
elif thing.type == 'SysEx':
event = MIDIEvent()
event.type = "SysEx"
event.time = thing.time * TICKSPERBEAT
event.manID = thing.manID
event.payload = thing.payload
event.ord = 1
self.MIDIEventList.append(event)
elif thing.type == 'UniversalSysEx':
event = MIDIEvent()
event.type = "UniversalSysEx"
event.realTime = thing.realTime
event.sysExChannel = thing.sysExChannel
event.time = thing.time * TICKSPERBEAT
event.code = thing.code
event.subcode = thing.subcode
event.payload = thing.payload
event.ord = 1
self.MIDIEventList.append(event)
else:
print ("Error in MIDITrack: Unknown event type")
sys.exit(2)
# Assumptions in the code expect the list to be time-sorted.
# self.MIDIEventList.sort(lambda x, y: x.time - y.time)
self.MIDIEventList.sort(key=lambda x: (x.time))
if self.deinterleave:
self.deInterleaveNotes()
def removeDuplicates(self):
'''
Remove duplicates from the eventList.
This function will remove duplicates from the eventList. This is necessary
because the MIDI event stream can become confused otherwise.
'''
# For this algorithm to work, the events in the eventList must be hashable
# (that is, they must have a __hash__() and __eq__() function defined).
tempDict = {}
for item in self.eventList:
tempDict[item] = 1
self.eventList = list(tempDict.keys())
# Sort on type, then on time. Necessary because keys() has no requirement to return
# things in any order.
self.eventList.sort(key=lambda x: (x.type))
self.eventList.sort(key=lambda x: (x.time)) #A bit of a hack.
def closeTrack(self):
'''Called to close a track before writing
This function should be called to "close a track," that is to
prepare the actual data stream for writing. Duplicate events are
removed from the eventList, and the MIDIEventList is created.
Called by the parent MIDIFile object.
'''
if self.closed:
return
self.closed = True
if self.remdep:
self.removeDuplicates()
self.processEventList()
def writeMIDIStream(self):
'''
Write the meta data and note data to the packed MIDI stream.
'''
#Process the events in the eventList
self.writeEventsToStream()
# Write MIDI close event.
self.MIDIdata = self.MIDIdata + struct.pack('BBBB',0x00,0xFF, \
0x2F,0x00)
# Calculate the entire length of the data and write to the header
self.dataLength = struct.pack('>L',len(self.MIDIdata))
def writeEventsToStream(self):
'''
Write the events in MIDIEvents to the MIDI stream.
'''
preciseTime = 0.0 # Actual time of event, ignoring round-off
actualTime = 0.0 # Time as written to the MIDI stream, including round-off
for event in self.MIDIEventList:
preciseTime = preciseTime + event.time
# Convert the time to variable length and back, to see how much
# error is introduced
testBuffer = bytes()
varTime = writeVarLength(event.time)
for timeByte in varTime:
testBuffer = testBuffer + struct.pack('>B',timeByte)
(roundedVal,discard) = readVarLength(0,testBuffer)
roundedTime = actualTime + roundedVal
# print "Rounded, Precise: %15.10f %15.10f" % (roundedTime, preciseTime)
# Calculate the delta between the two and apply it to the event time.
delta = preciseTime - roundedTime
event.time = event.time + delta
# Now update the actualTime value, using the updated event time.
testBuffer = bytes()
varTime = writeVarLength(event.time)
for timeByte in varTime:
testBuffer = testBuffer + struct.pack('>B',timeByte)
(roundedVal,discard) = readVarLength(0,testBuffer)
actualTime = actualTime + roundedVal
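# Editor's note on the loop above: delta times are quantized when written as
# variable-length values, so each event's time is nudged by the accumulated
# error (preciseTime - roundedTime) to keep the written schedule from drifting.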
for event in self.MIDIEventList:
if event.type == "NoteOn":
code = 0x9 << 4 | event.channel
varTime = writeVarLength(event.time)
for timeByte in varTime:
self.MIDIdata = self.MIDIdata + struct.pack('>B',timeByte)
self.MIDIdata = self.MIDIdata + struct.pack('>B',code)
self.MIDIdata = self.MIDIdata + struct.pack('>B',event.pitch)
self.MIDIdata = self.MIDIdata + struct.pack('>B',event.volume)
elif event.type == "NoteOff":
code = 0x8 << 4 | event.channel
varTime = writeVarLength(event.time)
for timeByte in varTime:
self.MIDIdata = self.MIDIdata + struct.pack('>B',timeByte)
self.MIDIdata = self.MIDIdata + struct.pack('>B',code)
self.MIDIdata = self.MIDIdata + struct.pack('>B',event.pitch)
self.MIDIdata = self.MIDIdata + struct.pack('>B',event.volume)
elif event.type == "Tempo":
code = 0xFF
subcode = 0x51
fourbite = struct.pack('>L', event.tempo)
threebite = fourbite[1:4] # Just discard the MSB
varTime = writeVarLength(event.time)
for timeByte in varTime:
self.MIDIdata = self.MIDIdata + struct.pack('>B',timeByte)
self.MIDIdata = self.MIDIdata + struct.pack('>B',code)
self.MIDIdata = self.MIDIdata + struct.pack('>B',subcode)
self.MIDIdata = self.MIDIdata + struct.pack('>B', 0x03) # Data length: 3
self.MIDIdata = self.MIDIdata + threebite
elif event.type == 'ProgramChange':
code = 0xC << 4 | event.channel
varTime = writeVarLength(event.time)
for timeByte in varTime:
self.MIDIdata = self.MIDIdata + struct.pack('>B',timeByte)
self.MIDIdata = self.MIDIdata + struct.pack('>B',code)
self.MIDIdata = self.MIDIdata + struct.pack('>B',event.programNumber)
elif event.type == 'TrackName':
varTime = writeVarLength(event.time)
for timeByte in varTime:
self.MIDIdata = self.MIDIdata + struct.pack('>B',timeByte)
self.MIDIdata = self.MIDIdata + struct.pack('B',0xFF) # Meta-event
self.MIDIdata = self.MIDIdata + struct.pack('B',0X03) # Event Type
dataLength = len(event.trackName)
dataLengthVar = writeVarLength(dataLength)
# pack unsigned ('B'): variable-length continuation bytes can have the high bit set
for i in range(0,len(dataLengthVar)):
self.MIDIdata = self.MIDIdata + struct.pack("B",dataLengthVar[i])
self.MIDIdata = self.MIDIdata + event.trackName.encode()
elif event.type == "ControllerEvent":
code = 0xB << 4 | event.channel
varTime = writeVarLength(event.time)
for timeByte in varTime:
self.MIDIdata = self.MIDIdata + struct.pack('>B',timeByte)
self.MIDIdata = self.MIDIdata + struct.pack('>B',code)
self.MIDIdata = self.MIDIdata + struct.pack('>B',event.eventType)
self.MIDIdata = self.MIDIdata + struct.pack('>B',event.parameter1)
elif event.type == "SysEx":
code = 0xF0
varTime = writeVarLength(event.time)
for timeByte in varTime:
self.MIDIdata = self.MIDIdata + struct.pack('>B',timeByte)
self.MIDIdata = self.MIDIdata + struct.pack('>B', code)
payloadLength = writeVarLength(len(event.payload)+2)
for lenByte in payloadLength:
self.MIDIdata = self.MIDIdata + struct.pack('>B',lenByte)
self.MIDIdata = self.MIDIdata + struct.pack('>B', event.manID)
self.MIDIdata = self.MIDIdata + event.payload
self.MIDIdata = self.MIDIdata + struct.pack('>B',0xF7)
elif event.type == "UniversalSysEx":
code = 0xF0
varTime = writeVarLength(event.time)
for timeByte in varTime:
self.MIDIdata = self.MIDIdata + struct.pack('>B',timeByte)
self.MIDIdata = self.MIDIdata + struct.pack('>B', code)
# Do we need to add a length?
payloadLength = writeVarLength(len(event.payload)+5)
for lenByte in payloadLength:
self.MIDIdata = self.MIDIdata + struct.pack('>B',lenByte)
if event.realTime :
self.MIDIdata = self.MIDIdata + struct.pack('>B', 0x7F)
else:
self.MIDIdata = self.MIDIdata + struct.pack('>B', 0x7E)
self.MIDIdata = self.MIDIdata + struct.pack('>B', event.sysExChannel)
self.MIDIdata = self.MIDIdata + struct.pack('>B', event.code)
self.MIDIdata = self.MIDIdata + struct.pack('>B', event.subcode)
self.MIDIdata = self.MIDIdata + event.payload
self.MIDIdata = self.MIDIdata + struct.pack('>B',0xF7)
def deInterleaveNotes(self):
'''Correct Interleaved notes.
Because we are writing multiple notes in no particular order, we
can have notes which are interleaved with respect to their start
and stop times. This method will correct that. It expects that the
MIDIEventList has been time-ordered.
'''
tempEventList = []
stack = {}
for event in self.MIDIEventList:
if event.type == 'NoteOn':
if str(event.pitch)+str(event.channel) in stack:
stack[str(event.pitch)+str(event.channel)].append(event.time)
else:
stack[str(event.pitch)+str(event.channel)] = [event.time]
tempEventList.append(event)
elif event.type == 'NoteOff':
if len(stack[str(event.pitch)+str(event.channel)]) > 1:
event.time = stack[str(event.pitch)+str(event.channel)].pop()
tempEventList.append(event)
else:
stack[str(event.pitch)+str(event.channel)].pop()
tempEventList.append(event)
else:
tempEventList.append(event)
self.MIDIEventList = tempEventList
# A little trickery here. We want to make sure that NoteOff events appear
# before NoteOn events, so we'll do two sorts -- one on type, one on time.
# This may have to be revisited, as it makes assumptions about how
# the internal sort works, and is in essence creating a sort on a primary
# and secondary key.
self.MIDIEventList.sort(key=lambda x: (x.type))
self.MIDIEventList.sort(key=lambda x: (x.time))
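# Worked illustration (editor's note): two overlapping C4 notes on one channel,
# NoteOn@0, NoteOn@1, NoteOff@2, NoteOff@3, become back-to-back notes: the
# first NoteOff is reassigned the later NoteOn time (1), yielding 0-1 and 1-3.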
def adjustTime(self,origin):
'''
Adjust Times to be relative, and zero-origined
'''
if len(self.MIDIEventList) == 0:
return
tempEventList = []
runningTime = 0
for event in self.MIDIEventList:
adjustedTime = event.time - origin
event.time = adjustedTime - runningTime
runningTime = adjustedTime
tempEventList.append(event)
self.MIDIEventList = tempEventList
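# Editor's note: e.g. absolute times [10, 12, 15] with origin 10 become the
# relative delta times [0, 2, 3] expected by the MIDI file format.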
def writeTrack(self,fileHandle):
'''
Write track to disk.
'''
if not self.closed:
self.closeTrack()
fileHandle.write(self.headerString)
fileHandle.write(self.dataLength)
fileHandle.write(self.MIDIdata)
class MIDIHeader:
'''
Class to encapsulate the MIDI header structure.
This class encapsulates a MIDI header structure. It isn't used for much,
but it will create the appropriately packed identifier string that all
MIDI files should contain. It is used by the MIDIFile class to create a
complete and well formed MIDI pattern.
'''
def __init__(self,numTracks):
''' Initialize the data structures
'''
self.headerString = struct.pack('cccc',b'M',b'T',b'h',b'd')
self.headerSize = struct.pack('>L',6)
# Format 1 = multi-track file
self.format = struct.pack('>H',1)
self.numTracks = struct.pack('>H',numTracks)
self.ticksPerBeat = struct.pack('>H',TICKSPERBEAT)
def writeFile(self,fileHandle):
fileHandle.write(self.headerString)
fileHandle.write(self.headerSize)
fileHandle.write(self.format)
fileHandle.write(self.numTracks)
fileHandle.write(self.ticksPerBeat)
class MIDIFile:
'''Class that represents a full, well-formed MIDI pattern.
This is a container object that contains a header, one or more tracks,
and the data associated with a proper and well-formed MIDI pattern.
Calling:
MyMIDI = MIDIFile(tracks, removeDuplicates=True, deinterleave=True)
normally
MyMIDI = MIDIFile(tracks)
Arguments:
tracks: The number of tracks this object contains
removeDuplicates: If true (the default), the software will remove duplicate
events which have been added. For example, two notes at the same channel,
time, pitch, and duration would be considered duplicate.
deinterleave: If True (the default), overlapping notes (same pitch, same
channel) will be modified so that they do not overlap. Otherwise the sequencing
software will need to figure out how to interpret NoteOff events upon playback.
'''
def __init__(self, numTracks, removeDuplicates=True, deinterleave=True):
'''
Initialize the class
'''
self.header = MIDIHeader(numTracks)
self.tracks = list()
self.numTracks = numTracks
self.closed = False
for i in range(0,numTracks):
self.tracks.append(MIDITrack(removeDuplicates, deinterleave))
# Public Functions. These (for the most part) wrap the MIDITrack functions, where most
# Processing takes place.
def addNote(self,track, channel, pitch,time,duration,volume):
"""
Add notes to the MIDIFile object
Use:
MyMIDI.addNote(track, channel, pitch, time, duration, volume)
Arguments:
track: The track to which the note is added.
channel: the MIDI channel to assign to the note. [Integer, 0-15]
pitch: the MIDI pitch number [Integer, 0-127].
time: the time (in beats) at which the note sounds [Float].
duration: the duration of the note (in beats) [Float].
volume: the volume (velocity) of the note. [Integer, 0-127].
"""
self.tracks[track].addNoteByNumber(channel, pitch, time, duration, volume)
def addTrackName(self,track, time,trackName):
"""
Add a track name to a MIDI track.
Use:
MyMIDI.addTrackName(track,time,trackName)
Argument:
track: The track to which the name is added. [Integer, 0-127].
time: The time at which the track name is added, in beats [Float].
trackName: The track name. [String].
"""
self.tracks[track].addTrackName(time,trackName)
def addTempo(self,track, time,tempo):
"""
Add a tempo event.
Use:
MyMIDI.addTempo(track, time, tempo)
Arguments:
track: The track to which the event is added. [Integer, 0-127].
time: The time at which the event is added, in beats. [Float].
tempo: The tempo, in Beats per Minute. [Integer]
"""
self.tracks[track].addTempo(time,tempo)
def addProgramChange(self,track, channel, time, program):
"""
Add a MIDI program change event.
Use:
MyMIDI.addProgramChange(track,channel, time, program)
Arguments:
track: The track to which the event is added. [Integer, 0-127].
channel: The channel the event is assigned to. [Integer, 0-15].
time: The time at which the event is added, in beats. [Float].
program: the program number. [Integer, 0-127].
"""
self.tracks[track].addProgramChange(channel, time, program)
def addControllerEvent(self,track, channel,time,eventType, parameter1):
"""
Add a MIDI controller event.
Use:
MyMIDI.addControllerEvent(track, channel, time, eventType, parameter1)
Arguments:
track: The track to which the event is added. [Integer, 0-127].
channel: The channel the event is assigned to. [Integer, 0-15].
time: The time at which the event is added, in beats. [Float].
eventType: the controller event type.
parameter1: The event's parameter. The meaning of which varies by event type.
"""
self.tracks[track].addControllerEvent(channel,time,eventType, parameter1)
def changeNoteTuning(self, track, tunings, sysExChannel=0x7F, \
realTime=False, tuningProgram=0):
"""
Change a note's tuning using SysEx change tuning program.
Use:
MyMIDI.changeNoteTuning(track,[tunings],realTime=False, tuningProgram=0)
Arguments:
track: The track to which the event is added. [Integer, 0-127].
tunings: A list of tuples in the form (pitchNumber, frequency).
[List of (Integer, Float) tuples]
realTime: Boolean which sets the real-time flag. Defaults to false.
sysExChannel: do not use (see below).
tuningProgram: Tuning program to assign. Defaults to zero. [Integer, 0-127]
In general the sysExChannel should not be changed (this parameter will be deprecated).
Also note that many software packages and hardware packages do not implement
this standard!
"""
self.tracks[track].changeNoteTuning(tunings, sysExChannel, realTime,\
tuningProgram)
def writeFile(self,fileHandle):
'''
Write the MIDI File.
Use:
MyMIDI.writeFile(filehandle)
Arguments:
filehandle: a file handle that has been opened for binary writing.
'''
self.header.writeFile(fileHandle)
#Close the tracks and have them create the MIDI event data structures.
self.close()
#Write the MIDI Events to file.
for i in range(0,self.numTracks):
self.tracks[i].writeTrack(fileHandle)
def addSysEx(self,track, time, manID, payload):
"""
Add a SysEx event
Use:
MyMIDI.addSysEx(track,time,ID,payload)
Arguments:
track: The track to which the event is added. [Integer, 0-127].
time: The time at which the event is added, in beats. [Float].
ID: The SysEx ID number
payload: the event payload.
Note: This is a low-level MIDI function, so care must be used in
constructing the payload. It is recommended that higher-level helper
functions be written to wrap this function and construct the payload if
a developer finds him or herself using the function heavily.
"""
self.tracks[track].addSysEx(time,manID, payload)
def addUniversalSysEx(self,track, time,code, subcode, payload, \
sysExChannel=0x7F, realTime=False):
"""
Add a Universal SysEx event.
Use:
MyMIDI.addUniversalSysEx(track, time, code, subcode, payload,\
sysExChannel=0x7f, realTime=False)
Arguments:
track: The track to which the event is added. [Integer, 0-127].
time: The time at which the event is added, in beats. [Float].
code: The event code. [Integer]
subcode: The event sub-code. [Integer]
payload: The event payload. [Binary string]
sysExChannel: The SysEx channel.
realTime: Sets the real-time flag. Defaults to False.
Note: This is a low-level MIDI function, so care must be used in
constructing the payload. It is recommended that higher-level helper
functions be written to wrap this function and construct the payload if
a developer finds him or herself using the function heavily. As an example
of such a helper function, see the changeNoteTuning function, both here and
in MIDITrack.
"""
self.tracks[track].addUniversalSysEx(time,code, subcode, payload, sysExChannel,\
realTime)
def shiftTracks(self, offset=0):
"""Shift tracks to be zero-origined, or origined at offset.
Note that the shifting of the time in the tracks uses the MIDIEventList -- in other
words it is assumed to be called in the stage where the MIDIEventList has been
created. This function, however, is meant to operate on the eventList itself.
"""
origin = 1000000 # A little silly, but we'll assume big enough
for track in self.tracks:
if len(track.eventList) > 0:
for event in track.eventList:
if event.time < origin:
origin = event.time
for track in self.tracks:
tempEventList = []
#runningTime = 0
for event in track.eventList:
adjustedTime = event.time - origin
#event.time = adjustedTime - runningTime + offset
event.time = adjustedTime + offset
#runningTime = adjustedTime
tempEventList.append(event)
track.eventList = tempEventList
#End Public Functions ########################
def close(self):
'''Close the MIDIFile for further writing.
To close the File for events, we must close the tracks, adjust the time to be
zero-origined, and have the tracks write to their MIDI Stream data structure.
'''
if self.closed:
return
for i in range(0,self.numTracks):
self.tracks[i].closeTrack()
# We want things like program changes to come before notes when they are at the
# same time, so we sort the MIDI events by their ordinality
self.tracks[i].MIDIEventList.sort()
origin = self.findOrigin()
for i in range(0,self.numTracks):
self.tracks[i].adjustTime(origin)
self.tracks[i].writeMIDIStream()
self.closed = True
def findOrigin(self):
'''Find the earliest time in the file's tracks.
'''
origin = 1000000 # A little silly, but we'll assume big enough
# Note: This code assumes that the MIDIEventList has been sorted, so this should be ensured
# before it is called. It is probably a poor design to do this.
# TODO: -- Consider making this less efficient but more robust by not assuming the list to be sorted.
for track in self.tracks:
if len(track.MIDIEventList) > 0:
if track.MIDIEventList[0].time < origin:
origin = track.MIDIEventList[0].time
return origin
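# Hedged usage sketch (editor's addition, not part of the original library):
# build a one-track file holding a C major arpeggio and write it to disk. The
# file name "example.mid" is illustrative; the function is defined but not
# called, so importing this module stays side-effect free.
def _example_write_midi():
    mf = MIDIFile(1)
    mf.addTrackName(0, 0, "Sample Track")
    mf.addTempo(0, 0, 120)                     # 120 beats per minute
    for i, pitch in enumerate((60, 64, 67)):   # C4, E4, G4
        mf.addNote(0, 0, pitch, i, 1, 100)     # track, channel, pitch, time, duration, volume
    with open("example.mid", "wb") as handle:
        mf.writeFile(handle)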
def writeVarLength(i):
'''Accept an input, and write a MIDI-compatible variable length stream
The MIDI format is a little strange, and makes use of so-called variable
length quantities. These quantities are a stream of bytes. If the most
significant bit is 1, then more bytes follow. If it is zero, then the
byte in question is the last in the stream.
'''
    value = int(i * TICKSPERBEAT)   # renamed from 'input', which shadowed a builtin
    output = [0,0,0,0]
    revBytes = [0,0,0,0]            # renamed from 'reversed', which shadowed a builtin
    count = 0
    result = value & 0x7F
    output[count] = result
    count = count + 1
    value = value >> 7
    while value > 0:
        result = value & 0x7F
        result = result | 0x80
        output[count] = result
        count = count + 1
        value = value >> 7
    revBytes[0] = output[3]
    revBytes[1] = output[2]
    revBytes[2] = output[1]
    revBytes[3] = output[0]
    return revBytes[4-count:4]
def readVarLength(offset, buffer):
'''A function to read a MIDI variable length variable.
It returns a tuple of the value read and the number of bytes processed. The
input is an offset into the buffer, and the buffer itself.
'''
toffset = offset
output = 0
bytesRead = 0
while True:
output = output << 7
byte = struct.unpack_from('>B',buffer,toffset)[0]
toffset = toffset + 1
bytesRead = bytesRead + 1
output = output + (byte & 127)
if (byte & 128) == 0:
break
return (output, bytesRead)
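# Worked example (editor's addition): with TICKSPERBEAT = 960, writeVarLength(1)
# encodes one beat (960 ticks) as [0x87, 0x40] -- 0x87 carries the continuation
# bit, 0x40 is the final byte -- and readVarLength(0, b'\x87\x40') returns (960, 2).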
def frequencyTransform(freq):
'''Returns a three-byte transform of a frequency
'''
resolution = 16384
freq = float(freq)
dollars = 69 + 12 * math.log(freq/(float(440)), 2)
firstByte = int(dollars)
lowerFreq = 440 * pow(2.0, ((float(firstByte) - 69.0)/12.0))
if freq != lowerFreq:
centDif = 1200 * math.log( (freq/lowerFreq), 2)
else:
centDif = 0
cents = round(centDif/100 * resolution) # round?
secondByte = min([int(cents)>>7, 0x7F])
thirdByte = cents - (secondByte << 7)
thirdByte = min([thirdByte, 0x7f])
if thirdByte == 0x7f and secondByte == 0x7F and firstByte == 0x7F:
thirdByte = 0x7e
thirdByte = int(thirdByte)
return [firstByte, secondByte, thirdByte]
def returnFrequency(freqBytes):
'''The reverse of frequencyTransform. Given a byte stream, return a frequency.
'''
resolution = 16384.0
baseFrequency = 440 * pow(2.0, (float(freqBytes[0]-69.0)/12.0))
frac = (float((int(freqBytes[1]) << 7) + int(freqBytes[2])) * 100.0) / resolution
frequency = baseFrequency * pow(2.0, frac/1200.0)
return frequency
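# Hedged round-trip check (editor's addition): 440 Hz is exactly MIDI note 69,
# so frequencyTransform(440.0) yields [69, 0, 0] and returnFrequency() recovers
# the original frequency.
if __name__ == "__main__":
    assert frequencyTransform(440.0) == [69, 0, 0]
    assert abs(returnFrequency([69, 0, 0]) - 440.0) < 1e-9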
|
bmac/evilsheep
|
refs/heads/master
|
registration/backends/default/__init__.py
|
71
|
from django.conf import settings
from django.contrib.sites.models import RequestSite
from django.contrib.sites.models import Site
from registration import signals
from registration.forms import RegistrationForm
from registration.models import RegistrationProfile
class DefaultBackend(object):
"""
A registration backend which follows a simple workflow:
1. User signs up, inactive account is created.
2. Email is sent to user with activation link.
3. User clicks activation link, account is now active.
Using this backend requires that
* ``registration`` be listed in the ``INSTALLED_APPS`` setting
(since this backend makes use of models defined in this
application).
* The setting ``ACCOUNT_ACTIVATION_DAYS`` be supplied, specifying
(as an integer) the number of days from registration during
which a user may activate their account (after that period
expires, activation will be disallowed).
* The creation of the templates
``registration/activation_email_subject.txt`` and
``registration/activation_email.txt``, which will be used for
the activation email. See the notes for this backend's
``register`` method for details regarding these templates.
Additionally, registration can be temporarily closed by adding the
setting ``REGISTRATION_OPEN`` and setting it to
``False``. Omitting this setting, or setting it to ``True``, will
be interpreted as meaning that registration is currently open and
permitted.
Internally, this is accomplished via storing an activation key in
an instance of ``registration.models.RegistrationProfile``. See
that model and its custom manager for full documentation of its
fields and supported operations.
"""
def register(self, request, **kwargs):
"""
Given a username, email address and password, register a new
user account, which will initially be inactive.
Along with the new ``User`` object, a new
``registration.models.RegistrationProfile`` will be created,
tied to that ``User``, containing the activation key which
will be used for this account.
An email will be sent to the supplied email address; this
email should contain an activation link. The email will be
rendered using two templates. See the documentation for
``RegistrationProfile.send_activation_email()`` for
information about these templates and the contexts provided to
them.
After the ``User`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``registration.signals.user_registered`` will be sent, with
the new ``User`` as the keyword argument ``user`` and the
class of this backend as the sender.
"""
username, email, password = kwargs['username'], kwargs['email'], kwargs['password1']
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
new_user = RegistrationProfile.objects.create_inactive_user(username, email,
password, site)
signals.user_registered.send(sender=self.__class__,
user=new_user,
request=request)
return new_user
def activate(self, request, activation_key):
"""
Given an activation key, look up and activate the user
account corresponding to that key (if possible).
After successful activation, the signal
``registration.signals.user_activated`` will be sent, with the
newly activated ``User`` as the keyword argument ``user`` and
the class of this backend as the sender.
"""
activated = RegistrationProfile.objects.activate_user(activation_key)
if activated:
signals.user_activated.send(sender=self.__class__,
user=activated,
request=request)
return activated
def registration_allowed(self, request):
"""
Indicate whether account registration is currently permitted,
based on the value of the setting ``REGISTRATION_OPEN``. This
is determined as follows:
* If ``REGISTRATION_OPEN`` is not specified in settings, or is
set to ``True``, registration is permitted.
* If ``REGISTRATION_OPEN`` is both specified and set to
``False``, registration is not permitted.
"""
return getattr(settings, 'REGISTRATION_OPEN', True)
def get_form_class(self, request):
"""
Return the default form class used for user registration.
"""
return RegistrationForm
def post_registration_redirect(self, request, user):
"""
Return the name of the URL to redirect to after successful
user registration.
"""
return ('registration_complete', (), {})
def post_activation_redirect(self, request, user):
"""
Return the name of the URL to redirect to after successful
account activation.
"""
return ('registration_activation_complete', (), {})
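# Hedged configuration sketch (editor's addition; values are illustrative, per
# the settings described in the class docstring above):
#
#     INSTALLED_APPS += ['registration']
#     ACCOUNT_ACTIVATION_DAYS = 7    # activation window, in days
#     REGISTRATION_OPEN = True       # optional; omitting it also means open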
|
esthermm/odoo-addons
|
refs/heads/8.0
|
procurement_service_project/models/procurement_order.py
|
1
|
# -*- coding: utf-8 -*-
# (c) 2016 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api
class ProcurementOrder(models.Model):
_inherit = 'procurement.order'
service_project_task = fields.Many2one(
comodel_name='project.task', string='Generated task from procurement',
copy=False)
@api.model
def _run(self, procurement):
task_obj = self.env['project.task']
route = procurement.product_id.route_ids.filtered(lambda r: r.id in [
self.env.ref('procurement_service_project.route_serv_project').id])
if procurement.product_id.type == 'service' and route:
task_obj._create_task_from_procurement_service_project(procurement)
return super(ProcurementOrder, self)._run(procurement)
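# Editor's note: only service-type products carrying the
# 'procurement_service_project.route_serv_project' route get a project task
# created before the standard procurement run proceeds.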
|
TunedMystic/monq-app
|
refs/heads/master
|
nil/views.py
|
1
|
from django.shortcuts import render
def error404(request):
"""
Render 404 Not Found page.
"""
return render(request, "nil/http404.html")
def error500(request):
"""
Render 500 Internal Server Error page.
"""
return render(request, "nil/http500.html")
|
Emercoin/emercoin
|
refs/heads/master
|
qa/rpc-tests/bip68-sequence.py
|
101
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test BIP68 implementation
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.blocktools import *
import random  # used by test_sequence_lock_confirmed_inputs
SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height)
SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift
SEQUENCE_LOCKTIME_MASK = 0x0000ffff
# RPC error for non-BIP68 final transactions
NOT_FINAL_ERROR = "64: non-BIP68-final"
class BIP68Test(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-blockprioritysize=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-blockprioritysize=0", "-acceptnonstdtxn=0"]))
self.is_network_split = False
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
connect_nodes(self.nodes[0], 1)
def run_test(self):
# Generate some coins
self.nodes[0].generate(110)
print("Running test disable flag")
self.test_disable_flag()
print("Running test sequence-lock-confirmed-inputs")
self.test_sequence_lock_confirmed_inputs()
print("Running test sequence-lock-unconfirmed-inputs")
self.test_sequence_lock_unconfirmed_inputs()
print("Running test BIP68 not consensus before versionbits activation")
self.test_bip68_not_consensus()
print("Verifying nVersion=2 transactions aren't standard")
self.test_version2_relay(before_activation=True)
print("Activating BIP68 (and 112/113)")
self.activateCSV()
print("Verifying nVersion=2 transactions are now standard")
self.test_version2_relay(before_activation=False)
print("Passed\n")
# Test that BIP68 is not in effect if tx version is 1, or if
# the first sequence bit is set.
def test_disable_flag(self):
# Create some unconfirmed inputs
new_addr = self.nodes[0].getnewaddress()
self.nodes[0].sendtoaddress(new_addr, 2) # send 2 BTC
utxos = self.nodes[0].listunspent(0, 0)
assert(len(utxos) > 0)
utxo = utxos[0]
tx1 = CTransaction()
value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN)
# Check that the disable flag disables relative locktime.
# If sequence locks were used, this would require 1 block for the
# input to mature.
sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
tx1.vout = [CTxOut(value, CScript([b'a']))]
tx1_signed = self.nodes[0].signrawtransaction(ToHex(tx1))["hex"]
tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
tx1_id = int(tx1_id, 16)
# This transaction will enable sequence-locks, so this transaction should
# fail
tx2 = CTransaction()
tx2.nVersion = 2
sequence_value = sequence_value & 0x7fffffff
tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))]
tx2.rehash()
try:
self.nodes[0].sendrawtransaction(ToHex(tx2))
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(False)
# Setting the version back down to 1 should disable the sequence lock,
# so this should be accepted.
tx2.nVersion = 1
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Calculate the median time past of a prior block ("confirmations" before
# the current tip).
def get_median_time_past(self, confirmations):
block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations)
return self.nodes[0].getblockheader(block_hash)["mediantime"]
# Test that sequence locks are respected for transactions spending confirmed inputs.
def test_sequence_lock_confirmed_inputs(self):
# Create lots of confirmed utxos, and use them to generate lots of random
# transactions.
max_outputs = 50
addresses = []
while len(addresses) < max_outputs:
addresses.append(self.nodes[0].getnewaddress())
while len(self.nodes[0].listunspent()) < 200:
random.shuffle(addresses)
num_outputs = random.randint(1, max_outputs)
outputs = {}
for i in range(num_outputs):
outputs[addresses[i]] = random.randint(1, 20)*0.01
self.nodes[0].sendmany("", outputs)
self.nodes[0].generate(1)
utxos = self.nodes[0].listunspent()
# Try creating a lot of random transactions.
# Each time, choose a random number of inputs, and randomly set
# some of those inputs to be sequence locked (and randomly choose
# between height/time locking). Small random chance of making the locks
# all pass.
for i in range(400):
# Randomly choose up to 10 inputs
num_inputs = random.randint(1, 10)
random.shuffle(utxos)
# Track whether any sequence locks used should fail
should_pass = True
# Track whether this transaction was built with sequence locks
using_sequence_locks = False
tx = CTransaction()
tx.nVersion = 2
value = 0
for j in range(num_inputs):
sequence_value = 0xfffffffe # this disables sequence locks
# 50% chance we enable sequence locks
if random.randint(0,1):
using_sequence_locks = True
# 10% of the time, make the input sequence value pass
input_will_pass = (random.randint(1,10) == 1)
sequence_value = utxos[j]["confirmations"]
if not input_will_pass:
sequence_value += 1
should_pass = False
# Figure out what the median-time-past was for the confirmed input
# Note that if an input has N confirmations, we're going back N blocks
# from the tip so that we're looking up MTP of the block
# PRIOR to the one the input appears in, as per the BIP68 spec.
orig_time = self.get_median_time_past(utxos[j]["confirmations"])
cur_time = self.get_median_time_past(0) # MTP of the tip
# can only timelock this input if it's not too old -- otherwise use height
can_time_lock = True
if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
can_time_lock = False
# if time-lockable, then 50% chance we make this a time lock
if random.randint(0,1) and can_time_lock:
# Find first time-lock value that fails, or latest one that succeeds
time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
if input_will_pass and time_delta > cur_time - orig_time:
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
elif (not input_will_pass and time_delta <= cur_time - orig_time):
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
value += utxos[j]["amount"]*COIN
# Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output
tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50
tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a'])))
rawtx = self.nodes[0].signrawtransaction(ToHex(tx))["hex"]
try:
self.nodes[0].sendrawtransaction(rawtx)
except JSONRPCException as exp:
assert(not should_pass and using_sequence_locks)
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(should_pass or not using_sequence_locks)
# Recalculate utxos if we successfully sent the transaction
utxos = self.nodes[0].listunspent()
# Test that sequence locks on unconfirmed inputs must have nSequence
# height or time of 0 to be accepted.
# Then test that BIP68-invalid transactions are removed from the mempool
# after a reorg.
def test_sequence_lock_unconfirmed_inputs(self):
# Store height so we can easily reset the chain at the end of the test
cur_height = self.nodes[0].getblockcount()
# Create a mempool tx.
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Anyone-can-spend mempool tx.
# Sequence lock of 0 should pass.
tx2 = CTransaction()
tx2.nVersion = 2
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(tx2_raw)
# Create a spend of the 0th output of orig_tx with a sequence lock
# of 1, and test what happens when submitting.
# orig_tx.vout[0] must be an anyone-can-spend output
def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
sequence_value = 1
if not use_height_lock:
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx = CTransaction()
tx.nVersion = 2
tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)]
tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))]
tx.rehash()
try:
node.sendrawtransaction(ToHex(tx))
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
assert(orig_tx.hash in node.getrawmempool())
else:
# orig_tx must not be in mempool
assert(orig_tx.hash not in node.getrawmempool())
return tx
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Now mine some blocks, but make sure tx2 doesn't get mined.
# Use prioritisetransaction to lower the effective feerate to 0
self.nodes[0].prioritisetransaction(tx2.hash, -1e15, int(-self.relayfee*COIN))
cur_time = int(time.time())
for i in range(10):
self.nodes[0].setmocktime(cur_time + 600)
self.nodes[0].generate(1)
cur_time += 600
assert(tx2.hash in self.nodes[0].getrawmempool())
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Mine tx2, and then try again
self.nodes[0].prioritisetransaction(tx2.hash, 1e15, int(self.relayfee*COIN))
# Advance the time on the node so that we can test timelocks
self.nodes[0].setmocktime(cur_time+600)
self.nodes[0].generate(1)
assert(tx2.hash not in self.nodes[0].getrawmempool())
# Now that tx2 is not in the mempool, a sequence locked spend should
# succeed
tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
assert(tx3.hash in self.nodes[0].getrawmempool())
self.nodes[0].generate(1)
assert(tx3.hash not in self.nodes[0].getrawmempool())
# One more test, this time using height locks
tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx4.hash in self.nodes[0].getrawmempool())
# Now try combining confirmed and unconfirmed inputs
tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx5.hash not in self.nodes[0].getrawmempool())
utxos = self.nodes[0].listunspent()
tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1))
tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN)
raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"]
try:
self.nodes[0].sendrawtransaction(raw_tx5)
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(False)
# Test mempool-BIP68 consistency after reorg
#
# State of the transactions in the last blocks:
# ... -> [ tx2 ] -> [ tx3 ]
# tip-1 tip
# And currently tx4 is in the mempool.
#
# If we invalidate the tip, tx3 should get added to the mempool, causing
# tx4 to be removed (fails sequence-lock).
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
assert(tx4.hash not in self.nodes[0].getrawmempool())
assert(tx3.hash in self.nodes[0].getrawmempool())
# Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
# diagram above).
# This would cause tx2 to be added back to the mempool, which in turn causes
# tx3 to be removed.
tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16)
height = self.nodes[0].getblockcount()
for i in range(2):
block = create_block(tip, create_coinbase(height), cur_time)
block.nVersion = 3
block.rehash()
block.solve()
tip = block.sha256
height += 1
self.nodes[0].submitblock(ToHex(block))
cur_time += 1
mempool = self.nodes[0].getrawmempool()
assert(tx3.hash not in mempool)
assert(tx2.hash in mempool)
# Reset the chain and get rid of the mocktimed-blocks
self.nodes[0].setmocktime(0)
self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1))
self.nodes[0].generate(10)
# Make sure that BIP68 isn't being used to validate blocks, prior to
# versionbits activation. If more blocks are mined prior to this test
# being run, then it's possible the test has activated the soft fork, and
# this test should be moved to run earlier, or deleted.
def test_bip68_not_consensus(self):
assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active')
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Make an anyone-can-spend transaction
tx2 = CTransaction()
tx2.nVersion = 1
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
# sign tx2
tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Now make an invalid spend of tx2 according to BIP68
sequence_value = 100 # 100 block relative locktime
tx3 = CTransaction()
tx3.nVersion = 2
tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx3.rehash()
try:
self.nodes[0].sendrawtransaction(ToHex(tx3))
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(False)
# make a block that violates bip68; ensure that the tip updates
tip = int(self.nodes[0].getbestblockhash(), 16)
block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1))
block.nVersion = 3
block.vtx.extend([tx1, tx2, tx3])
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].submitblock(ToHex(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
def activateCSV(self):
        # Activation should happen at block height 432 (3 periods of 144 blocks).
        min_activation_height = 432
        height = self.nodes[0].getblockcount()
        assert(height < min_activation_height)
        self.nodes[0].generate(min_activation_height - height)
assert(get_bip9_status(self.nodes[0], 'csv')['status'] == 'active')
sync_blocks(self.nodes)
# Use self.nodes[1] to test standardness relay policy
def test_version2_relay(self, before_activation):
inputs = [ ]
outputs = { self.nodes[1].getnewaddress() : 1.0 }
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
tx = FromHex(CTransaction(), rawtxfund)
tx.nVersion = 2
tx_signed = self.nodes[1].signrawtransaction(ToHex(tx))["hex"]
        try:
            tx_id = self.nodes[1].sendrawtransaction(tx_signed)
            # Relay of a version-2 transaction must only succeed once the
            # soft fork is active.
            assert(not before_activation)
        except JSONRPCException:
            # Catch only RPC rejections so a failed assert above isn't
            # swallowed by this handler.
            assert(before_activation)
if __name__ == '__main__':
BIP68Test().main()
|
teamfx/openjfx-10-dev-rt
|
refs/heads/master
|
modules/javafx.web/src/main/native/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py
|
2
|
# Copyright (c) 2009, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import operator
import re
import urllib
import urllib2
import webkitpy.common.config.urls as config_urls
from webkitpy.common.memoized import memoized
from webkitpy.common.net.failuremap import FailureMap
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.net.networktransaction import NetworkTransaction
from webkitpy.common.net.regressionwindow import RegressionWindow
from webkitpy.common.system.logutils import get_logger
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
_log = get_logger(__file__)
class Builder(object):
def __init__(self, name, buildbot):
self._name = name
self._buildbot = buildbot
self._builds_cache = {}
self._revision_to_build_number = None
from webkitpy.thirdparty.autoinstalled.mechanize import Browser
self._browser = Browser()
self._browser.set_handle_robots(False) # The builder pages are excluded by robots.txt
def name(self):
return self._name
def results_url(self):
return "%s/results/%s" % (self._buildbot.buildbot_url, self.url_encoded_name())
# In addition to per-build results, the build.chromium.org builders also
# keep a directory that accumulates test results over many runs.
def accumulated_results_url(self):
return None
def latest_layout_test_results_url(self):
        return self.accumulated_results_url() or self.latest_cached_build().results_url()
@memoized
def latest_layout_test_results(self):
return self.fetch_layout_test_results(self.latest_layout_test_results_url())
def _fetch_file_from_results(self, results_url, file_name):
# It seems this can return None if the url redirects and then returns 404.
result = urllib2.urlopen("%s/%s" % (results_url, file_name))
if not result:
return None
        # urlopen returns a file-like object which sometimes works fine with str()
        # but sometimes is an addinfourl object. In either case calling read() is correct.
return result.read()
def fetch_layout_test_results(self, results_url):
# FIXME: This should cache that the result was a 404 and stop hitting the network.
results_file = NetworkTransaction(convert_404_to_None=True).run(lambda: self._fetch_file_from_results(results_url, "full_results.json"))
return LayoutTestResults.results_from_string(results_file)
def url_encoded_name(self):
return urllib.quote(self._name)
def url(self):
return "%s/builders/%s" % (self._buildbot.buildbot_url, self.url_encoded_name())
# This provides a single place to mock
def _fetch_build(self, build_number):
build_dictionary = self._buildbot._fetch_build_dictionary(self, build_number)
if not build_dictionary:
return None
revision_string = build_dictionary['sourceStamp']['revision']
return Build(self,
build_number=int(build_dictionary['number']),
# 'revision' may be None if a trunk build was started by the force-build button on the web page.
revision=(int(revision_string) if revision_string else None),
            # Buildbot uses any number other than 0 to mean fail. Since we fetch with
# filter=1, passing builds may contain no 'results' value.
is_green=(not build_dictionary.get('results')),
)
def build(self, build_number):
if not build_number:
return None
cached_build = self._builds_cache.get(build_number)
if cached_build:
return cached_build
build = self._fetch_build(build_number)
self._builds_cache[build_number] = build
return build
def latest_cached_build(self):
revision_build_pairs = self.revision_build_pairs_with_results()
revision_build_pairs.sort(key=lambda i: i[1])
latest_build_number = revision_build_pairs[-1][1]
return self.build(latest_build_number)
def force_build(self, username="webkit-patch", comments=None):
def predicate(form):
try:
return form.find_control("username")
except Exception, e:
return False
# ignore false positives for missing Browser methods - pylint: disable=E1102
self._browser.open(self.url())
self._browser.select_form(predicate=predicate)
self._browser["username"] = username
if comments:
self._browser["comments"] = comments
return self._browser.submit()
file_name_regexp = re.compile(r"r(?P<revision>\d+) \((?P<build_number>\d+)\)")
def _revision_and_build_for_filename(self, filename):
# Example: "r47483 (1)/" or "r47483 (1).zip"
match = self.file_name_regexp.match(filename)
if not match:
return None
return (int(match.group("revision")), int(match.group("build_number")))
def _fetch_revision_to_build_map(self):
# All _fetch requests go through _buildbot for easier mocking
# FIXME: This should use NetworkTransaction's 404 handling instead.
try:
# FIXME: This method is horribly slow due to the huge network load.
# FIXME: This is a poor way to do revision -> build mapping.
# Better would be to ask buildbot through some sort of API.
print "Loading revision/build list from %s." % self.results_url()
print "This may take a while..."
result_files = self._buildbot._fetch_twisted_directory_listing(self.results_url())
except urllib2.HTTPError, error:
if error.code != 404:
raise
_log.debug("Revision/build list failed to load.")
result_files = []
return dict(self._file_info_list_to_revision_to_build_list(result_files))
def _file_info_list_to_revision_to_build_list(self, file_info_list):
# This assumes there was only one build per revision, which is false but we don't care for now.
revisions_and_builds = []
for file_info in file_info_list:
revision_and_build = self._revision_and_build_for_filename(file_info["filename"])
if revision_and_build:
revisions_and_builds.append(revision_and_build)
return revisions_and_builds
def _revision_to_build_map(self):
if not self._revision_to_build_number:
self._revision_to_build_number = self._fetch_revision_to_build_map()
return self._revision_to_build_number
def revision_build_pairs_with_results(self):
return self._revision_to_build_map().items()
# This assumes there can be only one build per revision, which is false, but we don't care for now.
def build_for_revision(self, revision, allow_failed_lookups=False):
# NOTE: This lookup will fail if that exact revision was never built.
build_number = self._revision_to_build_map().get(int(revision))
if not build_number:
return None
build = self.build(build_number)
if not build and allow_failed_lookups:
            # Builds for old revisions will fail to look up via buildbot's json api.
build = Build(self,
build_number=build_number,
revision=revision,
is_green=False,
)
return build
def find_regression_window(self, red_build, look_back_limit=30):
if not red_build or red_build.is_green():
return RegressionWindow(None, None)
common_failures = None
current_build = red_build
build_after_current_build = None
look_back_count = 0
while current_build:
if current_build.is_green():
# current_build can't possibly have any failures in common
# with red_build because it's green.
break
results = current_build.layout_test_results()
            # We treat a lack of results as if all the tests failed.
            # This occurs, for example, when we can't compile at all.
if results:
failures = set(results.failing_tests())
                if common_failures is None:
common_failures = failures
else:
common_failures = common_failures.intersection(failures)
if not common_failures:
# current_build doesn't have any failures in common with
# the red build we're worried about. We assume that any
# failures in current_build were due to flakiness.
break
look_back_count += 1
if look_back_count > look_back_limit:
return RegressionWindow(None, current_build, failing_tests=common_failures)
build_after_current_build = current_build
current_build = current_build.previous_build()
# We must iterate at least once because red_build is red.
assert(build_after_current_build)
# Current build must either be green or have no failures in common
# with red build, so we've found our failure transition.
return RegressionWindow(current_build, build_after_current_build, failing_tests=common_failures)
def find_blameworthy_regression_window(self, red_build_number, look_back_limit=30, avoid_flakey_tests=True):
red_build = self.build(red_build_number)
regression_window = self.find_regression_window(red_build, look_back_limit)
if not regression_window.build_before_failure():
return None # We ran off the limit of our search
# If avoid_flakey_tests, require at least 2 bad builds before we
# suspect a real failure transition.
if avoid_flakey_tests and regression_window.failing_build() == red_build:
return None
return regression_window
class Build(object):
def __init__(self, builder, build_number, revision, is_green):
self._builder = builder
self._number = build_number
self._revision = revision
self._is_green = is_green
@staticmethod
def build_url(builder, build_number):
return "%s/builds/%s" % (builder.url(), build_number)
def url(self):
return self.build_url(self.builder(), self._number)
def results_url(self):
results_directory = "r%s (%s)" % (self.revision(), self._number)
return "%s/%s" % (self._builder.results_url(), urllib.quote(results_directory))
def results_zip_url(self):
return "%s.zip" % self.results_url()
@memoized
def layout_test_results(self):
return self._builder.fetch_layout_test_results(self.results_url())
def builder(self):
return self._builder
def revision(self):
return self._revision
def is_green(self):
return self._is_green
def previous_build(self):
# previous_build() allows callers to avoid assuming build numbers are sequential.
# They may not be sequential across all master changes, or when non-trunk builds are made.
return self._builder.build(self._number - 1)
class BuildBot(object):
_builder_factory = Builder
_default_url = config_urls.buildbot_url
def __init__(self, url=None):
self.buildbot_url = url if url else self._default_url
self._builder_by_name = {}
def _parse_last_build_cell(self, builder, cell):
status_link = cell.find('a')
if status_link:
# Will be either a revision number or a build number
revision_string = status_link.string
# If revision_string has non-digits assume it's not a revision number.
builder['built_revision'] = int(revision_string) \
if not re.match('\D', revision_string) \
else None
            # FIXME: We treat slave lost as green even though it is not, to
            # work around the Qt bot being on a broken internet connection.
            # The real fix is https://bugs.webkit.org/show_bug.cgi?id=37099
            builder['is_green'] = not re.search('fail', cell.renderContents()) or \
                bool(re.search('lost', cell.renderContents()))
status_link_regexp = r"builders/(?P<builder_name>.*)/builds/(?P<build_number>\d+)"
link_match = re.match(status_link_regexp, status_link['href'])
builder['build_number'] = int(link_match.group("build_number"))
else:
            # We failed to find a link in the first cell, so just give up.
            # This can happen if a builder was just added; the first cell
            # will just say "no build".
# Other parts of the code depend on is_green being present.
builder['is_green'] = False
builder['built_revision'] = None
builder['build_number'] = None
def _parse_current_build_cell(self, builder, cell):
activity_lines = cell.renderContents().split("<br />")
builder["activity"] = activity_lines[0] # normally "building" or "idle"
# The middle lines document how long left for any current builds.
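        # e.g. a cell rendering as "building<br />< 1 min<br />2 pending"
        # (hypothetical contents) would yield activity "building" and
        # pending_builds 2.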
match = re.match("(?P<pending_builds>\d) pending", activity_lines[-1])
builder["pending_builds"] = int(match.group("pending_builds")) if match else 0
def _parse_builder_status_from_row(self, status_row):
status_cells = status_row.findAll('td')
builder = {}
# First cell is the name
name_link = status_cells[0].find('a')
builder["name"] = unicode(name_link.string)
self._parse_last_build_cell(builder, status_cells[1])
self._parse_current_build_cell(builder, status_cells[2])
return builder
def _matches_regexps(self, builder_name, name_regexps):
for name_regexp in name_regexps:
if re.match(name_regexp, builder_name):
return True
return False
# FIXME: This method needs to die, but is used by a unit test at the moment.
def _builder_statuses_with_names_matching_regexps(self, builder_statuses, name_regexps):
return [builder for builder in builder_statuses if self._matches_regexps(builder["name"], name_regexps)]
# FIXME: These _fetch methods should move to a networking class.
def _fetch_build_dictionary(self, builder, build_number):
        # Note: filter=1 will remove None, {} and '', which cuts noise but
        # can mean that keys you might otherwise expect are missing.
# FIXME: The bot sends a *huge* amount of data for each request, we should
# find a way to reduce the response size further.
json_url = "%s/json/builders/%s/builds/%s?filter=1" % (self.buildbot_url, urllib.quote(builder.name()), build_number)
try:
return json.load(urllib2.urlopen(json_url))
except urllib2.URLError, err:
build_url = Build.build_url(builder, build_number)
_log.error("Error fetching data for %s build %s (%s, json: %s): %s" % (builder.name(), build_number, build_url, json_url, err))
return None
except ValueError, err:
build_url = Build.build_url(builder, build_number)
_log.error("Error decoding json data from %s: %s" % (build_url, err))
return None
def _fetch_one_box_per_builder(self):
build_status_url = "%s/one_box_per_builder" % self.buildbot_url
return urllib2.urlopen(build_status_url)
def _file_cell_text(self, file_cell):
"""Traverses down through firstChild elements until one containing a string is found, then returns that string"""
element = file_cell
while element.string is None and element.contents:
element = element.contents[0]
return element.string
def _parse_twisted_file_row(self, file_row):
string_or_empty = lambda string: unicode(string) if string else u""
file_cells = file_row.findAll('td')
return {
"filename": string_or_empty(self._file_cell_text(file_cells[0])),
"size": string_or_empty(self._file_cell_text(file_cells[1])),
"type": string_or_empty(self._file_cell_text(file_cells[2])),
"encoding": string_or_empty(self._file_cell_text(file_cells[3])),
}
def _parse_twisted_directory_listing(self, page):
soup = BeautifulSoup(page)
# HACK: Match only table rows with a class to ignore twisted header/footer rows.
file_rows = soup.find('table').findAll('tr', {'class': re.compile(r'\b(?:directory|file)\b')})
return [self._parse_twisted_file_row(file_row) for file_row in file_rows]
# FIXME: There should be a better way to get this information directly from twisted.
def _fetch_twisted_directory_listing(self, url):
return self._parse_twisted_directory_listing(urllib2.urlopen(url))
def builders(self):
return [self.builder_with_name(status["name"]) for status in self.builder_statuses()]
    # This method pulls from /one_box_per_builder as an efficient way to get
    # information about every builder in a single fetch.
def builder_statuses(self):
soup = BeautifulSoup(self._fetch_one_box_per_builder())
return [self._parse_builder_status_from_row(status_row) for status_row in soup.find('table').findAll('tr')]
def builder_with_name(self, name):
builder = self._builder_by_name.get(name)
if not builder:
builder = self._builder_factory(name, self)
self._builder_by_name[name] = builder
return builder
def failure_map(self):
failure_map = FailureMap()
revision_to_failing_bots = {}
for builder_status in self.builder_statuses():
if builder_status["is_green"]:
continue
builder = self.builder_with_name(builder_status["name"])
regression_window = builder.find_blameworthy_regression_window(builder_status["build_number"])
if regression_window:
failure_map.add_regression_window(builder, regression_window)
return failure_map
# This makes fewer requests than calling Builder.latest_build would. It grabs all builder
# statuses in one request using self.builder_statuses (fetching /one_box_per_builder instead of builder pages).
def _latest_builds_from_builders(self):
builder_statuses = self.builder_statuses()
return [self.builder_with_name(status["name"]).build(status["build_number"]) for status in builder_statuses]
def _build_at_or_before_revision(self, build, revision):
while build:
if build.revision() <= revision:
return build
build = build.previous_build()
def _fetch_builder_page(self, builder):
builder_page_url = "%s/builders/%s?numbuilds=100" % (self.buildbot_url, urllib2.quote(builder.name()))
return urllib2.urlopen(builder_page_url)
def _revisions_for_builder(self, builder):
soup = BeautifulSoup(self._fetch_builder_page(builder))
revisions = []
for status_row in soup.find('table').findAll('tr'):
revision_anchor = status_row.find('a')
table_cells = status_row.findAll('td')
if not table_cells or len(table_cells) < 3 or not table_cells[2].string:
continue
if revision_anchor and revision_anchor.string and re.match(r'^\d+$', revision_anchor.string):
revisions.append((int(revision_anchor.string), 'success' in table_cells[2].string))
return revisions
def _find_green_revision(self, builder_revisions):
revision_statuses = {}
for builder in builder_revisions:
for revision, succeeded in builder_revisions[builder]:
revision_statuses.setdefault(revision, set())
                if succeeded and revision_statuses[revision] is not None:
revision_statuses[revision].add(builder)
else:
revision_statuses[revision] = None
        # In descending order, look for a revision X with successful builds.
        # Once we find X, check whether the remaining builders succeeded in
        # the neighborhood of X.
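        # Sketch with hypothetical data: given
        #   builder_revisions = {"Mac": [(105, True), (104, False)],
        #                        "Win": [(105, True), (104, False)]}
        # revision 105 is the newest revision at which every builder
        # succeeded, so it would be returned.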
revisions_in_order = sorted(revision_statuses.keys(), reverse=True)
for i, revision in enumerate(revisions_in_order):
if not revision_statuses[revision]:
continue
builders_succeeded_in_future = set()
for future_revision in sorted(revisions_in_order[:i + 1]):
if not revision_statuses[future_revision]:
break
builders_succeeded_in_future = builders_succeeded_in_future.union(revision_statuses[future_revision])
builders_succeeded_in_past = set()
for past_revision in revisions_in_order[i:]:
if not revision_statuses[past_revision]:
break
builders_succeeded_in_past = builders_succeeded_in_past.union(revision_statuses[past_revision])
if len(builders_succeeded_in_future) == len(builder_revisions) and len(builders_succeeded_in_past) == len(builder_revisions):
return revision
return None
|
gatgui/lwc
|
refs/heads/master
|
src/modules/objlist.py
|
1
|
# Copyright (C) 2009, 2010 Gaetan Guidet
#
# This file is part of lwc.
#
# lwc is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or (at
# your option) any later version.
#
# lwc is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
import lwcpy
class ObjectList(lwcpy.Object):
Methods = {"push" : ([(lwcpy.AD_IN, lwcpy.AT_OBJECT)],
"Append object to list"),
"at" : ([(lwcpy.AD_IN, lwcpy.AT_INT),
(lwcpy.AD_OUT, lwcpy.AT_OBJECT)],
"Get object at given index"),
"set" : ([(lwcpy.AD_IN, lwcpy.AT_INT),
(lwcpy.AD_IN, lwcpy.AT_OBJECT)],
"Replace object at given index"),
"first" : ([(lwcpy.AD_OUT, lwcpy.AT_OBJECT)],
"Get first object in list"),
"last" : ([(lwcpy.AD_OUT, lwcpy.AT_OBJECT)],
"Get last object in list"),
"erase" : ([(lwcpy.AD_IN, lwcpy.AT_INT)],
"Remove object at given index"),
"size" : ([(lwcpy.AD_OUT, lwcpy.AT_INT)],
"Get number of object(s) in list"),
"pop" : ([],
"Remove last object in list"),
"printInt": ([(lwcpy.AD_IN, lwcpy.AT_INT),
(lwcpy.AD_IN, lwcpy.AT_STRING, -1, True, "", "indent")],
"Print an integer number with optional indent")}
Description = "List of lwc::Object instances."
def __init__(self):
lwcpy.Object.__init__(self)
self.lst = []
def size(self):
return len(self.lst)
def push(self, obj):
self.lst.append(obj)
def at(self, idx):
if idx < 0 or idx >= len(self.lst):
raise Exception("Invalid index %s in list" % idx)
return self.lst[idx]
def set(self, idx, obj):
if idx < 0 or idx >= len(self.lst):
raise Exception("Invalid index %s in list" % idx)
self.lst[idx] = obj
def first(self):
if len(self.lst) > 0:
return self.lst[0]
else:
return None
def last(self):
if len(self.lst) > 0:
return self.lst[-1]
else:
return None
def erase(self, idx):
if idx < 0 or idx >= len(self.lst):
raise Exception("Invalid index %s in list" % idx)
        # Delete by position; list.remove() would drop the first equal
        # object, which is wrong when the list contains duplicates.
        del self.lst[idx]
def pop(self):
if len(self.lst) > 0:
self.lst = self.lst[:-1]
def printInt(self, val, indent=""):
print("%s%s" % (indent, val))
class ObjectList2(ObjectList):
Methods = {"clear": ([],
"Remove all object(s) from list")}
Description = "Clearable list of lwc::Object instances."
def __init__(self):
ObjectList.__init__(self)
def clear(self):
self.lst = []
# Module init
def LWC_ModuleGetTypeCount():
return 2
def LWC_ModuleGetTypeName(idx):
if idx == 0:
return "pytest.ObjectList"
elif idx == 1:
return "pytest.ObjectList2"
else:
return None
def LWC_ModuleGetTypeClass(idx):
if idx == 0:
return ObjectList
elif idx == 1:
return ObjectList2
else:
return None
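# A host loading this module would presumably enumerate the exported types
# along these lines (a sketch; the actual lwc loader API may differ):
#
#   for i in range(LWC_ModuleGetTypeCount()):
#       name, cls = LWC_ModuleGetTypeName(i), LWC_ModuleGetTypeClass(i)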
|
osrf/osrf_pycommon
|
refs/heads/master
|
tests/unit/test_process_utils/impl_aep_asyncio.py
|
2
|
from osrf_pycommon.process_utils import asyncio
from osrf_pycommon.process_utils.async_execute_process import async_execute_process
from osrf_pycommon.process_utils import get_loop
from .impl_aep_protocol import create_protocol
loop = get_loop()
@asyncio.coroutine
def run(cmd, **kwargs):
transport, protocol = yield from async_execute_process(
create_protocol(), cmd, **kwargs)
retcode = yield from protocol.complete
return protocol.stdout_buffer, protocol.stderr_buffer, retcode
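# Usage sketch (assuming a command list such as ['echo', 'hi']):
#   stdout, stderr, retcode = loop.run_until_complete(run(['echo', 'hi']))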
|
spthaolt/VTK
|
refs/heads/5.10.1_vs2013
|
Utilities/vtkTclTest2Py/info.py
|
10
|
"""Selective implementation of tcl info command.
This script is used while running python tests translated from Tcl."""
import re
def command (caller_globals, caller_locals, pattern):
return commands(caller_globals, caller_locals, pattern)
def commands (caller_globals, caller_locals, pattern):
    print "pattern %s" % pattern
    rex = re.compile(pattern)
    # Accumulate into 'result' rather than shadowing the builtin 'str'.
    result = ""
    for c in caller_globals.keys():
        if rex.match(c):
            result += c + " "
    for c in caller_locals.keys():
        if rex.match(c):
            result += c + " "
    return result.strip()
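# Example: commands(globals(), locals(), 'vtk.*') returns a space-separated
# string of the caller's global and local names that match the pattern.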
|
pbrady/sympy
|
refs/heads/master
|
sympy/core/tests/test_subs.py
|
24
|
from __future__ import division
from sympy import (Symbol, Wild, sin, cos, exp, sqrt, pi, Function, Derivative,
abc, Integer, Eq, symbols, Add, I, Float, log, Rational, Lambda, atan2,
cse, cot, tan, S, Tuple, Basic, Dict, Piecewise, oo, Mul,
factor, nsimplify, zoo, Subs)
from sympy.core.basic import _aresame
from sympy.utilities.pytest import XFAIL
from sympy.abc import x, y, z
def test_subs():
n3 = Rational(3)
e = x
e = e.subs(x, n3)
assert e == Rational(3)
e = 2*x
assert e == 2*x
e = e.subs(x, n3)
assert e == Rational(6)
def test_trigonometric():
n3 = Rational(3)
e = (sin(x)**2).diff(x)
assert e == 2*sin(x)*cos(x)
e = e.subs(x, n3)
assert e == 2*cos(n3)*sin(n3)
e = (sin(x)**2).diff(x)
assert e == 2*sin(x)*cos(x)
e = e.subs(sin(x), cos(x))
assert e == 2*cos(x)**2
assert exp(pi).subs(exp, sin) == 0
assert cos(exp(pi)).subs(exp, sin) == 1
i = Symbol('i', integer=True)
zoo = S.ComplexInfinity
assert tan(x).subs(x, pi/2) is zoo
assert cot(x).subs(x, pi) is zoo
assert cot(i*x).subs(x, pi) is zoo
assert tan(i*x).subs(x, pi/2) == tan(i*pi/2)
assert tan(i*x).subs(x, pi/2).subs(i, 1) is zoo
o = Symbol('o', odd=True)
assert tan(o*x).subs(x, pi/2) == tan(o*pi/2)
def test_powers():
assert sqrt(1 - sqrt(x)).subs(x, 4) == I
assert (sqrt(1 - x**2)**3).subs(x, 2) == - 3*I*sqrt(3)
assert (x**Rational(1, 3)).subs(x, 27) == 3
assert (x**Rational(1, 3)).subs(x, -27) == 3*(-1)**Rational(1, 3)
assert ((-x)**Rational(1, 3)).subs(x, 27) == 3*(-1)**Rational(1, 3)
n = Symbol('n', negative=True)
assert (x**n).subs(x, 0) is S.ComplexInfinity
assert exp(-1).subs(S.Exp1, 0) is S.ComplexInfinity
assert (x**(4.0*y)).subs(x**(2.0*y), n) == n**2.0
assert (2**(x + 2)).subs(2, 3) == 3**(x + 3)
def test_logexppow(): # no eval()
x = Symbol('x', real=True)
w = Symbol('w')
e = (3**(1 + x) + 2**(1 + x))/(3**x + 2**x)
assert e.subs(2**x, w) != e
assert e.subs(exp(x*log(Rational(2))), w) != e
def test_bug():
x1 = Symbol('x1')
x2 = Symbol('x2')
y = x1*x2
assert y.subs(x1, Float(3.0)) == Float(3.0)*x2
def test_subbug1():
# see that they don't fail
(x**x).subs(x, 1)
(x**x).subs(x, 1.0)
def test_subbug2():
# Ensure this does not cause infinite recursion
assert Float(7.7).epsilon_eq(abs(x).subs(x, -7.7))
def test_dict_set():
a, b, c = map(Wild, 'abc')
f = 3*cos(4*x)
r = f.match(a*cos(b*x))
assert r == {a: 3, b: 4}
e = a/b*sin(b*x)
assert e.subs(r) == r[a]/r[b]*sin(r[b]*x)
assert e.subs(r) == 3*sin(4*x) / 4
s = set(r.items())
assert e.subs(s) == r[a]/r[b]*sin(r[b]*x)
assert e.subs(s) == 3*sin(4*x) / 4
assert e.subs(r) == r[a]/r[b]*sin(r[b]*x)
assert e.subs(r) == 3*sin(4*x) / 4
assert x.subs(Dict((x, 1))) == 1
def test_dict_ambigous(): # see issue 3566
y = Symbol('y')
z = Symbol('z')
f = x*exp(x)
g = z*exp(z)
df = {x: y, exp(x): y}
dg = {z: y, exp(z): y}
assert f.subs(df) == y**2
assert g.subs(dg) == y**2
# and this is how order can affect the result
assert f.subs(x, y).subs(exp(x), y) == y*exp(y)
assert f.subs(exp(x), y).subs(x, y) == y**2
# length of args and count_ops are the same so
# default_sort_key resolves ordering...if one
# doesn't want this result then an unordered
# sequence should not be used.
e = 1 + x*y
assert e.subs({x: y, y: 2}) == 5
# here, there are no obviously clashing keys or values
# but the results depend on the order
assert exp(x/2 + y).subs(dict([(exp(y + 1), 2), (x, 2)])) == exp(y + 1)
def test_deriv_sub_bug3():
y = Symbol('y')
f = Function('f')
pat = Derivative(f(x), x, x)
assert pat.subs(y, y**2) == Derivative(f(x), x, x)
assert pat.subs(y, y**2) != Derivative(f(x), x)
def test_equality_subs1():
f = Function('f')
x = abc.x
eq = Eq(f(x)**2, x)
res = Eq(Integer(16), x)
assert eq.subs(f(x), 4) == res
def test_equality_subs2():
f = Function('f')
x = abc.x
eq = Eq(f(x)**2, 16)
assert bool(eq.subs(f(x), 3)) is False
assert bool(eq.subs(f(x), 4)) is True
def test_issue_3742():
y = Symbol('y')
e = sqrt(x)*exp(y)
assert e.subs(sqrt(x), 1) == exp(y)
def test_subs_dict1():
x, y = symbols('x y')
assert (1 + x*y).subs(x, pi) == 1 + pi*y
assert (1 + x*y).subs({x: pi, y: 2}) == 1 + 2*pi
c2, c3, q1p, q2p, c1, s1, s2, s3 = symbols('c2 c3 q1p q2p c1 s1 s2 s3')
test = (c2**2*q2p*c3 + c1**2*s2**2*q2p*c3 + s1**2*s2**2*q2p*c3
- c1**2*q1p*c2*s3 - s1**2*q1p*c2*s3)
assert (test.subs({c1**2: 1 - s1**2, c2**2: 1 - s2**2, c3**3: 1 - s3**2})
== c3*q2p*(1 - s2**2) + c3*q2p*s2**2*(1 - s1**2)
- c2*q1p*s3*(1 - s1**2) + c3*q2p*s1**2*s2**2 - c2*q1p*s3*s1**2)
def test_mul():
x, y, z, a, b, c = symbols('x y z a b c')
A, B, C = symbols('A B C', commutative=0)
assert (x*y*z).subs(z*x, y) == y**2
assert (z*x).subs(1/x, z) == z*x
assert (x*y/z).subs(1/z, a) == a*x*y
assert (x*y/z).subs(x/z, a) == a*y
assert (x*y/z).subs(y/z, a) == a*x
assert (x*y/z).subs(x/z, 1/a) == y/a
assert (x*y/z).subs(x, 1/a) == y/(z*a)
assert (2*x*y).subs(5*x*y, z) != 2*z/5
assert (x*y*A).subs(x*y, a) == a*A
assert (x**2*y**(3*x/2)).subs(x*y**(x/2), 2) == 4*y**(x/2)
assert (x*exp(x*2)).subs(x*exp(x), 2) == 2*exp(x)
assert ((x**(2*y))**3).subs(x**y, 2) == 64
assert (x*A*B).subs(x*A, y) == y*B
assert (x*y*(1 + x)*(1 + x*y)).subs(x*y, 2) == 6*(1 + x)
assert ((1 + A*B)*A*B).subs(A*B, x*A*B)
assert (x*a/z).subs(x/z, A) == a*A
assert (x**3*A).subs(x**2*A, a) == a*x
assert (x**2*A*B).subs(x**2*B, a) == a*A
assert (x**2*A*B).subs(x**2*A, a) == a*B
assert (b*A**3/(a**3*c**3)).subs(a**4*c**3*A**3/b**4, z) == \
b*A**3/(a**3*c**3)
assert (6*x).subs(2*x, y) == 3*y
assert (y*exp(3*x/2)).subs(y*exp(x), 2) == 2*exp(x/2)
assert (y*exp(3*x/2)).subs(y*exp(x), 2) == 2*exp(x/2)
assert (A**2*B*A**2*B*A**2).subs(A*B*A, C) == A*C**2*A
assert (x*A**3).subs(x*A, y) == y*A**2
assert (x**2*A**3).subs(x*A, y) == y**2*A
assert (x*A**3).subs(x*A, B) == B*A**2
assert (x*A*B*A*exp(x*A*B)).subs(x*A, B) == B**2*A*exp(B*B)
assert (x**2*A*B*A*exp(x*A*B)).subs(x*A, B) == B**3*exp(B**2)
assert (x**3*A*exp(x*A*B)*A*exp(x*A*B)).subs(x*A, B) == \
x*B*exp(B**2)*B*exp(B**2)
assert (x*A*B*C*A*B).subs(x*A*B, C) == C**2*A*B
assert (-I*a*b).subs(a*b, 2) == -2*I
# issue 6361
assert (-8*I*a).subs(-2*a, 1) == 4*I
assert (-I*a).subs(-a, 1) == I
# issue 6441
assert (4*x**2).subs(2*x, y) == y**2
assert (2*4*x**2).subs(2*x, y) == 2*y**2
assert (-x**3/9).subs(-x/3, z) == -z**2*x
assert (-x**3/9).subs(x/3, z) == -z**2*x
assert (-2*x**3/9).subs(x/3, z) == -2*x*z**2
assert (-2*x**3/9).subs(-x/3, z) == -2*x*z**2
assert (-2*x**3/9).subs(-2*x, z) == z*x**2/9
assert (-2*x**3/9).subs(2*x, z) == -z*x**2/9
assert (2*(3*x/5/7)**2).subs(3*x/5, z) == 2*(S(1)/7)**2*z**2
    assert (4*x).subs(-2*x, z) == 4*x  # try to keep subs literal
def test_subs_simple():
a = symbols('a', commutative=True)
x = symbols('x', commutative=False)
assert (2*a).subs(1, 3) == 2*a
assert (2*a).subs(2, 3) == 3*a
assert (2*a).subs(a, 3) == 6
assert sin(2).subs(1, 3) == sin(2)
assert sin(2).subs(2, 3) == sin(3)
assert sin(a).subs(a, 3) == sin(3)
assert (2*x).subs(1, 3) == 2*x
assert (2*x).subs(2, 3) == 3*x
assert (2*x).subs(x, 3) == 6
assert sin(x).subs(x, 3) == sin(3)
def test_subs_constants():
a, b = symbols('a b', commutative=True)
x, y = symbols('x y', commutative=False)
assert (a*b).subs(2*a, 1) == a*b
assert (1.5*a*b).subs(a, 1) == 1.5*b
assert (2*a*b).subs(2*a, 1) == b
assert (2*a*b).subs(4*a, 1) == 2*a*b
assert (x*y).subs(2*x, 1) == x*y
assert (1.5*x*y).subs(x, 1) == 1.5*y
assert (2*x*y).subs(2*x, 1) == y
assert (2*x*y).subs(4*x, 1) == 2*x*y
def test_subs_commutative():
a, b, c, d, K = symbols('a b c d K', commutative=True)
assert (a*b).subs(a*b, K) == K
assert (a*b*a*b).subs(a*b, K) == K**2
assert (a*a*b*b).subs(a*b, K) == K**2
assert (a*b*c*d).subs(a*b*c, K) == d*K
assert (a*b**c).subs(a, K) == K*b**c
assert (a*b**c).subs(b, K) == a*K**c
assert (a*b**c).subs(c, K) == a*b**K
assert (a*b*c*b*a).subs(a*b, K) == c*K**2
assert (a**3*b**2*a).subs(a*b, K) == a**2*K**2
def test_subs_noncommutative():
w, x, y, z, L = symbols('w x y z L', commutative=False)
assert (x*y).subs(x*y, L) == L
assert (w*y*x).subs(x*y, L) == w*y*x
assert (w*x*y*z).subs(x*y, L) == w*L*z
assert (x*y*x*y).subs(x*y, L) == L**2
assert (x*x*y).subs(x*y, L) == x*L
assert (x*x*y*y).subs(x*y, L) == x*L*y
assert (w*x*y).subs(x*y*z, L) == w*x*y
assert (x*y**z).subs(x, L) == L*y**z
assert (x*y**z).subs(y, L) == x*L**z
assert (x*y**z).subs(z, L) == x*y**L
assert (w*x*y*z*x*y).subs(x*y*z, L) == w*L*x*y
assert (w*x*y*y*w*x*x*y*x*y*y*x*y).subs(x*y, L) == w*L*y*w*x*L**2*y*L
def test_subs_basic_funcs():
a, b, c, d, K = symbols('a b c d K', commutative=True)
w, x, y, z, L = symbols('w x y z L', commutative=False)
assert (x + y).subs(x + y, L) == L
assert (x - y).subs(x - y, L) == L
assert (x/y).subs(x, L) == L/y
assert (x**y).subs(x, L) == L**y
assert (x**y).subs(y, L) == x**L
assert ((a - c)/b).subs(b, K) == (a - c)/K
assert (exp(x*y - z)).subs(x*y, L) == exp(L - z)
assert (a*exp(x*y - w*z) + b*exp(x*y + w*z)).subs(z, 0) == \
a*exp(x*y) + b*exp(x*y)
assert ((a - b)/(c*d - a*b)).subs(c*d - a*b, K) == (a - b)/K
assert (w*exp(a*b - c)*x*y/4).subs(x*y, L) == w*exp(a*b - c)*L/4
def test_subs_wild():
R, S, T, U = symbols('R S T U', cls=Wild)
assert (R*S).subs(R*S, T) == T
assert (S*R).subs(R*S, T) == T
assert (R + S).subs(R + S, T) == T
assert (R**S).subs(R, T) == T**S
assert (R**S).subs(S, T) == R**T
assert (R*S**T).subs(R, U) == U*S**T
assert (R*S**T).subs(S, U) == R*U**T
assert (R*S**T).subs(T, U) == R*S**U
def test_subs_mixed():
a, b, c, d, K = symbols('a b c d K', commutative=True)
w, x, y, z, L = symbols('w x y z L', commutative=False)
R, S, T, U = symbols('R S T U', cls=Wild)
assert (a*x*y).subs(x*y, L) == a*L
assert (a*b*x*y*x).subs(x*y, L) == a*b*L*x
assert (R*x*y*exp(x*y)).subs(x*y, L) == R*L*exp(L)
assert (a*x*y*y*x - x*y*z*exp(a*b)).subs(x*y, L) == a*L*y*x - L*z*exp(a*b)
e = c*y*x*y*x**(R*S - a*b) - T*(a*R*b*S)
assert e.subs(x*y, L).subs(a*b, K).subs(R*S, U) == \
c*y*L*x**(U - K) - T*(U*K)
def test_division():
a, b, c = symbols('a b c', commutative=True)
x, y, z = symbols('x y z', commutative=True)
assert (1/a).subs(a, c) == 1/c
assert (1/a**2).subs(a, c) == 1/c**2
assert (1/a**2).subs(a, -2) == Rational(1, 4)
assert (-(1/a**2)).subs(a, -2) == -Rational(1, 4)
assert (1/x).subs(x, z) == 1/z
assert (1/x**2).subs(x, z) == 1/z**2
assert (1/x**2).subs(x, -2) == Rational(1, 4)
assert (-(1/x**2)).subs(x, -2) == -Rational(1, 4)
#issue 5360
assert (1/x).subs(x, 0) == 1/S(0)
def test_add():
a, b, c, d, x, y, t = symbols('a b c d x y t')
assert (a**2 - b - c).subs(a**2 - b, d) in [d - c, a**2 - b - c]
assert (a**2 - c).subs(a**2 - c, d) == d
assert (a**2 - b - c).subs(a**2 - c, d) in [d - b, a**2 - b - c]
assert (a**2 - x - c).subs(a**2 - c, d) in [d - x, a**2 - x - c]
assert (a**2 - b - sqrt(a)).subs(a**2 - sqrt(a), c) == c - b
assert (a + b + exp(a + b)).subs(a + b, c) == c + exp(c)
assert (c + b + exp(c + b)).subs(c + b, a) == a + exp(a)
assert (a + b + c + d).subs(b + c, x) == a + d + x
assert (a + b + c + d).subs(-b - c, x) == a + d - x
assert ((x + 1)*y).subs(x + 1, t) == t*y
assert ((-x - 1)*y).subs(x + 1, t) == -t*y
assert ((x - 1)*y).subs(x + 1, t) == y*(t - 2)
assert ((-x + 1)*y).subs(x + 1, t) == y*(-t + 2)
    # this should work every time:
e = a**2 - b - c
assert e.subs(Add(*e.args[:2]), d) == d + e.args[2]
assert e.subs(a**2 - c, d) == d - b
# the fallback should recognize when a change has
# been made; while .1 == Rational(1, 10) they are not the same
# and the change should be made
assert (0.1 + a).subs(0.1, Rational(1, 10)) == Rational(1, 10) + a
e = (-x*(-y + 1) - y*(y - 1))
ans = (-x*(x) - y*(-x)).expand()
assert e.subs(-y + 1, x) == ans
def test_subs_issue_4009():
assert (I*Symbol('a')).subs(1, 2) == I*Symbol('a')
def test_functions_subs():
x, y = symbols('x y')
f, g = symbols('f g', cls=Function)
l = Lambda((x, y), sin(x) + y)
assert (g(y, x) + cos(x)).subs(g, l) == sin(y) + x + cos(x)
assert (f(x)**2).subs(f, sin) == sin(x)**2
assert (f(x, y)).subs(f, log) == log(x, y)
assert (f(x, y)).subs(f, sin) == f(x, y)
assert (sin(x) + atan2(x, y)).subs([[atan2, f], [sin, g]]) == \
f(x, y) + g(x)
assert (g(f(x + y, x))).subs([[f, l], [g, exp]]) == exp(x + sin(x + y))
def test_derivative_subs():
y = Symbol('y')
f = Function('f')
assert Derivative(f(x), x).subs(f(x), y) != 0
assert Derivative(f(x), x).subs(f(x), y).subs(y, f(x)) == \
Derivative(f(x), x)
# issues 5085, 5037
assert cse(Derivative(f(x), x) + f(x))[1][0].has(Derivative)
assert cse(Derivative(f(x, y), x) +
Derivative(f(x, y), y))[1][0].has(Derivative)
def test_derivative_subs2():
x, y, z = symbols('x y z')
f, g = symbols('f g', cls=Function)
assert Derivative(f, x, y).subs(Derivative(f, x, y), g) == g
assert Derivative(f, y, x).subs(Derivative(f, x, y), g) == g
assert Derivative(f, x, y).subs(Derivative(f, x), g) == Derivative(g, y)
assert Derivative(f, x, y).subs(Derivative(f, y), g) == Derivative(g, x)
assert (Derivative(f(x, y, z), x, y, z).subs(
Derivative(f(x, y, z), x, z), g) == Derivative(g, y))
assert (Derivative(f(x, y, z), x, y, z).subs(
Derivative(f(x, y, z), z, y), g) == Derivative(g, x))
assert (Derivative(f(x, y, z), x, y, z).subs(
Derivative(f(x, y, z), z, y, x), g) == g)
def test_derivative_subs3():
x = Symbol('x')
dex = Derivative(exp(x), x)
assert Derivative(dex, x).subs(dex, exp(x)) == dex
assert dex.subs(exp(x), dex) == Derivative(exp(x), x, x)
def test_issue_5284():
A, B = symbols('A B', commutative=False)
assert (x*A).subs(x**2*A, B) == x*A
assert (A**2).subs(A**3, B) == A**2
assert (A**6).subs(A**3, B) == B**2
def test_subs_iter():
assert x.subs(reversed([[x, y]])) == y
it = iter([[x, y]])
assert x.subs(it) == y
assert x.subs(Tuple((x, y))) == y
def test_subs_dict():
a, b, c, d, e = symbols('a b c d e')
z = symbols('z')
assert (2*x + y + z).subs(dict(x=1, y=2)) == 4 + z
l = [(sin(x), 2), (x, 1)]
assert (sin(x)).subs(l) == \
(sin(x)).subs(dict(l)) == 2
assert sin(x).subs(reversed(l)) == sin(1)
expr = sin(2*x) + sqrt(sin(2*x))*cos(2*x)*sin(exp(x)*x)
reps = dict([
(sin(2*x), c),
(sqrt(sin(2*x)), a),
(cos(2*x), b),
(exp(x), e),
(x, d),
])
assert expr.subs(reps) == c + a*b*sin(d*e)
l = [(x, 3), (y, x**2)]
assert (x + y).subs(l) == 3 + x**2
assert (x + y).subs(reversed(l)) == 12
# If changes are made to convert lists into dictionaries and do
# a dictionary-lookup replacement, these tests will help to catch
# some logical errors that might occur
l = [(y, z + 2), (1 + z, 5), (z, 2)]
assert (y - 1 + 3*x).subs(l) == 5 + 3*x
l = [(y, z + 2), (z, 3)]
assert (y - 2).subs(l) == 3
def test_no_arith_subs_on_floats():
a, x, y = symbols('a x y')
assert (x + 3).subs(x + 3, a) == a
assert (x + 3).subs(x + 2, a) == a + 1
assert (x + y + 3).subs(x + 3, a) == a + y
assert (x + y + 3).subs(x + 2, a) == a + y + 1
assert (x + 3.0).subs(x + 3.0, a) == a
assert (x + 3.0).subs(x + 2.0, a) == x + 3.0
assert (x + y + 3.0).subs(x + 3.0, a) == a + y
assert (x + y + 3.0).subs(x + 2.0, a) == x + y + 3.0
def test_issue_5651():
a, b, c, K = symbols('a b c K', commutative=True)
x, y, z = symbols('x y z')
assert (a/(b*c)).subs(b*c, K) == a/K
assert (a/(b**2*c**3)).subs(b*c, K) == a/(c*K**2)
assert (1/(x*y)).subs(x*y, 2) == S.Half
assert ((1 + x*y)/(x*y)).subs(x*y, 1) == 2
assert (x*y*z).subs(x*y, 2) == 2*z
assert ((1 + x*y)/(x*y)/z).subs(x*y, 1) == 2/z
def test_issue_6075():
assert Tuple(1, True).subs(1, 2) == Tuple(2, True)
def test_issue_6079():
# since x + 2.0 == x + 2 we can't do a simple equality test
x = symbols('x')
assert _aresame((x + 2.0).subs(2, 3), x + 2.0)
assert _aresame((x + 2.0).subs(2.0, 3), x + 3)
assert not _aresame(x + 2, x + 2.0)
assert not _aresame(Basic(cos, 1), Basic(cos, 1.))
assert _aresame(cos, cos)
assert not _aresame(1, S(1))
assert not _aresame(x, symbols('x', positive=True))
def test_issue_4680():
N = Symbol('N')
assert N.subs(dict(N=3)) == 3
def test_issue_6158():
assert (x - 1).subs(1, y) == x - y
assert (x - 1).subs(-1, y) == x + y
assert (x - oo).subs(oo, y) == x - y
assert (x - oo).subs(-oo, y) == x + y
def test_Function_subs():
from sympy.abc import x, y
f, g, h, i = symbols('f g h i', cls=Function)
p = Piecewise((g(f(x, y)), x < -1), (g(x), x <= 1))
assert p.subs(g, h) == Piecewise((h(f(x, y)), x < -1), (h(x), x <= 1))
assert (f(y) + g(x)).subs({f: h, g: i}) == i(x) + h(y)
def test_simultaneous_subs():
reps = {x: 0, y: 0}
assert (x/y).subs(reps) != (y/x).subs(reps)
assert (x/y).subs(reps, simultaneous=True) == \
(y/x).subs(reps, simultaneous=True)
reps = reps.items()
assert (x/y).subs(reps) != (y/x).subs(reps)
assert (x/y).subs(reps, simultaneous=True) == \
(y/x).subs(reps, simultaneous=True)
assert Derivative(x, y, z).subs(reps, simultaneous=True) == \
Subs(Derivative(0, y, z), (y,), (0,))
def test_issue_6419_6421():
assert (1/(1 + x/y)).subs(x/y, x) == 1/(1 + x)
assert (-2*I).subs(2*I, x) == -x
assert (-I*x).subs(I*x, x) == -x
assert (-3*I*y**4).subs(3*I*y**2, x) == -x*y**2
def test_issue_6559():
assert (-12*x + y).subs(-x, 1) == 12 + y
# though this involves cse it generated a failure in Mul._eval_subs
x0, x1 = symbols('x0 x1')
e = -log(-12*sqrt(2) + 17)/24 - log(-2*sqrt(2) + 3)/12 + sqrt(2)/3
# XXX modify cse so x1 is eliminated and x0 = -sqrt(2)?
assert cse(e) == (
[(x0, sqrt(2))], [x0/3 - log(-12*x0 + 17)/24 - log(-2*x0 + 3)/12])
def test_issue_5261():
x = symbols('x', real=True)
e = I*x
assert exp(e).subs(exp(x), y) == y**I
assert (2**e).subs(2**x, y) == y**I
eq = (-2)**e
assert eq.subs((-2)**x, y) == eq
def test_issue_6923():
assert (-2*x*sqrt(2)).subs(2*x, y) == -sqrt(2)*y
def test_2arg_hack():
N = Symbol('N', commutative=False)
ans = Mul(2, y + 1, evaluate=False)
assert (2*x*(y + 1)).subs(x, 1, hack2=True) == ans
assert (2*(y + 1 + N)).subs(N, 0, hack2=True) == ans
@XFAIL
def test_mul2():
"""When this fails, remove things labelled "2-arg hack"
1) remove special handling in the fallback of subs that
was added in the same commit as this test
2) remove the special handling in Mul.flatten
"""
assert (2*(x + 1)).is_Mul
def test_noncommutative_subs():
x,y = symbols('x,y', commutative=False)
assert (x*y*x).subs([(x,x*y),(y,x)],simultaneous=True) == (x*y*x**2*y)
def test_issue_2877():
f = Float(2.0)
assert (x + f).subs({f: 2}) == x + 2
def r(a,b,c):
return factor(a*x**2 + b*x + c)
e = r(5/6, 10, 5)
assert nsimplify(e) == 5*x**2/6 + 10*x + 5
def test_issue_5910():
t = Symbol('t')
assert (1/(1 - t)).subs(t, 1) == zoo
n = t
d = t - 1
assert (n/d).subs(t, 1) == zoo
assert (-n/-d).subs(t, 1) == zoo
def test_issue_5217():
s = Symbol('s')
z = (1 - 2*x*x)
w = (1 + 2*x*x)
q = 2*x*x*2*y*y
sub = {2*x*x: s}
assert w.subs(sub) == 1 + s
assert z.subs(sub) == 1 - s
assert q == 4*x**2*y**2
assert q.subs(sub) == 2*y**2*s
|
peletiah/nal_to_gpx
|
refs/heads/master
|
geopy/geocoders/__init__.py
|
14
|
from geopy.geocoders.bing import Bing
from geopy.geocoders.google import Google
from geopy.geocoders.googlev3 import GoogleV3
from geopy.geocoders.dot_us import GeocoderDotUS
from geopy.geocoders.geonames import GeoNames
from geopy.geocoders.wiki_gis import MediaWiki
from geopy.geocoders.wiki_semantic import SemanticMediaWiki
from geopy.geocoders.yahoo import Yahoo
from geopy.geocoders.openmapquest import OpenMapQuest
from geopy.geocoders.mapquest import MapQuest
|
zpzgone/paramiko
|
refs/heads/master
|
paramiko/pkey.py
|
33
|
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Common API for all public keys.
"""
import base64
from binascii import hexlify, unhexlify
import os
from hashlib import md5
from Crypto.Cipher import DES3, AES
from paramiko import util
from paramiko.common import o600, zero_byte
from paramiko.py3compat import u, encodebytes, decodebytes, b
from paramiko.ssh_exception import SSHException, PasswordRequiredException
class PKey (object):
"""
Base class for public keys.
"""
# known encryption types for private key files:
_CIPHER_TABLE = {
'AES-128-CBC': {'cipher': AES, 'keysize': 16, 'blocksize': 16, 'mode': AES.MODE_CBC},
'DES-EDE3-CBC': {'cipher': DES3, 'keysize': 24, 'blocksize': 8, 'mode': DES3.MODE_CBC},
}
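    # keysize/blocksize are in bytes (AES-128: 16-byte key and block;
    # DES-EDE3: 24-byte key, 8-byte block). Adding a cipher here makes it
    # available to both the read and write paths below.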
def __init__(self, msg=None, data=None):
"""
Create a new instance of this public key type. If ``msg`` is given,
the key's public part(s) will be filled in from the message. If
``data`` is given, the key's public part(s) will be filled in from
the string.
:param .Message msg:
an optional SSH `.Message` containing a public key of this type.
:param str data: an optional string containing a public key of this type
:raises SSHException:
if a key cannot be created from the ``data`` or ``msg`` given, or
no key was passed in.
"""
pass
def asbytes(self):
"""
Return a string of an SSH `.Message` made up of the public part(s) of
this key. This string is suitable for passing to `__init__` to
re-create the key object later.
"""
return bytes()
def __str__(self):
return self.asbytes()
# noinspection PyUnresolvedReferences
def __cmp__(self, other):
"""
Compare this key to another. Returns 0 if this key is equivalent to
the given key, or non-0 if they are different. Only the public parts
of the key are compared, so a public key will compare equal to its
corresponding private key.
        :param .PKey other: key to compare to.
"""
hs = hash(self)
ho = hash(other)
if hs != ho:
return cmp(hs, ho)
return cmp(self.asbytes(), other.asbytes())
def __eq__(self, other):
return hash(self) == hash(other)
def get_name(self):
"""
Return the name of this private key implementation.
:return:
name of this private key type, in SSH terminology, as a `str` (for
example, ``"ssh-rsa"``).
"""
return ''
def get_bits(self):
"""
Return the number of significant bits in this key. This is useful
for judging the relative security of a key.
:return: bits in the key (as an `int`)
"""
return 0
def can_sign(self):
"""
Return ``True`` if this key has the private part necessary for signing
data.
"""
return False
def get_fingerprint(self):
"""
Return an MD5 fingerprint of the public part of this key. Nothing
secret is revealed.
:return:
a 16-byte `string <str>` (binary) of the MD5 fingerprint, in SSH
format.
"""
return md5(self.asbytes()).digest()
def get_base64(self):
"""
Return a base64 string containing the public part of this key. Nothing
secret is revealed. This format is compatible with that used to store
public key files or recognized host keys.
:return: a base64 `string <str>` containing the public part of the key.
"""
return u(encodebytes(self.asbytes())).replace('\n', '')
def sign_ssh_data(self, data):
"""
Sign a blob of data with this private key, and return a `.Message`
representing an SSH signature message.
:param str data: the data to sign.
:return: an SSH signature `message <.Message>`.
"""
return bytes()
def verify_ssh_sig(self, data, msg):
"""
Given a blob of data, and an SSH message representing a signature of
that data, verify that it was signed with this key.
:param str data: the data that was signed.
:param .Message msg: an SSH signature message
:return:
``True`` if the signature verifies correctly; ``False`` otherwise.
"""
return False
@classmethod
def from_private_key_file(cls, filename, password=None):
"""
Create a key object by reading a private key file. If the private
key is encrypted and ``password`` is not ``None``, the given password
will be used to decrypt the key (otherwise `.PasswordRequiredException`
is thrown). Through the magic of Python, this factory method will
exist in all subclasses of PKey (such as `.RSAKey` or `.DSSKey`), but
is useless on the abstract PKey class.
:param str filename: name of the file to read
:param str password: an optional password to use to decrypt the key file,
if it's encrypted
:return: a new `.PKey` based on the given private key
:raises IOError: if there was an error reading the file
:raises PasswordRequiredException: if the private key file is
encrypted, and ``password`` is ``None``
:raises SSHException: if the key file is invalid
"""
key = cls(filename=filename, password=password)
return key
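    # Typical use is via a concrete subclass, e.g. (a sketch):
    #   key = RSAKey.from_private_key_file('id_rsa', password='secret')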
@classmethod
def from_private_key(cls, file_obj, password=None):
"""
Create a key object by reading a private key from a file (or file-like)
object. If the private key is encrypted and ``password`` is not ``None``,
the given password will be used to decrypt the key (otherwise
`.PasswordRequiredException` is thrown).
:param file file_obj: the file to read from
:param str password:
an optional password to use to decrypt the key, if it's encrypted
:return: a new `.PKey` based on the given private key
:raises IOError: if there was an error reading the key
:raises PasswordRequiredException: if the private key file is encrypted,
and ``password`` is ``None``
:raises SSHException: if the key file is invalid
"""
key = cls(file_obj=file_obj, password=password)
return key
def write_private_key_file(self, filename, password=None):
"""
Write private key contents into a file. If the password is not
``None``, the key is encrypted before writing.
:param str filename: name of the file to write
:param str password:
an optional password to use to encrypt the key file
:raises IOError: if there was an error writing the file
:raises SSHException: if the key is invalid
"""
raise Exception('Not implemented in PKey')
def write_private_key(self, file_obj, password=None):
"""
Write private key contents into a file (or file-like) object. If the
password is not ``None``, the key is encrypted before writing.
:param file file_obj: the file object to write into
:param str password: an optional password to use to encrypt the key
:raises IOError: if there was an error writing to the file
:raises SSHException: if the key is invalid
"""
raise Exception('Not implemented in PKey')
def _read_private_key_file(self, tag, filename, password=None):
"""
Read an SSH2-format private key file, looking for a string of the type
``"BEGIN xxx PRIVATE KEY"`` for some ``xxx``, base64-decode the text we
find, and return it as a string. If the private key is encrypted and
``password`` is not ``None``, the given password will be used to decrypt
the key (otherwise `.PasswordRequiredException` is thrown).
:param str tag: ``"RSA"`` or ``"DSA"``, the tag used to mark the data block.
:param str filename: name of the file to read.
:param str password:
an optional password to use to decrypt the key file, if it's
encrypted.
:return: data blob (`str`) that makes up the private key.
:raises IOError: if there was an error reading the file.
:raises PasswordRequiredException: if the private key file is
encrypted, and ``password`` is ``None``.
:raises SSHException: if the key file is invalid.
"""
with open(filename, 'r') as f:
data = self._read_private_key(tag, f, password)
return data
def _read_private_key(self, tag, f, password=None):
lines = f.readlines()
start = 0
while (start < len(lines)) and (lines[start].strip() != '-----BEGIN ' + tag + ' PRIVATE KEY-----'):
start += 1
if start >= len(lines):
raise SSHException('not a valid ' + tag + ' private key file')
# parse any headers first
headers = {}
start += 1
while start < len(lines):
l = lines[start].split(': ')
if len(l) == 1:
break
headers[l[0].lower()] = l[1].strip()
start += 1
# find end
end = start
        while (end < len(lines)) and (lines[end].strip() != '-----END ' + tag + ' PRIVATE KEY-----'):
end += 1
# if we trudged to the end of the file, just try to cope.
try:
data = decodebytes(b(''.join(lines[start:end])))
except base64.binascii.Error as e:
raise SSHException('base64 decoding error: ' + str(e))
if 'proc-type' not in headers:
            # unencrypted: done
return data
# encrypted keyfile: will need a password
if headers['proc-type'] != '4,ENCRYPTED':
raise SSHException('Unknown private key structure "%s"' % headers['proc-type'])
try:
encryption_type, saltstr = headers['dek-info'].split(',')
        except (KeyError, ValueError):
raise SSHException("Can't parse DEK-info in private key file")
if encryption_type not in self._CIPHER_TABLE:
raise SSHException('Unknown private key cipher "%s"' % encryption_type)
# if no password was passed in, raise an exception pointing out that we need one
if password is None:
raise PasswordRequiredException('Private key file is encrypted')
cipher = self._CIPHER_TABLE[encryption_type]['cipher']
keysize = self._CIPHER_TABLE[encryption_type]['keysize']
mode = self._CIPHER_TABLE[encryption_type]['mode']
salt = unhexlify(b(saltstr))
key = util.generate_key_bytes(md5, salt, password, keysize)
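        # generate_key_bytes stretches the password with chained MD5 in the
        # traditional OpenSSL PEM style; the salt also doubles as the CBC IV
        # on the next line.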
return cipher.new(key, mode, salt).decrypt(data)
def _write_private_key_file(self, tag, filename, data, password=None):
"""
Write an SSH2-format private key file in a form that can be read by
paramiko or openssh. If no password is given, the key is written in
a trivially-encoded format (base64) which is completely insecure. If
a password is given, DES-EDE3-CBC is used.
:param str tag: ``"RSA"`` or ``"DSA"``, the tag used to mark the data block.
:param file filename: name of the file to write.
:param str data: data blob that makes up the private key.
:param str password: an optional password to use to encrypt the file.
:raises IOError: if there was an error writing the file.
"""
        with open(filename, 'w') as f:
            # open()'s third argument is buffering, not a permission mode, so
            # restrict the file to owner read/write explicitly
            os.chmod(filename, o600)
self._write_private_key(tag, f, data, password)
def _write_private_key(self, tag, f, data, password=None):
f.write('-----BEGIN %s PRIVATE KEY-----\n' % tag)
if password is not None:
cipher_name = list(self._CIPHER_TABLE.keys())[0]
cipher = self._CIPHER_TABLE[cipher_name]['cipher']
keysize = self._CIPHER_TABLE[cipher_name]['keysize']
blocksize = self._CIPHER_TABLE[cipher_name]['blocksize']
mode = self._CIPHER_TABLE[cipher_name]['mode']
salt = os.urandom(blocksize)
key = util.generate_key_bytes(md5, salt, password, keysize)
if len(data) % blocksize != 0:
n = blocksize - len(data) % blocksize
#data += os.urandom(n)
# that would make more sense ^, but it confuses openssh.
data += zero_byte * n
data = cipher.new(key, mode, salt).encrypt(data)
f.write('Proc-Type: 4,ENCRYPTED\n')
f.write('DEK-Info: %s,%s\n' % (cipher_name, u(hexlify(salt)).upper()))
f.write('\n')
s = u(encodebytes(data))
# re-wrap to 64-char lines
s = ''.join(s.split('\n'))
s = '\n'.join([s[i: i + 64] for i in range(0, len(s), 64)])
f.write(s)
f.write('\n')
f.write('-----END %s PRIVATE KEY-----\n' % tag)
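    # Usage sketch (assuming a concrete subclass such as paramiko's RSAKey,
    # which provides these hooks):
    #
    #   key.write_private_key_file('/tmp/id_rsa', password='secret')
    #
    # This emits a PEM-armored private key, encrypted with the first cipher
    # in _CIPHER_TABLE when a password is given, readable by paramiko and
    # OpenSSH.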
|
orgito/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/nxos/nxos_udld.py
|
34
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_udld
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages UDLD global configuration params.
description:
- Manages UDLD global configuration params.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Module will fail if the udld feature has not been previously enabled.
options:
aggressive:
description:
- Toggles aggressive mode.
choices: ['enabled','disabled']
msg_time:
description:
- Message time in seconds for UDLD packets or keyword 'default'.
reset:
description:
- Ability to reset all ports shut down by UDLD. 'state' parameter
cannot be 'absent' when this is present.
type: bool
default: 'no'
state:
description:
- Manage the state of the resource. When set to 'absent',
aggressive and msg_time are set to their default values.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Ensure udld aggressive mode is globally disabled and the global message interval is 20
- nxos_udld:
aggressive: disabled
msg_time: 20
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Ensure agg mode is globally enabled and msg time is 15
- nxos_udld:
aggressive: enabled
msg_time: 15
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"aggressive": "enabled", "msg_time": "40"}
existing:
description:
- k/v pairs of existing udld configuration
returned: always
type: dict
sample: {"aggressive": "disabled", "msg_time": "15"}
end_state:
description: k/v pairs of udld configuration after module execution
returned: always
type: dict
sample: {"aggressive": "enabled", "msg_time": "40"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["udld message-time 40", "udld aggressive"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import get_capabilities, nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
PARAM_TO_DEFAULT_KEYMAP = {
'msg_time': '15',
}
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
def get_commands_config_udld_global(delta, reset, existing):
commands = []
for param, value in delta.items():
if param == 'aggressive':
command = 'udld aggressive' if value == 'enabled' else 'no udld aggressive'
commands.append(command)
elif param == 'msg_time':
if value == 'default':
if existing.get('msg_time') != PARAM_TO_DEFAULT_KEYMAP.get('msg_time'):
commands.append('no udld message-time')
else:
commands.append('udld message-time ' + value)
if reset:
command = 'udld reset'
commands.append(command)
return commands
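# Illustration (sketch): delta = {'aggressive': 'enabled', 'msg_time': '40'}
# with reset=False yields ['udld aggressive', 'udld message-time 40']
# (dict iteration order may vary).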
def get_commands_remove_udld_global(existing):
commands = []
if existing.get('aggressive') == 'enabled':
command = 'no udld aggressive'
commands.append(command)
if existing.get('msg_time') != PARAM_TO_DEFAULT_KEYMAP.get('msg_time'):
command = 'no udld message-time'
commands.append(command)
return commands
def get_udld_global(module):
command = 'show udld global | json'
udld_table = run_commands(module, [command])[0]
status = str(udld_table.get('udld-global-mode', None))
if status == 'enabled-aggressive':
aggressive = 'enabled'
else:
aggressive = 'disabled'
interval = str(udld_table.get('message-interval', None))
udld = dict(msg_time=interval, aggressive=aggressive)
return udld
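# For a device running aggressive mode, the parsed 'show udld global | json'
# output might look like this (hypothetical values):
#   {'udld-global-mode': 'enabled-aggressive', 'message-interval': '15'}
# which this function maps to {'aggressive': 'enabled', 'msg_time': '15'}.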
def main():
argument_spec = dict(
aggressive=dict(required=False, choices=['enabled', 'disabled']),
msg_time=dict(required=False, type='str'),
reset=dict(required=False, type='bool'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
aggressive = module.params['aggressive']
msg_time = module.params['msg_time']
reset = module.params['reset']
state = module.params['state']
if reset and state == 'absent':
        module.fail_json(msg='state must be present when using the reset flag.')
args = dict(aggressive=aggressive, msg_time=msg_time, reset=reset)
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_udld_global(module)
end_state = existing
delta = set(proposed.items()).difference(existing.items())
changed = False
commands = []
if state == 'present':
if delta:
command = get_commands_config_udld_global(dict(delta), reset, existing)
commands.append(command)
elif state == 'absent':
command = get_commands_remove_udld_global(existing)
if command:
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_udld_global(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
|
diorcety/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/gis/geos/factory.py
|
400
|
from django.contrib.gis.geos.geometry import GEOSGeometry, wkt_regex, hex_regex
def fromfile(file_h):
"""
Given a string file name, returns a GEOSGeometry. The file may contain WKB,
WKT, or HEX.
"""
# If given a file name, get a real handle.
if isinstance(file_h, basestring):
file_h = open(file_h, 'rb')
# Reading in the file's contents,
buf = file_h.read()
# If we get WKB need to wrap in buffer(), so run through regexes.
if wkt_regex.match(buf) or hex_regex.match(buf):
return GEOSGeometry(buf)
else:
return GEOSGeometry(buffer(buf))
def fromstr(string, **kwargs):
"Given a string value, returns a GEOSGeometry object."
return GEOSGeometry(string, **kwargs)
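# Usage sketch (hypothetical inputs):
#   fromstr('POINT(5 23)')       # WKT string -> GEOSGeometry
#   fromfile('/tmp/geom.wkb')    # file holding WKB, WKT or HEX -> GEOSGeometry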
|
sve-odoo/odoo
|
refs/heads/master
|
addons/product/product.py
|
12
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
import re
import time
from _common import ceiling
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
import psycopg2
import openerp.addons.decimal_precision as dp
def ean_checksum(eancode):
"""returns the checksum of an ean string of length 13, returns -1 if the string has the wrong length"""
if len(eancode) != 13:
return -1
oddsum=0
evensum=0
total=0
eanvalue=eancode
reversevalue = eanvalue[::-1]
finalean=reversevalue[1:]
for i in range(len(finalean)):
if i % 2 == 0:
oddsum += int(finalean[i])
else:
evensum += int(finalean[i])
total=(oddsum * 3) + evensum
check = int(10 - math.ceil(total % 10.0)) %10
return check
def check_ean(eancode):
"""returns True if eancode is a valid ean13 string, or null"""
if not eancode:
return True
if len(eancode) != 13:
return False
try:
int(eancode)
    except ValueError:
return False
return ean_checksum(eancode) == int(eancode[-1])
def sanitize_ean13(ean13):
"""Creates and returns a valid ean13 from an invalid one"""
if not ean13:
return "0000000000000"
    ean13 = re.sub("[A-Za-z]", "0", ean13)
    ean13 = re.sub("[^0-9]", "", ean13)
ean13 = ean13[:13]
if len(ean13) < 13:
ean13 = ean13 + '0' * (13-len(ean13))
return ean13[:-1] + str(ean_checksum(ean13))
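# Worked example: "4006381333931" is a valid EAN-13. Dropping the check digit
# and weighting the remaining digits (odd positions counting from the right
# weighted 3, even positions weighted 1) gives a total of 89, so the check
# digit is (10 - 89 % 10) % 10 == 1, matching the last digit:
#   ean_checksum("4006381333931")    # -> 1
#   check_ean("4006381333931")       # -> True
#   sanitize_ean13("4006381333931")  # -> unchanged, already valid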
#----------------------------------------------------------
# UOM
#----------------------------------------------------------
class product_uom_categ(osv.osv):
_name = 'product.uom.categ'
_description = 'Product uom categ'
_columns = {
'name': fields.char('Name', required=True, translate=True),
}
class product_uom(osv.osv):
_name = 'product.uom'
_description = 'Product Unit of Measure'
def _compute_factor_inv(self, factor):
return factor and (1.0 / factor) or 0.0
def _factor_inv(self, cursor, user, ids, name, arg, context=None):
res = {}
for uom in self.browse(cursor, user, ids, context=context):
res[uom.id] = self._compute_factor_inv(uom.factor)
return res
def _factor_inv_write(self, cursor, user, id, name, value, arg, context=None):
return self.write(cursor, user, id, {'factor': self._compute_factor_inv(value)}, context=context)
def name_create(self, cr, uid, name, context=None):
""" The UoM category and factor are required, so we'll have to add temporary values
for imported UoMs """
uom_categ = self.pool.get('product.uom.categ')
# look for the category based on the english name, i.e. no context on purpose!
# TODO: should find a way to have it translated but not created until actually used
categ_misc = 'Unsorted/Imported Units'
categ_id = uom_categ.search(cr, uid, [('name', '=', categ_misc)])
if categ_id:
categ_id = categ_id[0]
else:
categ_id, _ = uom_categ.name_create(cr, uid, categ_misc)
uom_id = self.create(cr, uid, {self._rec_name: name,
'category_id': categ_id,
'factor': 1})
return self.name_get(cr, uid, [uom_id], context=context)[0]
def create(self, cr, uid, data, context=None):
if 'factor_inv' in data:
if data['factor_inv'] != 1:
data['factor'] = self._compute_factor_inv(data['factor_inv'])
del(data['factor_inv'])
return super(product_uom, self).create(cr, uid, data, context)
_order = "name"
_columns = {
'name': fields.char('Unit of Measure', required=True, translate=True),
'category_id': fields.many2one('product.uom.categ', 'Product Category', required=True, ondelete='cascade',
help="Conversion between Units of Measure can only occur if they belong to the same category. The conversion will be made based on the ratios."),
'factor': fields.float('Ratio', required=True,digits=(12, 12),
help='How much bigger or smaller this unit is compared to the reference Unit of Measure for this category:\n'\
'1 * (reference unit) = ratio * (this unit)'),
'factor_inv': fields.function(_factor_inv, digits=(12,12),
fnct_inv=_factor_inv_write,
string='Bigger Ratio',
help='How many times this Unit of Measure is bigger than the reference Unit of Measure in this category:\n'\
'1 * (this unit) = ratio * (reference unit)', required=True),
'rounding': fields.float('Rounding Precision', digits_compute=dp.get_precision('Product Unit of Measure'), required=True,
help="The computed quantity will be a multiple of this value. "\
"Use 1.0 for a Unit of Measure that cannot be further split, such as a piece."),
'active': fields.boolean('Active', help="By unchecking the active field you can disable a unit of measure without deleting it."),
'uom_type': fields.selection([('bigger','Bigger than the reference Unit of Measure'),
('reference','Reference Unit of Measure for this category'),
('smaller','Smaller than the reference Unit of Measure')],'Type', required=1),
}
_defaults = {
'active': 1,
'rounding': 0.01,
'uom_type': 'reference',
}
_sql_constraints = [
('factor_gt_zero', 'CHECK (factor!=0)', 'The conversion ratio for a unit of measure cannot be 0!')
]
def _compute_qty(self, cr, uid, from_uom_id, qty, to_uom_id=False, round=True):
if not from_uom_id or not qty or not to_uom_id:
return qty
uoms = self.browse(cr, uid, [from_uom_id, to_uom_id])
if uoms[0].id == from_uom_id:
from_unit, to_unit = uoms[0], uoms[-1]
else:
from_unit, to_unit = uoms[-1], uoms[0]
return self._compute_qty_obj(cr, uid, from_unit, qty, to_unit, round=round)
def _compute_qty_obj(self, cr, uid, from_unit, qty, to_unit, round=True, context=None):
if context is None:
context = {}
if from_unit.category_id.id != to_unit.category_id.id:
if context.get('raise-exception', True):
                raise osv.except_osv(_('Error!'), _('Conversion from Product UoM %s to Default UoM %s is not possible as they belong to different categories.') % (from_unit.name, to_unit.name,))
else:
return qty
amount = qty / from_unit.factor
if to_unit:
amount = amount * to_unit.factor
if round:
amount = ceiling(amount, to_unit.rounding)
return amount
def _compute_price(self, cr, uid, from_uom_id, price, to_uom_id=False):
if not from_uom_id or not price or not to_uom_id:
return price
from_unit, to_unit = self.browse(cr, uid, [from_uom_id, to_uom_id])
if from_unit.category_id.id != to_unit.category_id.id:
return price
amount = price * from_unit.factor
if to_uom_id:
amount = amount / to_unit.factor
return amount
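    # Worked example (sketch, hypothetical UoMs in the same category): with
    # Unit (factor=1.0) as the reference and Dozen (factor=1.0/12):
    #   _compute_qty_obj(..., Dozen, 2, Unit)       -> 2 / (1/12) * 1 = 24 Units
    #   _compute_price(..., unit_id, 5.0, dozen_id) -> 5.0 * 1 / (1/12) = 60.0 per Dozen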
def onchange_type(self, cursor, user, ids, value):
if value == 'reference':
return {'value': {'factor': 1, 'factor_inv': 1}}
return {}
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if 'category_id' in vals:
for uom in self.browse(cr, uid, ids, context=context):
if uom.category_id.id != vals['category_id']:
raise osv.except_osv(_('Warning!'),_("Cannot change the category of existing Unit of Measure '%s'.") % (uom.name,))
return super(product_uom, self).write(cr, uid, ids, vals, context=context)
class product_ul(osv.osv):
_name = "product.ul"
_description = "Logistic Unit"
_columns = {
'name' : fields.char('Name', select=True, required=True, translate=True),
'type' : fields.selection([('unit','Unit'),('pack','Pack'),('box', 'Box'), ('pallet', 'Pallet')], 'Type', required=True),
'height': fields.float('Height', help='The height of the package'),
'width': fields.float('Width', help='The width of the package'),
'length': fields.float('Length', help='The length of the package'),
'weight': fields.float('Empty Package Weight'),
}
#----------------------------------------------------------
# Categories
#----------------------------------------------------------
class product_category(osv.osv):
def name_get(self, cr, uid, ids, context=None):
if isinstance(ids, (list, tuple)) and not len(ids):
return []
if isinstance(ids, (long, int)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name','parent_id'], context=context)
res = []
for record in reads:
name = record['name']
if record['parent_id']:
name = record['parent_id'][1]+' / '+name
res.append((record['id'], name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if not context:
context = {}
if name:
            # Be sure name_search is symmetric to name_get
name = name.split(' / ')[-1]
ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context)
else:
ids = self.search(cr, uid, args, limit=limit, context=context)
return self.name_get(cr, uid, ids, context)
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
_name = "product.category"
_description = "Product Category"
_columns = {
'name': fields.char('Name', required=True, translate=True, select=True),
'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
'parent_id': fields.many2one('product.category','Parent Category', select=True, ondelete='cascade'),
'child_id': fields.one2many('product.category', 'parent_id', string='Child Categories'),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of product categories."),
'type': fields.selection([('view','View'), ('normal','Normal')], 'Category Type', help="A category of the view type is a virtual category that can be used as the parent of another category to create a hierarchical structure."),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
}
_defaults = {
'type' : 'normal',
}
_parent_name = "parent_id"
_parent_store = True
_parent_order = 'sequence, name'
_order = 'parent_left'
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
]
class produce_price_history(osv.osv):
"""
Keep track of the ``product.template`` standard prices as they are changed.
"""
_name = 'product.price.history'
_rec_name = 'datetime'
_order = 'datetime desc'
_columns = {
'company_id': fields.many2one('res.company', required=True),
'product_template_id': fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade'),
'datetime': fields.datetime('Historization Time'),
'cost': fields.float('Historized Cost'),
}
def _get_default_company(self, cr, uid, context=None):
        if context and 'force_company' in context:
return context['force_company']
else:
company = self.pool['res.users'].browse(cr, uid, uid,
context=context).company_id
return company.id if company else False
_defaults = {
'datetime': fields.datetime.now,
'company_id': _get_default_company,
}
#----------------------------------------------------------
# Product Attributes
#----------------------------------------------------------
class product_attribute(osv.osv):
_name = "product.attribute"
_description = "Product Attribute"
_columns = {
'name': fields.char('Name', translate=True, required=True),
'value_ids': fields.one2many('product.attribute.value', 'attribute_id', 'Values', copy=True),
}
class product_attribute_value(osv.osv):
_name = "product.attribute.value"
_order = 'sequence'
def _get_price_extra(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, 0)
        if not context or not context.get('active_id'):
return result
for obj in self.browse(cr, uid, ids, context=context):
for price_id in obj.price_ids:
if price_id.product_tmpl_id.id == context.get('active_id'):
result[obj.id] = price_id.price_extra
break
return result
def _set_price_extra(self, cr, uid, id, name, value, args, context=None):
if context is None:
context = {}
if 'active_id' not in context:
return None
p_obj = self.pool['product.attribute.price']
p_ids = p_obj.search(cr, uid, [('value_id', '=', id), ('product_tmpl_id', '=', context['active_id'])], context=context)
if p_ids:
p_obj.write(cr, uid, p_ids, {'price_extra': value}, context=context)
else:
p_obj.create(cr, uid, {
'product_tmpl_id': context['active_id'],
'value_id': id,
'price_extra': value,
}, context=context)
_columns = {
'sequence': fields.integer('Sequence', help="Determine the display order"),
'name': fields.char('Value', translate=True, required=True),
'attribute_id': fields.many2one('product.attribute', 'Attribute', required=True, ondelete='cascade'),
'product_ids': fields.many2many('product.product', id1='att_id', id2='prod_id', string='Variants', readonly=True),
'price_extra': fields.function(_get_price_extra, type='float', string='Attribute Price Extra',
fnct_inv=_set_price_extra,
digits_compute=dp.get_precision('Product Price'),
help="Price Extra: Extra price for the variant with this attribute value on sale price. eg. 200 price extra, 1000 + 200 = 1200."),
'price_ids': fields.one2many('product.attribute.price', 'value_id', string='Attribute Prices', readonly=True),
}
_sql_constraints = [
('value_company_uniq', 'unique (name,attribute_id)', 'This attribute value already exists !')
]
_defaults = {
'price_extra': 0.0,
}
def unlink(self, cr, uid, ids, context=None):
ctx = dict(context or {}, active_test=False)
product_ids = self.pool['product.product'].search(cr, uid, [('attribute_value_ids', 'in', ids)], context=ctx)
if product_ids:
            raise osv.except_osv(_('Integrity Error!'), _('The operation cannot be completed:\nYou are trying to delete an attribute value that is referenced by a product variant.'))
return super(product_attribute_value, self).unlink(cr, uid, ids, context=context)
class product_attribute_price(osv.osv):
_name = "product.attribute.price"
_columns = {
'product_tmpl_id': fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade'),
'value_id': fields.many2one('product.attribute.value', 'Product Attribute Value', required=True, ondelete='cascade'),
'price_extra': fields.float('Price Extra', digits_compute=dp.get_precision('Product Price')),
}
class product_attribute_line(osv.osv):
_name = "product.attribute.line"
_rec_name = 'attribute_id'
_columns = {
'product_tmpl_id': fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade'),
'attribute_id': fields.many2one('product.attribute', 'Attribute', required=True, ondelete='restrict'),
'value_ids': fields.many2many('product.attribute.value', id1='line_id', id2='val_id', string='Product Attribute Value'),
}
#----------------------------------------------------------
# Products
#----------------------------------------------------------
class product_template(osv.osv):
_name = "product.template"
_inherit = ['mail.thread']
_description = "Product Template"
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image, avoid_resize_medium=True)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
def _is_product_variant(self, cr, uid, ids, name, arg, context=None):
return self._is_product_variant_impl(cr, uid, ids, name, arg, context=context)
def _is_product_variant_impl(self, cr, uid, ids, name, arg, context=None):
prod = self.pool.get('product.product')
res = dict.fromkeys(ids, False)
ctx = dict(context, active_test=True)
for product in self.browse(cr, uid, ids, context=context):
res[product.id] = prod.search(cr, uid, [('product_tmpl_id','=',product.id)], context=ctx, count=True) == 1
return res
def _product_template_price(self, cr, uid, ids, name, arg, context=None):
plobj = self.pool.get('product.pricelist')
        res = {}
        if context is None:
            context = {}
        quantity = context.get('quantity') or 1.0
pricelist = context.get('pricelist', False)
partner = context.get('partner', False)
if pricelist:
# Support context pricelists specified as display_name or ID for compatibility
if isinstance(pricelist, basestring):
pricelist_ids = plobj.name_search(
cr, uid, pricelist, operator='=', context=context, limit=1)
pricelist = pricelist_ids[0][0] if pricelist_ids else pricelist
if isinstance(pricelist, (int, long)):
products = self.browse(cr, uid, ids, context=context)
qtys = map(lambda x: (x, quantity, partner), products)
pl = plobj.browse(cr, uid, pricelist, context=context)
price = plobj._price_get_multi(cr,uid, pl, qtys, context=context)
for id in ids:
res[id] = price.get(id, 0.0)
for id in ids:
res.setdefault(id, 0.0)
return res
def get_history_price(self, cr, uid, product_tmpl, company_id, date=None, context=None):
if context is None:
context = {}
if date is None:
date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
price_history_obj = self.pool.get('product.price.history')
history_ids = price_history_obj.search(cr, uid, [('company_id', '=', company_id), ('product_template_id', '=', product_tmpl), ('datetime', '<=', date)], limit=1)
if history_ids:
return price_history_obj.read(cr, uid, history_ids[0], ['cost'], context=context)['cost']
return 0.0
def _set_standard_price(self, cr, uid, product_tmpl_id, value, context=None):
''' Store the standard price change in order to be able to retrieve the cost of a product template for a given date'''
if context is None:
context = {}
price_history_obj = self.pool['product.price.history']
user_company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
company_id = context.get('force_company', user_company)
price_history_obj.create(cr, uid, {
'product_template_id': product_tmpl_id,
'cost': value,
'company_id': company_id,
}, context=context)
def _get_product_variant_count(self, cr, uid, ids, name, arg, context=None):
res = {}
for product in self.browse(cr, uid, ids):
res[product.id] = len(product.product_variant_ids)
return res
_columns = {
'name': fields.char('Name', required=True, translate=True, select=True),
'product_manager': fields.many2one('res.users','Product Manager'),
'description': fields.text('Description',translate=True,
help="A precise description of the Product, used only for internal information purposes."),
'description_purchase': fields.text('Purchase Description',translate=True,
help="A description of the Product that you want to communicate to your suppliers. "
"This description will be copied to every Purchase Order, Receipt and Supplier Invoice/Refund."),
'description_sale': fields.text('Sale Description',translate=True,
help="A description of the Product that you want to communicate to your customers. "
"This description will be copied to every Sale Order, Delivery Order and Customer Invoice/Refund"),
        'type': fields.selection([('consu', 'Consumable'), ('service', 'Service')], 'Product Type', required=True, help="A consumable is a product for which you do not manage stock; a service is a non-material product provided by a company or an individual."),
'rental': fields.boolean('Can be Rent'),
'categ_id': fields.many2one('product.category','Internal Category', required=True, change_default=True, domain="[('type','=','normal')]" ,help="Select category for the current product"),
'price': fields.function(_product_template_price, type='float', string='Price', digits_compute=dp.get_precision('Product Price')),
'list_price': fields.float('Sale Price', digits_compute=dp.get_precision('Product Price'), help="Base price to compute the customer price. Sometimes called the catalog price."),
'lst_price' : fields.related('list_price', type="float", string='Public Price', digits_compute=dp.get_precision('Product Price')),
'standard_price': fields.property(type = 'float', digits_compute=dp.get_precision('Product Price'),
help="Cost price of the product template used for standard stock valuation in accounting and used as a base price on purchase orders.",
groups="base.group_user", string="Cost Price"),
'volume': fields.float('Volume', help="The volume in m3."),
'weight': fields.float('Gross Weight', digits_compute=dp.get_precision('Stock Weight'), help="The gross weight in Kg."),
'weight_net': fields.float('Net Weight', digits_compute=dp.get_precision('Stock Weight'), help="The net weight in Kg."),
'warranty': fields.float('Warranty'),
'sale_ok': fields.boolean('Can be Sold', help="Specify if the product can be selected in a sales order line."),
'pricelist_id': fields.dummy(string='Pricelist', relation='product.pricelist', type='many2one'),
'state': fields.selection([('',''),
('draft', 'In Development'),
('sellable','Normal'),
('end','End of Lifecycle'),
('obsolete','Obsolete')], 'Status'),
'uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True, help="Default Unit of Measure used for all stock operation."),
'uom_po_id': fields.many2one('product.uom', 'Purchase Unit of Measure', required=True, help="Default Unit of Measure used for purchase orders. It must be in the same category than the default unit of measure."),
'uos_id' : fields.many2one('product.uom', 'Unit of Sale',
help='Specify a unit of measure here if invoicing is made in another unit of measure than inventory. Keep empty to use the default unit of measure.'),
'uos_coeff': fields.float('Unit of Measure -> UOS Coeff', digits_compute= dp.get_precision('Product UoS'),
help='Coefficient to convert default Unit of Measure to Unit of Sale\n'
' uos = uom * coeff'),
'mes_type': fields.selection((('fixed', 'Fixed'), ('variable', 'Variable')), 'Measure Type'),
'company_id': fields.many2one('res.company', 'Company', select=1),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Image",
help="This field holds the image used as image for the product, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized image", type="binary", multi="_get_image",
store={
'product.template': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized image of the product. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved, "\
"only when the image exceeds one of those sizes. Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized image", type="binary", multi="_get_image",
store={
'product.template': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized image of the product. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
'packaging_ids': fields.one2many(
'product.packaging', 'product_tmpl_id', 'Logistical Units',
help="Gives the different ways to package the same product. This has no impact on "
"the picking order and is mainly used if you use the EDI module."),
'seller_ids': fields.one2many('product.supplierinfo', 'product_tmpl_id', 'Supplier'),
'seller_delay': fields.related('seller_ids','delay', type='integer', string='Supplier Lead Time',
help="This is the average delay in days between the purchase order confirmation and the receipts for this product and for the default supplier. It is used by the scheduler to order requests based on reordering delays."),
'seller_qty': fields.related('seller_ids','qty', type='float', string='Supplier Quantity',
help="This is minimum quantity to purchase from Main Supplier."),
'seller_id': fields.related('seller_ids','name', type='many2one', relation='res.partner', string='Main Supplier',
help="Main Supplier who has highest priority in Supplier List."),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the product without removing it."),
'color': fields.integer('Color Index'),
'is_product_variant': fields.function( _is_product_variant, type='boolean', string='Only one product variant'),
'attribute_line_ids': fields.one2many('product.attribute.line', 'product_tmpl_id', 'Product Attributes'),
'product_variant_ids': fields.one2many('product.product', 'product_tmpl_id', 'Products', required=True),
'product_variant_count': fields.function( _get_product_variant_count, type='integer', string='# of Product Variants'),
# related to display product product information if is_product_variant
'ean13': fields.related('product_variant_ids', 'ean13', type='char', string='EAN13 Barcode'),
'default_code': fields.related('product_variant_ids', 'default_code', type='char', string='Internal Reference'),
}
def _price_get_list_price(self, product):
return 0.0
def _price_get(self, cr, uid, products, ptype='list_price', context=None):
if context is None:
context = {}
if 'currency_id' in context:
pricetype_obj = self.pool.get('product.price.type')
price_type_id = pricetype_obj.search(cr, uid, [('field','=',ptype)])[0]
price_type_currency_id = pricetype_obj.browse(cr,uid,price_type_id).currency_id.id
res = {}
product_uom_obj = self.pool.get('product.uom')
for product in products:
res[product.id] = product[ptype] or 0.0
if ptype == 'list_price':
res[product.id] += product._name == "product.product" and product.price_extra or 0.0
if 'uom' in context:
uom = product.uom_id or product.uos_id
res[product.id] = product_uom_obj._compute_price(cr, uid,
uom.id, res[product.id], context['uom'])
# Convert from price_type currency to asked one
if 'currency_id' in context:
# Take the price_type currency from the product field
                # This is right because a field cannot be in more than one currency
res[product.id] = self.pool.get('res.currency').compute(cr, uid, price_type_currency_id,
context['currency_id'], res[product.id],context=context)
return res
def _get_uom_id(self, cr, uid, *args):
return self.pool["product.uom"].search(cr, uid, [], limit=1, order='id')[0]
def _default_category(self, cr, uid, context=None):
if context is None:
context = {}
if 'categ_id' in context and context['categ_id']:
return context['categ_id']
md = self.pool.get('ir.model.data')
res = False
try:
res = md.get_object_reference(cr, uid, 'product', 'product_category_all')[1]
except ValueError:
res = False
return res
def onchange_uom(self, cursor, user, ids, uom_id, uom_po_id):
if uom_id:
return {'value': {'uom_po_id': uom_id}}
return {}
def create_variant_ids(self, cr, uid, ids, context=None):
product_obj = self.pool.get("product.product")
ctx = context and context.copy() or {}
if ctx.get("create_product_variant"):
return None
ctx.update(active_test=False, create_product_variant=True)
tmpl_ids = self.browse(cr, uid, ids, context=ctx)
for tmpl_id in tmpl_ids:
# list of values combination
all_variants = [[]]
for variant_id in tmpl_id.attribute_line_ids:
if len(variant_id.value_ids) > 1:
temp_variants = []
for value_id in variant_id.value_ids:
for variant in all_variants:
temp_variants.append(variant + [int(value_id)])
all_variants = temp_variants
# check product
variant_ids_to_active = []
variants_active_ids = []
variants_inactive = []
for product_id in tmpl_id.product_variant_ids:
variants = map(int,product_id.attribute_value_ids)
if variants in all_variants:
variants_active_ids.append(product_id.id)
all_variants.pop(all_variants.index(variants))
if not product_id.active:
variant_ids_to_active.append(product_id.id)
else:
variants_inactive.append(product_id)
if variant_ids_to_active:
product_obj.write(cr, uid, variant_ids_to_active, {'active': True}, context=ctx)
# create new product
for variant_ids in all_variants:
values = {
'product_tmpl_id': tmpl_id.id,
'attribute_value_ids': [(6, 0, variant_ids)]
}
id = product_obj.create(cr, uid, values, context=ctx)
variants_active_ids.append(id)
# unlink or inactive product
for variant_id in map(int,variants_inactive):
try:
with cr.savepoint():
product_obj.unlink(cr, uid, [variant_id], context=ctx)
except (psycopg2.Error, osv.except_osv):
product_obj.write(cr, uid, [variant_id], {'active': False}, context=ctx)
return True
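    # Illustration (sketch, hypothetical attributes): with two attribute lines
    # Color = [Red, Blue] and Size = [S, M], all_variants above grows as
    #   [[]] -> [[Red], [Blue]] -> [[Red, S], [Blue, S], [Red, M], [Blue, M]]
    # i.e. one variant per value combination; attribute lines with a single
    # value are skipped.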
def create(self, cr, uid, vals, context=None):
''' Store the initial standard price in order to be able to retrieve the cost of a product template for a given date'''
product_template_id = super(product_template, self).create(cr, uid, vals, context=context)
if not context or "create_product_product" not in context:
self.create_variant_ids(cr, uid, [product_template_id], context=context)
self._set_standard_price(cr, uid, product_template_id, vals.get('standard_price', 0.0), context=context)
# TODO: this is needed to set given values to first variant after creation
# these fields should be moved to product as lead to confusion
related_vals = {}
if vals.get('ean13'):
related_vals['ean13'] = vals['ean13']
if vals.get('default_code'):
related_vals['default_code'] = vals['default_code']
if related_vals:
self.write(cr, uid, product_template_id, related_vals, context=context)
return product_template_id
def write(self, cr, uid, ids, vals, context=None):
''' Store the standard price change in order to be able to retrieve the cost of a product template for a given date'''
if isinstance(ids, (int, long)):
ids = [ids]
if 'uom_po_id' in vals:
new_uom = self.pool.get('product.uom').browse(cr, uid, vals['uom_po_id'], context=context)
for product in self.browse(cr, uid, ids, context=context):
old_uom = product.uom_po_id
if old_uom.category_id.id != new_uom.category_id.id:
                    raise osv.except_osv(_('Unit of Measure categories Mismatch!'), _("The new Unit of Measure '%s' must belong to the same Unit of Measure category '%s' as the old Unit of Measure '%s'. If you need to change the unit of measure, you may deactivate this product from the 'Procurements' tab and create a new one.") % (new_uom.name, old_uom.category_id.name, old_uom.name,))
if 'standard_price' in vals:
for prod_template_id in ids:
self._set_standard_price(cr, uid, prod_template_id, vals['standard_price'], context=context)
res = super(product_template, self).write(cr, uid, ids, vals, context=context)
if 'attribute_line_ids' in vals or vals.get('active'):
self.create_variant_ids(cr, uid, ids, context=context)
if 'active' in vals and not vals.get('active'):
ctx = context and context.copy() or {}
ctx.update(active_test=False)
product_ids = []
for product in self.browse(cr, uid, ids, context=ctx):
                product_ids += map(int, product.product_variant_ids)
self.pool.get("product.product").write(cr, uid, product_ids, {'active': vals.get('active')}, context=ctx)
return res
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
template = self.browse(cr, uid, id, context=context)
default['name'] = _("%s (copy)") % (template['name'])
return super(product_template, self).copy(cr, uid, id, default=default, context=context)
_defaults = {
'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'product.template', context=c),
'list_price': 1,
'standard_price': 0.0,
'sale_ok': 1,
'uom_id': _get_uom_id,
'uom_po_id': _get_uom_id,
'uos_coeff': 1.0,
'mes_type': 'fixed',
'categ_id' : _default_category,
'type' : 'consu',
'active': True,
}
def _check_uom(self, cursor, user, ids, context=None):
for product in self.browse(cursor, user, ids, context=context):
if product.uom_id.category_id.id != product.uom_po_id.category_id.id:
return False
return True
def _check_uos(self, cursor, user, ids, context=None):
for product in self.browse(cursor, user, ids, context=context):
if product.uos_id \
and product.uos_id.category_id.id \
== product.uom_id.category_id.id:
return False
return True
_constraints = [
(_check_uom, 'Error: The default Unit of Measure and the purchase Unit of Measure must be in the same category.', ['uom_id']),
]
def name_get(self, cr, user, ids, context=None):
if context is None:
context = {}
if 'partner_id' in context:
pass
return super(product_template, self).name_get(cr, user, ids, context)
class product_product(osv.osv):
_name = "product.product"
_description = "Product"
_inherits = {'product.template': 'product_tmpl_id'}
_inherit = ['mail.thread']
_order = 'default_code,name_template'
def _product_price(self, cr, uid, ids, name, arg, context=None):
plobj = self.pool.get('product.pricelist')
res = {}
if context is None:
context = {}
quantity = context.get('quantity') or 1.0
pricelist = context.get('pricelist', False)
partner = context.get('partner', False)
if pricelist:
# Support context pricelists specified as display_name or ID for compatibility
if isinstance(pricelist, basestring):
pricelist_ids = plobj.name_search(
cr, uid, pricelist, operator='=', context=context, limit=1)
pricelist = pricelist_ids[0][0] if pricelist_ids else pricelist
if isinstance(pricelist, (int, long)):
products = self.browse(cr, uid, ids, context=context)
qtys = map(lambda x: (x, quantity, partner), products)
pl = plobj.browse(cr, uid, pricelist, context=context)
price = plobj._price_get_multi(cr,uid, pl, qtys, context=context)
for id in ids:
res[id] = price.get(id, 0.0)
for id in ids:
res.setdefault(id, 0.0)
return res
def view_header_get(self, cr, uid, view_id, view_type, context=None):
if context is None:
context = {}
res = super(product_product, self).view_header_get(cr, uid, view_id, view_type, context)
if (context.get('categ_id', False)):
return _('Products: ') + self.pool.get('product.category').browse(cr, uid, context['categ_id'], context=context).name
return res
def _product_lst_price(self, cr, uid, ids, name, arg, context=None):
product_uom_obj = self.pool.get('product.uom')
res = dict.fromkeys(ids, 0.0)
for product in self.browse(cr, uid, ids, context=context):
            if context and 'uom' in context:
uom = product.uos_id or product.uom_id
res[product.id] = product_uom_obj._compute_price(cr, uid,
uom.id, product.list_price, context['uom'])
else:
res[product.id] = product.list_price
res[product.id] = res[product.id] + product.price_extra
return res
def _set_product_lst_price(self, cr, uid, id, name, value, args, context=None):
product_uom_obj = self.pool.get('product.uom')
product = self.browse(cr, uid, id, context=context)
        if context and 'uom' in context:
uom = product.uos_id or product.uom_id
value = product_uom_obj._compute_price(cr, uid,
context['uom'], value, uom.id)
value = value - product.price_extra
return product.write({'list_price': value}, context=context)
def _get_partner_code_name(self, cr, uid, ids, product, partner_id, context=None):
for supinfo in product.seller_ids:
if supinfo.name.id == partner_id:
return {'code': supinfo.product_code or product.default_code, 'name': supinfo.product_name or product.name}
res = {'code': product.default_code, 'name': product.name}
return res
def _product_code(self, cr, uid, ids, name, arg, context=None):
res = {}
if context is None:
context = {}
for p in self.browse(cr, uid, ids, context=context):
res[p.id] = self._get_partner_code_name(cr, uid, [], p, context.get('partner_id', None), context=context)['code']
return res
def _product_partner_ref(self, cr, uid, ids, name, arg, context=None):
res = {}
if context is None:
context = {}
for p in self.browse(cr, uid, ids, context=context):
data = self._get_partner_code_name(cr, uid, [], p, context.get('partner_id', None), context=context)
if not data['code']:
data['code'] = p.code
if not data['name']:
data['name'] = p.name
res[p.id] = (data['code'] and ('['+data['code']+'] ') or '') + (data['name'] or '')
return res
def _is_product_variant_impl(self, cr, uid, ids, name, arg, context=None):
return dict.fromkeys(ids, True)
def _get_name_template_ids(self, cr, uid, ids, context=None):
result = set()
template_ids = self.pool.get('product.product').search(cr, uid, [('product_tmpl_id', 'in', ids)])
for el in template_ids:
result.add(el)
return list(result)
def _get_image_variant(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = obj.image_variant or getattr(obj.product_tmpl_id, name)
return result
def _set_image_variant(self, cr, uid, id, name, value, args, context=None):
image = tools.image_resize_image_big(value)
res = self.write(cr, uid, [id], {'image_variant': image}, context=context)
product = self.browse(cr, uid, id, context=context)
if not product.product_tmpl_id.image:
product.write({'image_variant': None}, context=context)
product.product_tmpl_id.write({'image': image}, context=context)
return res
def _get_price_extra(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for product in self.browse(cr, uid, ids, context=context):
price_extra = 0.0
for variant_id in product.attribute_value_ids:
for price_id in variant_id.price_ids:
if price_id.product_tmpl_id.id == product.product_tmpl_id.id:
price_extra += price_id.price_extra
result[product.id] = price_extra
return result
_columns = {
'price': fields.function(_product_price, type='float', string='Price', digits_compute=dp.get_precision('Product Price')),
        'price_extra': fields.function(_get_price_extra, type='float', string='Variant Extra Price', help="This is the sum of the extra prices of all attributes"),
'lst_price': fields.function(_product_lst_price, fnct_inv=_set_product_lst_price, type='float', string='Public Price', digits_compute=dp.get_precision('Product Price')),
'code': fields.function(_product_code, type='char', string='Internal Reference'),
'partner_ref' : fields.function(_product_partner_ref, type='char', string='Customer ref'),
'default_code' : fields.char('Internal Reference', select=True),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the product without removing it."),
'product_tmpl_id': fields.many2one('product.template', 'Product Template', required=True, ondelete="cascade", select=True),
'ean13': fields.char('EAN13 Barcode', size=13, help="International Article Number used for product identification."),
'name_template': fields.related('product_tmpl_id', 'name', string="Template Name", type='char', store={
'product.template': (_get_name_template_ids, ['name'], 10),
'product.product': (lambda self, cr, uid, ids, c=None: ids, [], 10),
}, select=True),
'attribute_value_ids': fields.many2many('product.attribute.value', id1='prod_id', id2='att_id', string='Attributes', readonly=True, ondelete='restrict'),
# image: all image fields are base64 encoded and PIL-supported
'image_variant': fields.binary("Variant Image",
help="This field holds the image used as image for the product variant, limited to 1024x1024px."),
'image': fields.function(_get_image_variant, fnct_inv=_set_image_variant,
string="Big-sized image", type="binary",
help="Image of the product variant (Big-sized image of product template if false). It is automatically "\
"resized as a 1024x1024px image, with aspect ratio preserved."),
'image_small': fields.function(_get_image_variant, fnct_inv=_set_image_variant,
string="Small-sized image", type="binary",
help="Image of the product variant (Small-sized image of product template if false)."),
'image_medium': fields.function(_get_image_variant, fnct_inv=_set_image_variant,
string="Medium-sized image", type="binary",
help="Image of the product variant (Medium-sized image of product template if false)."),
}
_defaults = {
'active': 1,
'color': 0,
}
def unlink(self, cr, uid, ids, context=None):
unlink_ids = []
unlink_product_tmpl_ids = []
for product in self.browse(cr, uid, ids, context=context):
tmpl_id = product.product_tmpl_id.id
# Check if the product is last product of this template
other_product_ids = self.search(cr, uid, [('product_tmpl_id', '=', tmpl_id), ('id', '!=', product.id)], context=context)
if not other_product_ids:
unlink_product_tmpl_ids.append(tmpl_id)
unlink_ids.append(product.id)
res = super(product_product, self).unlink(cr, uid, unlink_ids, context=context)
# delete templates after calling super, as deleting template could lead to deleting
# products due to ondelete='cascade'
self.pool.get('product.template').unlink(cr, uid, unlink_product_tmpl_ids, context=context)
return res
def onchange_uom(self, cursor, user, ids, uom_id, uom_po_id):
if uom_id and uom_po_id:
            uom_obj = self.pool.get('product.uom')
            uom = uom_obj.browse(cursor, user, [uom_id])[0]
            uom_po = uom_obj.browse(cursor, user, [uom_po_id])[0]
if uom.category_id.id != uom_po.category_id.id:
return {'value': {'uom_po_id': uom_id}}
return False
def _check_ean_key(self, cr, uid, ids, context=None):
for product in self.read(cr, uid, ids, ['ean13'], context=context):
if not check_ean(product['ean13']):
return False
return True
_constraints = [(_check_ean_key, 'You provided an invalid "EAN13 Barcode" reference. You may use the "Internal Reference" field instead.', ['ean13'])]
def on_order(self, cr, uid, ids, orderline, quantity):
pass
def name_get(self, cr, user, ids, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not len(ids):
return []
def _name_get(d):
name = d.get('name','')
code = d.get('default_code',False)
if code:
name = '[%s] %s' % (code,name)
return (d['id'], name)
partner_id = context.get('partner_id', False)
        # not all users have access to seller and partner records,
        # so check access rights here and then read as superuser
self.check_access_rights(cr, user, "read")
self.check_access_rule(cr, user, ids, "read", context=context)
result = []
for product in self.browse(cr, SUPERUSER_ID, ids, context=context):
variant = ", ".join([v.name for v in product.attribute_value_ids])
name = variant and "%s (%s)" % (product.name, variant) or product.name
sellers = []
if partner_id:
sellers = filter(lambda x: x.name.id == partner_id, product.seller_ids)
if sellers:
for s in sellers:
mydict = {
'id': product.id,
'name': s.product_name or name,
'default_code': s.product_code or product.default_code,
}
result.append(_name_get(mydict))
else:
mydict = {
'id': product.id,
'name': name,
'default_code': product.default_code,
}
result.append(_name_get(mydict))
return result
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if name:
ids = self.search(cr, user, [('default_code','=',name)]+ args, limit=limit, context=context)
if not ids:
ids = self.search(cr, user, [('ean13','=',name)]+ args, limit=limit, context=context)
if not ids:
                # Do not merge the next 2 lines into one single search; SQL search performance would be abysmal
                # on a database with thousands of matching products, due to the huge merge+unique needed for the
                # OR operator (and given the fact that the 'name' lookup results come from the ir.translation table).
                # Performing a quick memory merge of ids in Python gives much better performance.
ids = set(self.search(cr, user, args + [('default_code', operator, name)], limit=limit, context=context))
if not limit or len(ids) < limit:
# we may underrun the limit because of dupes in the results, that's fine
limit2 = (limit - len(ids)) if limit else False
ids.update(self.search(cr, user, args + [('name', operator, name)], limit=limit2, context=context))
ids = list(ids)
if not ids:
ptrn = re.compile('(\[(.*?)\])')
res = ptrn.search(name)
if res:
ids = self.search(cr, user, [('default_code','=', res.group(2))] + args, limit=limit, context=context)
else:
ids = self.search(cr, user, args, limit=limit, context=context)
result = self.name_get(cr, user, ids, context=context)
return result
    #
    # Could be overridden for variant price matrices
    #
def price_get(self, cr, uid, ids, ptype='list_price', context=None):
products = self.browse(cr, uid, ids, context=context)
return self.pool.get("product.template")._price_get(cr, uid, products, ptype=ptype, context=context)
def copy(self, cr, uid, id, default=None, context=None):
if context is None:
context={}
product = self.browse(cr, uid, id, context)
if context.get('variant'):
# if we copy a variant or create one, we keep the same template
default['product_tmpl_id'] = product.product_tmpl_id.id
elif 'name' not in default:
default['name'] = _("%s (copy)") % (product.name,)
return super(product_product, self).copy(cr, uid, id, default=default, context=context)
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
if context.get('search_default_categ_id'):
args.append((('categ_id', 'child_of', context['search_default_categ_id'])))
return super(product_product, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
def open_product_template(self, cr, uid, ids, context=None):
""" Utility method used to add an "Open Template" button in product views """
product = self.browse(cr, uid, ids[0], context=context)
return {'type': 'ir.actions.act_window',
'res_model': 'product.template',
'view_mode': 'form',
'res_id': product.product_tmpl_id.id,
'target': 'new'}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
ctx = dict(context or {}, create_product_product=True)
return super(product_product, self).create(cr, uid, vals, context=ctx)
class product_packaging(osv.osv):
_name = "product.packaging"
_description = "Packaging"
_rec_name = 'ean'
_order = 'sequence'
_columns = {
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of packaging."),
'name' : fields.text('Description'),
'qty' : fields.float('Quantity by Package',
help="The total number of products you can put by pallet or box."),
'ul' : fields.many2one('product.ul', 'Package Logistic Unit', required=True),
        'ul_qty' : fields.integer('Package by layer', help='The number of packages per layer'),
'ul_container': fields.many2one('product.ul', 'Pallet Logistic Unit'),
'rows' : fields.integer('Number of Layers', required=True,
help='The number of layers on a pallet or box'),
'product_tmpl_id' : fields.many2one('product.template', 'Product', select=1, ondelete='cascade', required=True),
'ean' : fields.char('EAN', size=14, help="The EAN code of the package unit."),
'code' : fields.char('Code', help="The code of the transport unit."),
'weight': fields.float('Total Package Weight',
help='The weight of a full package, pallet or box.'),
}
def _check_ean_key(self, cr, uid, ids, context=None):
for pack in self.browse(cr, uid, ids, context=context):
if not check_ean(pack.ean):
return False
return True
_constraints = [(_check_ean_key, 'Error: Invalid ean code', ['ean'])]
def name_get(self, cr, uid, ids, context=None):
if not len(ids):
return []
res = []
for pckg in self.browse(cr, uid, ids, context=context):
p_name = pckg.ean and '[' + pckg.ean + '] ' or ''
p_name += pckg.ul.name
res.append((pckg.id,p_name))
return res
def _get_1st_ul(self, cr, uid, context=None):
cr.execute('select id from product_ul order by id asc limit 1')
res = cr.fetchone()
return (res and res[0]) or False
_defaults = {
'rows' : 3,
'sequence' : 1,
'ul' : _get_1st_ul,
}
def checksum(ean):
salt = '31' * 6 + '3'
sum = 0
for ean_part, salt_part in zip(ean, salt):
sum += int(ean_part) * int(salt_part)
return (10 - (sum % 10)) % 10
checksum = staticmethod(checksum)
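    # Worked example (sketch): for the first 13 digits of the GTIN-14
    # '04006381333931', the 3/1-alternating weighted sum is 89, so
    # checksum('0400638133393') == 1, which matches the final digit.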
class product_supplierinfo(osv.osv):
_name = "product.supplierinfo"
_description = "Information about a product supplier"
def _calc_qty(self, cr, uid, ids, fields, arg, context=None):
result = {}
for supplier_info in self.browse(cr, uid, ids, context=context):
for field in fields:
result[supplier_info.id] = {field:False}
qty = supplier_info.min_qty
result[supplier_info.id]['qty'] = qty
return result
_columns = {
'name' : fields.many2one('res.partner', 'Supplier', required=True,domain = [('supplier','=',True)], ondelete='cascade', help="Supplier of this product"),
'product_name': fields.char('Supplier Product Name', help="This supplier's product name will be used when printing a request for quotation. Keep empty to use the internal one."),
'product_code': fields.char('Supplier Product Code', help="This supplier's product code will be used when printing a request for quotation. Keep empty to use the internal one."),
        'sequence' : fields.integer('Sequence', help="Assigns the priority to the list of product suppliers."),
'product_uom': fields.related('product_tmpl_id', 'uom_po_id', type='many2one', relation='product.uom', string="Supplier Unit of Measure", readonly="1", help="This comes from the product form."),
        'min_qty': fields.float('Minimal Quantity', required=True, help="The minimal quantity to purchase from this supplier, expressed in the supplier Product Unit of Measure if not empty, in the default unit of measure of the product otherwise."),
'qty': fields.function(_calc_qty, store=True, type='float', string='Quantity', multi="qty", help="This is a quantity which is converted into Default Unit of Measure."),
'product_tmpl_id' : fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade', select=True, oldname='product_id'),
'delay' : fields.integer('Delivery Lead Time', required=True, help="Lead time in days between the confirmation of the purchase order and the receipt of the products in your warehouse. Used by the scheduler for automatic computation of the purchase order planning."),
'pricelist_ids': fields.one2many('pricelist.partnerinfo', 'suppinfo_id', 'Supplier Pricelist', copy=True),
'company_id':fields.many2one('res.company','Company',select=1),
}
_defaults = {
'min_qty': 0.0,
'sequence': 1,
'delay': 1,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'product.supplierinfo', context=c),
}
def price_get(self, cr, uid, supplier_ids, product_id, product_qty=1, context=None):
"""
Calculate price from supplier pricelist.
@param supplier_ids: Ids of res.partner object.
@param product_id: Id of product.
@param product_qty: specify quantity to purchase.
"""
if type(supplier_ids) in (int,long,):
supplier_ids = [supplier_ids]
res = {}
product_pool = self.pool.get('product.product')
partner_pool = self.pool.get('res.partner')
pricelist_pool = self.pool.get('product.pricelist')
currency_pool = self.pool.get('res.currency')
currency_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
# Compute price from standard price of product
product_price = product_pool.price_get(cr, uid, [product_id], 'standard_price', context=context)[product_id]
product = product_pool.browse(cr, uid, product_id, context=context)
for supplier in partner_pool.browse(cr, uid, supplier_ids, context=context):
price = product_price
# Compute price from Purchase pricelist of supplier
pricelist_id = supplier.property_product_pricelist_purchase.id
if pricelist_id:
price = pricelist_pool.price_get(cr, uid, [pricelist_id], product_id, product_qty, context=context).setdefault(pricelist_id, 0)
price = currency_pool.compute(cr, uid, pricelist_pool.browse(cr, uid, pricelist_id).currency_id.id, currency_id, price)
# Compute price from supplier pricelist which are in Supplier Information
supplier_info_ids = self.search(cr, uid, [('name','=',supplier.id),('product_tmpl_id','=',product.product_tmpl_id.id)])
if supplier_info_ids:
cr.execute('SELECT * ' \
'FROM pricelist_partnerinfo ' \
                           'WHERE suppinfo_id IN %s ' \
'AND min_quantity <= %s ' \
'ORDER BY min_quantity DESC LIMIT 1', (tuple(supplier_info_ids),product_qty,))
res2 = cr.dictfetchone()
if res2:
price = res2['price']
res[supplier.id] = price
return res
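    # Hypothetical usage sketch (partner_id and product_id are placeholders,
    # not from the original source): resolve the unit price a supplier would
    # charge for 5 units of a product.
    #
    #     supplierinfo_obj = self.pool.get('product.supplierinfo')
    #     prices = supplierinfo_obj.price_get(cr, uid, [partner_id], product_id,
    #                                         product_qty=5)
    #     # prices == {partner_id: <unit price as float>}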
_order = 'sequence'
class pricelist_partnerinfo(osv.osv):
_name = 'pricelist.partnerinfo'
_columns = {
'name': fields.char('Description'),
'suppinfo_id': fields.many2one('product.supplierinfo', 'Partner Information', required=True, ondelete='cascade'),
        'min_quantity': fields.float('Quantity', required=True, help="The minimal quantity to trigger this rule, expressed in the supplier Unit of Measure if any or in the default Unit of Measure of the product otherwise."),
'price': fields.float('Unit Price', required=True, digits_compute=dp.get_precision('Product Price'), help="This price will be considered as a price for the supplier Unit of Measure if any or the default Unit of Measure of the product otherwise"),
}
_order = 'min_quantity asc'
class res_currency(osv.osv):
_inherit = 'res.currency'
def _check_main_currency_rounding(self, cr, uid, ids, context=None):
cr.execute('SELECT digits FROM decimal_precision WHERE name like %s',('Account',))
digits = cr.fetchone()
if digits and len(digits):
digits = digits[0]
main_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
for currency_id in ids:
if currency_id == main_currency.id:
if main_currency.rounding < 10 ** -digits:
return False
return True
_constraints = [
(_check_main_currency_rounding, 'Error! You cannot define a rounding factor for the company\'s main currency that is smaller than the decimal precision of \'Account\'.', ['rounding']),
]
class decimal_precision(osv.osv):
_inherit = 'decimal.precision'
def _check_main_currency_rounding(self, cr, uid, ids, context=None):
cr.execute('SELECT id, digits FROM decimal_precision WHERE name like %s',('Account',))
res = cr.fetchone()
if res and len(res):
account_precision_id, digits = res
main_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
for decimal_precision in ids:
if decimal_precision == account_precision_id:
if main_currency.rounding < 10 ** -digits:
return False
return True
_constraints = [
(_check_main_currency_rounding, 'Error! You cannot define the decimal precision of \'Account\' as greater than the rounding factor of the company\'s main currency', ['digits']),
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
chenke91/flask
|
refs/heads/master
|
docs/flaskext.py
|
2228
|
# flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
background_color = "#f8f8f8"
default_style = ""
styles = {
# No corresponding class for the following:
#Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Preproc: "noitalic", # class: 'cp'
Keyword: "bold #004461", # class: 'k'
Keyword.Constant: "bold #004461", # class: 'kc'
Keyword.Declaration: "bold #004461", # class: 'kd'
Keyword.Namespace: "bold #004461", # class: 'kn'
Keyword.Pseudo: "bold #004461", # class: 'kp'
Keyword.Reserved: "bold #004461", # class: 'kr'
Keyword.Type: "bold #004461", # class: 'kt'
Operator: "#582800", # class: 'o'
Operator.Word: "bold #004461", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#004461", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "#888", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #004461", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
Number: "#990000", # class: 'm'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "#888", # class: 'go'
Generic.Prompt: "#745334", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
}
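# Usage sketch (assumes Pygments is installed; not part of the original file):
# a Style subclass can be handed directly to a Pygments formatter.
#
#     from pygments import highlight
#     from pygments.lexers import PythonLexer
#     from pygments.formatters import HtmlFormatter
#
#     html = highlight("print('hi')", PythonLexer(),
#                      HtmlFormatter(style=FlaskyStyle))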
|
hiway/micropython
|
refs/heads/master
|
examples/hwapi/hwconfig_esp8266_esp12.py
|
41
|
from machine import Pin, Signal
# ESP12 module as used by many boards
# Blue LED on pin 2, active low (inverted)
LED = Signal(2, Pin.OUT, invert=True)
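# Usage sketch (not part of the original file): because the Signal wraps the
# pin with invert=True, the logical and electrical levels are opposites.
#
#     LED.on()    # LED lit  (pin driven low)
#     LED.off()   # LED dark (pin driven high)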
|
samkreter/MarketDepthCalculator
|
refs/heads/master
|
bottle.py
|
13
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2014, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
# The gevent and eventlet server adapters need to patch some modules before
# they are imported. This is why we parse the commandline parameters here but
# handle them later
if __name__ == '__main__':
from optparse import OptionParser
_cmd_parser = OptionParser(
usage="usage: %prog [options] package.module:app")
_opt = _cmd_parser.add_option
_opt("--version", action="store_true", help="show version number.")
_opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
_opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
_opt("-p", "--plugin",
action="append",
help="install additional plugin/s.")
_opt("--debug", action="store_true", help="start server in debug mode.")
_opt("--reload", action="store_true", help="auto-reload on file changes.")
_cmd_options, _cmd_args = _cmd_parser.parse_args()
if _cmd_options.server:
if _cmd_options.server.startswith('gevent'):
import gevent.monkey
gevent.monkey.patch_all()
elif _cmd_options.server.startswith('eventlet'):
import eventlet
eventlet.monkey_patch()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, sys, tempfile, threading, time, warnings
from types import FunctionType
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from inspect import getargspec
from unicodedata import normalize
try:
from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try:
from json import dumps as json_dumps, loads as json_lds
except ImportError:
try:
from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError(
"JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e():
return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
from configparser import ConfigParser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a):
raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it):
return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
else:
return unicode(s or ("" if s is None else s))
tonat = touni if py3k else tob
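# Illustrative round-trip (not part of the original source): tob() always
# yields bytes, touni() always yields the native unicode text type.
#
#     >>> tob(u'caf\xe9') == b'caf\xc3\xa9'
#     True
#     >>> touni(b'caf\xc3\xa9') == u'caf\xe9'
#     True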
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self):
pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, strict=False):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
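# Usage sketch (mirrors how BaseRequest uses DictProperty further below; the
# names here are illustrative only):
#
#     class MyRequest(object):
#         def __init__(self, environ):
#             self.environ = environ
#
#         @DictProperty('environ', 'myapp.expensive', read_only=True)
#         def expensive(self):
#             return compute()   # runs once, then cached in self.environ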
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
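# Usage sketch (illustrative, not part of the original source):
#
#     class Page(object):
#         @cached_property
#         def body(self):
#             return expensive_render()   # computed once; the result then
#                                         # shadows the property on the instance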
class lazy_attribute(object):
""" A property that caches itself to the class object. """
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError):
pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if
len(m.group(1)) % 2 else m.group(1) + '(?:', p)
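# Example (illustrative): both plain and named capturing groups are rewritten
# to non-capturing ones.
#
#     >>> _re_flatten(r'/(ab)c(?P<x>d)')
#     '/(?:ab)c(?:d)'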
class Router(object):
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf: (_re_flatten(conf or self.default_pattern),
None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)
}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
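    # Usage sketch (illustrative names): a filter matching comma-separated
    # integers, converting them in both directions.
    #
    #     def list_filter(conf):
    #         delimiter = conf or ','
    #         regexp = r'\d+(%s\d+)*' % re.escape(delimiter)
    #         def to_python(match):
    #             return [int(x) for x in match.split(delimiter)]
    #         def to_url(numbers):
    #             return delimiter.join(str(n) for n in numbers)
    #         return regexp, to_python, to_url
    #
    #     app.router.add_filter('list', list_filter)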
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0]) % 2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix + rule[offset:], None, None
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" %
(rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][
self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x + maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder:
raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons):
query['anon%d' % i] = value
url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
return url if not query else url + '?' + urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
def match(self, environ):
""" Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(verb)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
class Route(object):
""" This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
        turning a URL path rule into a regular expression usable by the Router.
"""
def __init__(self, app, rule, method, callback,
name=None,
plugins=None,
skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/<page>``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config)
@cached_property
def call(self):
""" The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests."""
return self._make_callback()
def reset(self):
""" Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. """
self.__dict__.pop('call', None)
def prepare(self):
""" Do all on-demand work immediately (useful for debugging)."""
self.call
def all_plugins(self):
""" Yield all Plugins affecting this route. """
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
""" Return the callback. If the callback is a decorated function, try to
recover the original function. """
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
attributes = getattr(func, closure_attr)
func = attributes[0].cell_contents
# in case of decorators with multiple arguments
if not isinstance(func, FunctionType):
# pick first FunctionType instance from multiple arguments
func = filter(lambda x: isinstance(x, FunctionType),
map(lambda x: x.cell_contents, attributes))
func = list(func)[0] # py3 support
return func
def get_callback_args(self):
""" Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. """
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
""" Lookup a config field and return its value, first checking the
route.config, then route.app.config."""
for conf in (self.config, self.app.config):
if key in conf: return conf[key]
return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config._on_change = functools.partial(self.trigger_hook, 'config')
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
if self.config['autojson']:
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
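    # Usage sketch (illustrative):
    #
    #     @app.hook('before_request')
    #     def strip_trailing_slash():
    #         request.environ['PATH_INFO'] = \
    #             request.environ['PATH_INFO'].rstrip('/')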
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
"""
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
rs.status = status
for name, value in headerlist:
rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
if body and rs.body: body = itertools.chain(rs.body, body)
rs.body = body or rs.body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes:
route.reset()
if DEBUG:
for route in routes:
route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self,
path=None,
method='GET',
callback=None,
name=None,
apply=None,
skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/<name>')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback,
name=name,
plugins=plugins,
skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
try:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8')
except UnicodeError:
return HTTPError(400, 'Invalid path string. Expected UTF-8')
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
finally:
self.trigger_hook('after_request')
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code,
self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
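# Usage sketch (illustrative; relies on the module-level `route` shortcut and
# the `default_app` stack defined elsewhere in this file):
#
#     with Bottle() as app:
#         @route('/hello')      # lands on `app`, the current default app
#         def hello():
#             return 'Hello World'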
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ', )
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
""" Bottle application handling this request. """
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO', '').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
""" A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. """
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
""" Return the value of a request header, or a given default value. """
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE', '')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
""" The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. """
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
""" If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. """
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype == 'application/json':
b = self._get_body_string()
if not b:
return None
return json_loads(b)
return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
@staticmethod
def _iter_chunked(read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
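    # Wire-format example (illustrative): each chunk is "<size-hex>\r\n<data>\r\n"
    # and a zero-sized chunk terminates the body, so
    #
    #     b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"
    #
    # decodes to the parts b"Wiki" and b"pedia".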
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
try:
read_func = self.environ['wsgi.input'].read
except KeyError:
self.environ['wsgi.input'] = BytesIO()
return self.environ['wsgi.input']
body_iter = self._iter_chunked if self.chunked else self._iter_body
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
""" read body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are to large. """
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request entity too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request entity too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
""" True if Chunked transfer encoding was. """
return 'chunked' in self.environ.get(
'HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING': ''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py31:
args['fp'] = NCTextIOWrapper(args['fp'],
encoding='utf8',
newline='\n')
elif py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
""" The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. """
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') \
or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
""" The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
            called. This script path is returned with leading and trailing
slashes. """
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
""" Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
"""
script, path = path_shift(self.environ.get('SCRIPT_NAME', '/'), self.path, shift)
self['SCRIPT_NAME'], self['PATH_INFO'] = script, path
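    # Example (illustrative): with SCRIPT_NAME='/app' and PATH_INFO='/a/b',
    # path_shift(1) yields SCRIPT_NAME='/app/a' and PATH_INFO='/b'; a negative
    # shift moves the segment back again.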
@property
def content_length(self):
""" The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. """
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
""" The Content-Type header as a lowercase-string (default: empty). """
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
""" True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). """
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH', '')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
""" Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION', ''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
            the client IP and followed by zero or more proxies. This only
            works if all proxies support the ``X-Forwarded-For`` header. Note
            that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None):
return self.environ.get(value, default)
def __getitem__(self, key):
return self.environ[key]
def __delitem__(self, key):
self[key] = ""
del (self.environ[key])
def __iter__(self):
return iter(self.environ)
def __len__(self):
return len(self.environ)
def keys(self):
return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.' + key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
""" Search in self.environ for additional user defined attributes. """
try:
var = self.environ['bottle.request.ext.%s' % name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s' % name] = value
def _hkey(s):
return s.title().replace('_', '-')
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, _):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
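# Usage sketch (mirrors how response objects use HeaderProperty below; the
# class here is illustrative only):
#
#     class MyResponse(object):
#         def __init__(self):
#             self.headers = {}
#         content_type = HeaderProperty('Content-Type')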
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type', )),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))
}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
""" Returns a copy of self. """
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output(header=''))
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
""" The HTTP status line as a string (e.g. ``404 Not Found``)."""
return self._status_line
@property
def status_code(self):
""" The HTTP status code as an integer (e.g. 404)."""
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999:
raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(
_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
""" An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. """
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name):
return _hkey(name) in self._headers
def __delitem__(self, name):
del self._headers[_hkey(name)]
def __getitem__(self, name):
return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value):
self._headers[_hkey(name)] = [value if isinstance(value, unicode) else
str(value)]
def get_header(self, name, default=None):
""" Return the value of a previously defined header. If there is no
header with that name, return a default value. """
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
""" Create a new response header, replacing any previously defined
headers with the same name. """
self._headers[_hkey(name)] = [value if isinstance(value, unicode)
else str(value)]
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(
value if isinstance(value, unicode) else str(value))
def iter_headers(self):
""" Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. """
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
if py3k:
return [(k, v.encode('utf8').decode('latin1')) for (k, v) in out]
else:
return [(k, v.encode('utf8') if isinstance(v, unicode) else v)
for (k, v) in out]
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty(
'Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
""" Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
:param httponly: prevents client-side JavaScript from reading this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
safe, not to store secret information on the client side.
"""
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
if len(value) > 4096: raise ValueError('Cookie value too long.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
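# Example (illustrative sketch): setting and deleting cookies on the global
# `response` object inside a handler. The names and secret are hypothetical;
# the signed variant requires a secret and may store any pickle-able value.
#
#     response.set_cookie('visited', 'yes', max_age=3600, path='/')
#     response.set_cookie('account', ('alice', 42), secret='s3cr3t')  # signed
#     response.delete_cookie('visited', path='/')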
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def _local_property():
ls = threading.local()
def fget(_):
try:
return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value):
ls.var = value
def fdel(_):
del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
""" A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). """
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, other):
other._status_code = self._status_code
other._status_line = self._status_line
other._headers = self._headers
other._cookies = self._cookies
other.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self,
status=None,
body=None,
exception=None,
traceback=None, **options):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **options)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException):
pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, _):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPError:
rv = _e()
if isinstance(rv, dict):
#Attempt to serialize, raises exception on failure
json_response = dumps(rv)
#Set content type only if serialization successful
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
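# Example (illustrative sketch): JSONPlugin is installed on every app by
# default, so returning a dict from a handler yields a JSON body with the
# Content-Type set to 'application/json'. The route is hypothetical.
#
#     @route('/status')
#     def status():
#         return {'ok': True, 'version': __version__}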
class TemplatePlugin(object):
""" This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. """
name = 'template'
api = 2
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({
'__file__': __file__,
'__path__': [],
'__all__': [],
'__loader__': self
})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.dict)
def __contains__(self, key):
return key in self.dict
def __delitem__(self, key):
del self.dict[key]
def __getitem__(self, key):
return self.dict[key][-1]
def __setitem__(self, key, value):
self.append(key, value)
def keys(self):
return self.dict.keys()
if py3k:
def values(self):
return (v[-1] for v in self.dict.values())
def items(self):
return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self):
return [v[-1] for v in self.dict.values()]
def items(self):
return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
""" Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
into a specific type. Exceptions are suppressed and result in
the default value being returned.
"""
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
""" Add a new value to the list of values for this key. """
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
""" Replace the list of values with a single value. """
self.dict[key] = [value]
def getall(self, key):
""" Return a (possibly empty) list of values for a key. """
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
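# Example (illustrative sketch): MultiDict keeps every value for a key but
# behaves like a plain dict for single-value access.
#
#     md = MultiDict(a=1)
#     md['a'] = 2                      # appends, does not overwrite
#     assert md['a'] == 2              # the newest value wins
#     assert md.getall('a') == [1, 2]
#     assert md.get('a', index=0) == 1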
class FormsDict(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
Additionally to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. """
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
""" Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. """
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
""" Return the value as a unicode string, or the default. """
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key):
return _hkey(key) in self.dict
def __delitem__(self, key):
del self.dict[_hkey(key)]
def __getitem__(self, key):
return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value):
self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(
value if isinstance(value, unicode) else str(value))
def replace(self, key, value):
self.dict[_hkey(key)] = [value if isinstance(value, unicode) else
str(value)]
def getall(self, key):
return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in [_hkey(n) for n in names]:
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
""" This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
"""
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
""" Translate header field name to CGI/WSGI environ key. """
key = key.replace('-', '_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
""" Return the header value as is (may be bytes or unicode). """
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
val = self.environ[self._ekey(key)]
if py3k:
if isinstance(val, unicode):
val = val.encode('latin1').decode('utf8')
else:
val = val.decode('utf8')
return val
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield _hkey(key[5:])
elif key in self.cgikeys:
yield _hkey(key)
def keys(self):
return [x for x in self]
def __len__(self):
return len(self.keys())
def __contains__(self, key):
return self._ekey(key) in self.environ
class ConfigDict(dict):
""" A dict-like configuration storage with additional support for
namespaces, validators, meta-data, on_change listeners and more.
"""
__slots__ = ('_meta', '_on_change')
def __init__(self):
self._meta = {}
self._on_change = lambda name, value: None
def load_config(self, filename):
""" Load values from an ``*.ini`` style config file.
If the config file contains sections, their names are used as
namespaces for the values within. The two special sections
``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
"""
conf = ConfigParser()
conf.read(filename)
for section in conf.sections():
for key, value in conf.items(section):
if section not in ('DEFAULT', 'bottle'):
key = section + '.' + key
self[key] = value
return self
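# Example (illustrative sketch, hypothetical file contents): given an ini file
#
#     [bottle]
#     debug = true
#     [sqlite]
#     db = /tmp/test.db
#
# load_config() would populate the dict with the flat keys 'debug' (root
# namespace, because [bottle] is special) and 'sqlite.db':
#
#     c = ConfigDict().load_config('app.ini')
#     c['sqlite.db']   # -> '/tmp/test.db'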
def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, str):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
""" If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
Example: ``update('some.namespace', key='value')`` """
prefix = ''
if a and isinstance(a[0], str):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix + key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError('Key has type %r (not a string)' % type(key))
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self._on_change(key, None)
dict.__delitem__(self, key)
def meta_get(self, key, metafield, default=None):
""" Return the value of a meta field for a key. """
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. """
self._meta.setdefault(key, {})[metafield] = value
if key in self:
self[key] = self[key]
def meta_list(self, key):
""" Return an iterable of meta field names defined for a key. """
return self._meta.get(key, {}).keys()
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024 * 64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
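# Example (illustrative sketch): locating data files shipped next to the
# application module. The directory and file names are hypothetical.
#
#     res = ResourceManager()
#     res.add_path('./data/', base=__file__)
#     fname = res.lookup('defaults.json')      # absolute path or None
#     with res.open('defaults.json') as fp:
#         payload = fp.read()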
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
""" Wrapper for file uploads. """
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
@cached_property
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname)
fname = fname.encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2 ** 16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2 ** 16):
""" Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
"""
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
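# Example (illustrative sketch): handling a multipart file upload in a route.
# The 'upload' field name and target directory are hypothetical.
#
#     @route('/upload', method='POST')
#     def do_upload():
#         upload = request.files.get('upload')
#         upload.save('/tmp/uploads')          # uses the sanitized filename
#         return 'Saved as %s' % upload.filename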
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
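# Example (illustrative sketch): typical use of abort() and redirect() inside
# handlers. Both raise, so no explicit return is needed. Routes are hypothetical.
#
#     @route('/restricted')
#     def restricted():
#         abort(401, 'Sorry, access denied.')
#
#     @route('/wiki/<page>')
#     def old_wiki(page):
#         redirect('/page/' + page)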
def _file_iter_range(fp, offset, bytes, maxread=1024 * 1024):
""" Yield chunks from a range in a file. No chunk is bigger than maxread."""
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root,
mimetype='auto',
download=False,
charset='UTF-8'):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 206, 304, 403, 404 or 416. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8)
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
if download and download != True:
mimetype, encoding = mimetypes.guess_type(download)
else:
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
time.gmtime())
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
ranges = request.environ.get('HTTP_RANGE')
if 'HTTP_RANGE' in request.environ:
ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end - 1, clen)
headers["Content-Length"] = str(end - offset)
if body: body = _file_iter_range(body, offset, end - offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
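# Example (illustrative sketch): serving files from a hypothetical './static'
# directory, with a forced-download variant.
#
#     @route('/static/<filepath:path>')
#     def serve_static(filepath):
#         return static_file(filepath, root='./static')
#
#     @route('/download/<filename>')
#     def download(filename):
#         return static_file(filename, root='./static', download=True)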
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0, )) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
return user, pwd
except (KeyError, ValueError):
return None
def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen - int(end)), maxlen
elif not end: # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end) + 1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
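# Example (illustrative sketch): parsed ranges are half-open (start, end)
# tuples clamped to the resource length.
#
#     list(parse_range_header('bytes=0-99,500-', 1000))
#     # -> [(0, 100), (500, 1000)]
#     list(parse_range_header('bytes=-100', 1000))
#     # -> [(900, 1000)]  (the last 100 bytes)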
def _parse_qsl(qs):
r = []
for pair in qs.replace(';', '&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x == y else 1
for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
""" Encode and sign a pickle-able object. Return a (byte) string """
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
""" Verify and decode an encoded string. Return an object or None."""
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
""" Return True if the argument looks like a encoded cookie."""
return bool(data.startswith(tob('!')) and tob('?') in data)
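# Example (illustrative sketch): a signed-cookie round trip. A wrong key makes
# cookie_decode() return None instead of the payload.
#
#     blob = cookie_encode(('user', 'alice'), key='s3cr3t')
#     assert cookie_is_encoded(blob)
#     assert cookie_decode(blob, key='s3cr3t') == ('user', 'alice')
#     assert cookie_decode(blob, key='wrong') is None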
def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
return string.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')\
.replace('"', '&quot;').replace("'", '&#039;')
def html_quote(string):
""" Escape and quote a string to be used as an HTTP attribute."""
return '"%s"' % html_escape(string).replace('\n', ' ')\
.replace('\r', ' ').replace('\t', '	')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__', '/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
:param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
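# Example (illustrative sketch): moving one fragment from PATH_INFO to
# SCRIPT_NAME, as done when mounting sub-applications.
#
#     path_shift('/app', '/blog/5', shift=1)
#     # -> ('/app/blog', '/5')
#     path_shift('/app/blog', '/5', shift=-1)
#     # -> ('/app', '/blog/5')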
def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
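# Example (illustrative sketch): protecting a route with basic auth. The
# check function and credentials are hypothetical.
#
#     def check_credentials(user, password):
#         return user == 'admin' and password == 'secret'
#
#     @route('/admin')
#     @auth_basic(check_credentials)
#     def admin_panel():
#         return 'Welcome, admin.'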
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import make_server
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.srv = make_server(self.host, self.port, app, server_cls,
handler_cls)
self.port = self.srv.server_port # update to the actual port (0 means random)
try:
self.srv.serve_forever()
except KeyboardInterrupt:
self.srv.server_close() # Prevent ResourceWarning: unclosed socket
raise
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port, _quiet=self.quiet)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler,
host=self.host,
port=str(self.port), **self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port, address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
# Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
if not reactor.running:
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import wsgi, pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if not self.options.pop('fast', None): wsgi = pywsgi
self.options['log'] = None if self.quiet else 'default'
address = (self.host, self.port)
server = wsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
def run(self, handler):
from socketio import server
address = (self.host, self.port)
server.SocketIOServer(address, handler, **self.options).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', {'wsgi_app': handler})
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AiohttpServer(ServerAdapter):
""" Untested.
aiohttp
https://pypi.python.org/pypi/aiohttp/
"""
def run(self, handler):
import asyncio
from aiohttp.wsgi import WSGIServerHttpProtocol
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
protocol_factory = lambda: WSGIServerHttpProtocol(
handler,
readpayload=True,
debug=(not self.quiet))
self.loop.run_until_complete(self.loop.create_server(protocol_factory,
self.host,
self.port))
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: self.loop.stop())
try:
self.loop.run_forever()
except KeyboardInterrupt:
self.loop.stop()
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer,
WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'geventSocketIO': GeventSocketIOServer,
'rocket': RocketServer,
'bjoern': BjoernServer,
'aiohttp': AiohttpServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN
NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
_debug = debug
def run(app=None,
server='wsgiref',
host='127.0.0.1',
port=8080,
interval=1,
reloader=False,
quiet=False,
plugins=None,
debug=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
import subprocess
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" %
(__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" %
(server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
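# Example (illustrative sketch): the smallest possible application, served
# with the built-in wsgiref adapter and auto-reloading during development.
#
#     @route('/hello/<name>')
#     def hello(name):
#         return 'Hello %s!' % name
#
#     if __name__ == '__main__':
#         run(host='localhost', port=8080, reloader=True, debug=True)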
class FileCheckerThread(threading.Thread):
""" Interrupt main-thread as soon as a changed module file is detected,
the lockfile gets deleted or gets to old. """
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.daemon = True
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda p: os.stat(p).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, *_):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl', 'html', 'thtml', 'stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self,
source=None,
name=None,
lookup=None,
encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
depr('The template lookup path list should not be empty.',
True) #0.12
lookup = ['.']
if os.path.isabs(name) and os.path.isfile(name):
depr('Absolute template path names are deprecated.', True) #0.12
return os.path.abspath(name)
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
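# A minimal adapter sketch showing the API described above (illustrative only,
# not shipped with bottle); it substitutes variables with plain str.format():
#
#     class EchoTemplate(BaseTemplate):
#         def prepare(self, **options):
#             self.tpl = self.source or open(self.filename).read()
#         def render(self, *args, **kwargs):
#             env = self.defaults.copy()
#             for dictarg in args: env.update(dictarg)
#             env.update(kwargs)
#             return self.tpl.format(**env)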
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding': self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name,
filename=self.filename,
lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
def prepare(self,
escape_func=html_escape,
noescape=False,
syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
depr('Template encodings other than utf8 are not supported.') #0.11
source, encoding = touni(source, 'latin1'), 'latin1'
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({
'_stdout': _stdout,
'_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env),
'_rebase': None,
'_str': self._str,
'_escape': self._escape,
'get': env.get,
'setdefault': env.setdefault,
'defined': env.__contains__
})
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}
stdout = []
for dictarg in args:
env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
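# Usage sketch for SimpleTemplate with the default '{{ }}' inline syntax:
#
#     tpl = SimpleTemplate('Hello {{name.title()}}!')
#     tpl.render(name='world')       # -> 'Hello World!'
#     tpl.render({'name': 'world'})  # dict arguments work as well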
class StplSyntaxError(TemplateError):
pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
    # This huge pile of voodoo magic splits python code into 9 different tokens.
    # We use the verbose (?x) regex mode to make this more manageable
    _re_tok = _re_inl = r'''((?mx)  # verbose and multiline mode
        # 1: All kinds of python strings
[urbURB]*
(?: ''(?!')
|""(?!")
|'{6}
|"{6}
|'(?:[^\\']|\\.)+?'
|"(?:[^\\"]|\\.)+?"
|'{3}(?:[^\\]|\\.|\n)+?'{3}
|"{3}(?:[^\\]|\\.|\n)+?"{3}
)
)'''
_re_inl = _re_tok.replace(r'|\n', '') # We re-use this string pattern later
_re_tok += r'''
# 2: Comments (until end of line, but not the newline itself)
|(\#.*)
# 3: Open and close (4) grouping tokens
|([\[\{\(])
|([\]\}\)])
# 5,6: Keywords that start or continue a python block (only start of line)
|^([\ \t]*(?:if|for|while|with|try|def|class)\b)
|^([\ \t]*(?:elif|else|except|finally)\b)
# 7: Our special 'end' keyword (but only if it stands alone)
|((?:^|;)[\ \t]*end[\ \t]*(?=(?:%(block_close)s[\ \t]*)?\r?$|;|\#))
# 8: A customizable end-of-code-block template token (only end of line)
|(%(block_close)s[\ \t]*(?=\r?$))
# 9: And finally, a single newline. The 10th token is 'everything else'
|(\r?\n)
'''
# Match the start tokens of code areas in a template
_re_split = r'''(?m)^[ \t]*(\\?)((%(line_start)s)|(%(block_start)s))'''
# Match inline statements (may contain python strings)
_re_inl = r'''%%(inline_start)s((?:%s|[^'"\n]+?)*?)%%(inline_end)s''' % _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
self.paren_depth = 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
        if syntax not in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p % pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source, pos=self.offset)
if m:
text = self.source[self.offset:m.start()]
self.text_buffer.append(text)
self.offset = m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[m.start():m.start(1)] +
m.group(2) + line + sep)
self.offset += len(line + sep)
continue
self.flush_text()
self.offset += self.read_code(self.source[self.offset:],
multiline=bool(m.group(4)))
else:
break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, pysource, multiline):
code_line, comment = '', ''
offset = 0
while True:
m = self.re_tok.search(pysource, pos=offset)
if not m:
code_line += pysource[offset:]
offset = len(pysource)
self.write_code(code_line.strip(), comment)
break
code_line += pysource[offset:m.start()]
offset = m.end()
_str, _com, _po, _pc, _blk1, _blk2, _end, _cend, _nl = m.groups()
if self.paren_depth > 0 and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _po: # open parenthesis
self.paren_depth += 1
code_line += _po
elif _pc: # close parenthesis
if self.paren_depth > 0:
# we could check for matching parentheses here, but it's
# easier to leave that to python - just check counts
self.paren_depth -= 1
code_line += _pc
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
return offset
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n' + ' ' * self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n') + 1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
code = ' ' * (self.indent + self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
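# Rough illustration of the translation step (the exact generated code is an
# internal detail and may differ between versions):
#
#     p = StplParser('% if x:\nhi {{x}}\n% end\n')
#     print(p.translate())
#     # if x:
#     #     _printlist(('hi ', _escape(x), '\n'))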
def template(*args, **kwargs):
"""
    Get a rendered template as a string.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]:
kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template,
template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
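# Usage sketch for the helpers above (the template names are hypothetical):
#
#     html = template('Hello {{name}}!', name='World')    # inline source
#     html = template('index', title='Home')              # file lookup via
#                                                         # TEMPLATE_PATH
#     html = jinja2_template('index.html', title='Home')  # other engines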
def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
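# Usage sketch for the view decorator (route and template are hypothetical):
#
#     @route('/hello/<name>')
#     @view('hello_template')
#     def hello(name):
#         return dict(name=name)  # rendered into 'hello_template'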
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s' % (k, v))
for (k, v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else
__name__ + ".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
_stdout('Bottle %s\n' % __version__)
sys.exit(0)
if not args:
parser.print_help()
_stderr('\nError: No application entry point specified.\n')
sys.exit(1)
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
run(args[0],
host=host,
port=int(port),
server=opt.server,
reloader=opt.reload,
plugins=opt.plugin,
debug=opt.debug)
# THE END
|
ISIFoundation/influenzanet-website
|
refs/heads/master
|
apps/survey/migrations/0008_auto.py
|
4
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing M2M table for field user on 'SurveyUser'
db.delete_table('survey_surveyuser_user')
def backwards(self, orm):
# Adding M2M table for field user on 'SurveyUser'
db.create_table('survey_surveyuser_user', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('surveyuser', models.ForeignKey(orm['survey.surveyuser'], null=False)),
('user', models.ForeignKey(orm['auth.user'], null=False))
))
db.create_unique('survey_surveyuser_user', ['surveyuser_id', 'user_id'])
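    # Sketch of standard South usage for this migration (the exact invocation
    # depends on the project setup; '0008' matches this file's prefix):
    #
    #     ./manage.py migrate survey 0008   # apply (forwards)
    #     ./manage.py migrate survey 0007   # revert (backwards)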
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'survey.extraresponse': {
'Meta': {'object_name': 'ExtraResponse'},
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Participation']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.lastresponse': {
'Meta': {'object_name': 'LastResponse'},
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Participation']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'})
},
'survey.localflusurvey': {
'Meta': {'object_name': 'LocalFluSurvey'},
'age_user': ('django.db.models.fields.SmallIntegerField', [], {}),
'data': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'surveyuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.localprofile': {
'Meta': {'object_name': 'LocalProfile'},
'a_family': ('django.db.models.fields.SmallIntegerField', [], {}),
'a_smoker': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_current': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_prev_seasonal': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'a_vaccine_prev_swine': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'birth_date': ('django.db.models.fields.DateField', [], {}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'sq_date_first': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'sq_date_last': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'sq_num_season': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'sq_num_total': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'surveyuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '5'})
},
'survey.localresponse': {
'Meta': {'object_name': 'LocalResponse'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.participation': {
'Meta': {'object_name': 'Participation'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'epidb_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'previous_participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']", 'null': 'True'}),
'previous_participation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Survey']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"})
},
'survey.profile': {
'Meta': {'object_name': 'Profile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['survey.Survey']", 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']", 'unique': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.profilesendqueue': {
'Meta': {'object_name': 'ProfileSendQueue'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.SurveyUser']"}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.responsesendqueue': {
'Meta': {'object_name': 'ResponseSendQueue'},
'answers': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']"}),
'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'survey.survey': {
'Meta': {'object_name': 'Survey'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'specification': ('django.db.models.fields.TextField', [], {}),
'survey_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'survey.surveyuser': {
'Meta': {'object_name': 'SurveyUser'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'global_id': ('django.db.models.fields.CharField', [], {'default': "'3e49b445-8490-4f35-8d5a-63eb7d66b084'", 'unique': 'True', 'max_length': '36'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_participation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Participation']", 'null': 'True'}),
'last_participation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'oneuser': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
}
}
complete_apps = ['survey']
|
duyetdev/islab-portfolio-by-ghost
|
refs/heads/master
|
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/jvm.py
|
94
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.jvm
~~~~~~~~~~~~~~~~~~~
Pygments lexers for JVM languages.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.util import get_choice_opt
from pygments import unistring as uni
__all__ = ['JavaLexer', 'ScalaLexer', 'GosuLexer', 'GosuTemplateLexer',
'GroovyLexer', 'IokeLexer', 'ClojureLexer', 'KotlinLexer',
'XtendLexer', 'AspectJLexer', 'CeylonLexer']
class JavaLexer(RegexLexer):
"""
For `Java <http://www.sun.com/java/>`_ source code.
"""
name = 'Java'
aliases = ['java']
filenames = ['*.java']
mimetypes = ['text/x-java']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]<>]*\s+)+?)' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator),
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|goto|instanceof|new|return|switch|this|throw|try|while)\b',
Keyword),
(r'(abstract|const|enum|extends|final|implements|native|private|'
r'protected|public|static|strictfp|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text), 'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
(r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label),
(r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name),
(r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'import': [
(r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop')
],
}
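# Usage sketch with the standard pygments API (formatter choice is arbitrary):
#
#     from pygments import highlight
#     from pygments.formatters import HtmlFormatter
#     print(highlight('int x = 42;', JavaLexer(), HtmlFormatter()))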
class AspectJLexer(JavaLexer):
"""
For `AspectJ <http://www.eclipse.org/aspectj/>`_ source code.
*New in Pygments 1.6.*
"""
name = 'AspectJ'
aliases = ['aspectj']
filenames = ['*.aj']
mimetypes = ['text/x-aspectj']
aj_keywords = [
'aspect', 'pointcut', 'privileged', 'call', 'execution',
'initialization', 'preinitialization', 'handler', 'get', 'set',
'staticinitialization', 'target', 'args', 'within', 'withincode',
'cflow', 'cflowbelow', 'annotation', 'before', 'after', 'around',
'proceed', 'throwing', 'returning', 'adviceexecution', 'declare',
'parents', 'warning', 'error', 'soft', 'precedence', 'thisJoinPoint',
'thisJoinPointStaticPart', 'thisEnclosingJoinPointStaticPart',
'issingleton', 'perthis', 'pertarget', 'percflow', 'percflowbelow',
'pertypewithin', 'lock', 'unlock', 'thisAspectInstance'
]
aj_inter_type = ['parents:', 'warning:', 'error:', 'soft:', 'precedence:']
aj_inter_type_annotation = ['@type', '@method', '@constructor', '@field']
def get_tokens_unprocessed(self, text):
for index, token, value in JavaLexer.get_tokens_unprocessed(self, text):
if token is Name and value in self.aj_keywords:
yield index, Keyword, value
elif token is Name.Label and value in self.aj_inter_type:
yield index, Keyword, value[:-1]
yield index, Operator, value[-1]
elif token is Name.Decorator and value in self.aj_inter_type_annotation:
yield index, Keyword, value
else:
yield index, token, value
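# AspectJLexer illustrates a common pygments pattern: post-process the parent
# lexer's token stream instead of redefining the grammar. A quick sketch:
#
#     for i, tok, val in AspectJLexer().get_tokens_unprocessed('aspect A {}'):
#         print(i, tok, val)  # 'aspect' comes back as Keyword, not Name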
class ScalaLexer(RegexLexer):
"""
For `Scala <http://www.scala-lang.org>`_ source code.
"""
name = 'Scala'
aliases = ['scala']
filenames = ['*.scala']
mimetypes = ['text/x-scala']
flags = re.MULTILINE | re.DOTALL
# don't use raw unicode strings!
op = ('[-~\\^\\*!%&\\\\<>\\|+=:/?@\u00a6-\u00a7\u00a9\u00ac\u00ae\u00b0-\u00b1'
'\u00b6\u00d7\u00f7\u03f6\u0482\u0606-\u0608\u060e-\u060f\u06e9'
'\u06fd-\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0cf1-\u0cf2'
'\u0d79\u0f01-\u0f03\u0f13-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38'
'\u0fbe-\u0fc5\u0fc7-\u0fcf\u109e-\u109f\u1360\u1390-\u1399\u1940'
'\u19e0-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2044\u2052\u207a-\u207c'
'\u208a-\u208c\u2100-\u2101\u2103-\u2106\u2108-\u2109\u2114\u2116-\u2118'
'\u211e-\u2123\u2125\u2127\u2129\u212e\u213a-\u213b\u2140-\u2144'
'\u214a-\u214d\u214f\u2190-\u2328\u232b-\u244a\u249c-\u24e9\u2500-\u2767'
'\u2794-\u27c4\u27c7-\u27e5\u27f0-\u2982\u2999-\u29d7\u29dc-\u29fb'
'\u29fe-\u2b54\u2ce5-\u2cea\u2e80-\u2ffb\u3004\u3012-\u3013\u3020'
'\u3036-\u3037\u303e-\u303f\u3190-\u3191\u3196-\u319f\u31c0-\u31e3'
'\u3200-\u321e\u322a-\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u33ff'
'\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ufb29\ufdfd\ufe62\ufe64-\ufe66'
'\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe4\uffe8-\uffee\ufffc-\ufffd]+')
letter = ('[a-zA-Z\\$_\u00aa\u00b5\u00ba\u00c0-\u00d6\u00d8-\u00f6'
'\u00f8-\u02af\u0370-\u0373\u0376-\u0377\u037b-\u037d\u0386'
'\u0388-\u03f5\u03f7-\u0481\u048a-\u0556\u0561-\u0587\u05d0-\u05f2'
'\u0621-\u063f\u0641-\u064a\u066e-\u066f\u0671-\u06d3\u06d5'
'\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5'
'\u07b1\u07ca-\u07ea\u0904-\u0939\u093d\u0950\u0958-\u0961'
'\u0972-\u097f\u0985-\u09b9\u09bd\u09ce\u09dc-\u09e1\u09f0-\u09f1'
'\u0a05-\u0a39\u0a59-\u0a5e\u0a72-\u0a74\u0a85-\u0ab9\u0abd'
'\u0ad0-\u0ae1\u0b05-\u0b39\u0b3d\u0b5c-\u0b61\u0b71\u0b83-\u0bb9'
'\u0bd0\u0c05-\u0c3d\u0c58-\u0c61\u0c85-\u0cb9\u0cbd\u0cde-\u0ce1'
'\u0d05-\u0d3d\u0d60-\u0d61\u0d7a-\u0d7f\u0d85-\u0dc6\u0e01-\u0e30'
'\u0e32-\u0e33\u0e40-\u0e45\u0e81-\u0eb0\u0eb2-\u0eb3\u0ebd-\u0ec4'
'\u0edc-\u0f00\u0f40-\u0f6c\u0f88-\u0f8b\u1000-\u102a\u103f'
'\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070'
'\u1075-\u1081\u108e\u10a0-\u10fa\u1100-\u135a\u1380-\u138f'
'\u13a0-\u166c\u166f-\u1676\u1681-\u169a\u16a0-\u16ea\u16ee-\u1711'
'\u1720-\u1731\u1740-\u1751\u1760-\u1770\u1780-\u17b3\u17dc'
'\u1820-\u1842\u1844-\u18a8\u18aa-\u191c\u1950-\u19a9\u19c1-\u19c7'
'\u1a00-\u1a16\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf'
'\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1d00-\u1d2b\u1d62-\u1d77'
'\u1d79-\u1d9a\u1e00-\u1fbc\u1fbe\u1fc2-\u1fcc\u1fd0-\u1fdb'
'\u1fe0-\u1fec\u1ff2-\u1ffc\u2071\u207f\u2102\u2107\u210a-\u2113'
'\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139'
'\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c7c'
'\u2c80-\u2ce4\u2d00-\u2d65\u2d80-\u2dde\u3006-\u3007\u3021-\u3029'
'\u3038-\u303a\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff-\u318e'
'\u31a0-\u31b7\u31f0-\u31ff\u3400-\u4db5\u4e00-\ua014\ua016-\ua48c'
'\ua500-\ua60b\ua610-\ua61f\ua62a-\ua66e\ua680-\ua697\ua722-\ua76f'
'\ua771-\ua787\ua78b-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822'
'\ua840-\ua873\ua882-\ua8b3\ua90a-\ua925\ua930-\ua946\uaa00-\uaa28'
'\uaa40-\uaa42\uaa44-\uaa4b\uac00-\ud7a3\uf900-\ufb1d\ufb1f-\ufb28'
'\ufb2a-\ufd3d\ufd50-\ufdfb\ufe70-\ufefc\uff21-\uff3a\uff41-\uff5a'
'\uff66-\uff6f\uff71-\uff9d\uffa0-\uffdc]')
upper = ('[A-Z\\$_\u00c0-\u00d6\u00d8-\u00de\u0100\u0102\u0104\u0106\u0108'
'\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c'
'\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130'
'\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145'
'\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a'
'\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e'
'\u0170\u0172\u0174\u0176\u0178-\u0179\u017b\u017d\u0181-\u0182'
'\u0184\u0186-\u0187\u0189-\u018b\u018e-\u0191\u0193-\u0194'
'\u0196-\u0198\u019c-\u019d\u019f-\u01a0\u01a2\u01a4\u01a6-\u01a7'
'\u01a9\u01ac\u01ae-\u01af\u01b1-\u01b3\u01b5\u01b7-\u01b8\u01bc'
'\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9'
'\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee'
'\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204'
'\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218'
'\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c'
'\u022e\u0230\u0232\u023a-\u023b\u023d-\u023e\u0241\u0243-\u0246'
'\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u0386\u0388-\u038f'
'\u0391-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0'
'\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7'
'\u03f9-\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a'
'\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e'
'\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a'
'\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae'
'\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0-\u04c1'
'\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6'
'\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea'
'\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe'
'\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512'
'\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0531-\u0556'
'\u10a0-\u10c5\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e'
'\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22'
'\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36'
'\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a'
'\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e'
'\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72'
'\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86'
'\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2'
'\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6'
'\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca'
'\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede'
'\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2'
'\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d'
'\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59-\u1f5f'
'\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb'
'\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112'
'\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133'
'\u213e-\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67'
'\u2c69\u2c6b\u2c6d-\u2c6f\u2c72\u2c75\u2c80\u2c82\u2c84\u2c86'
'\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a'
'\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae'
'\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2'
'\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6'
'\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\ua640\ua642\ua644\ua646'
'\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a'
'\ua65c\ua65e\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682'
'\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696'
'\ua722\ua724\ua726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736'
'\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a'
'\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e'
'\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b'
'\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\uff21-\uff3a]')
idrest = r'%s(?:%s|[0-9])*(?:(?<=_)%s)?' % (letter, letter, op)
tokens = {
'root': [
# method names
(r'(class|trait|object)(\s+)', bygroups(Keyword, Text), 'class'),
(r"'%s" % idrest, Text.Symbol),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(r'@%s' % idrest, Name.Decorator),
(r'(abstract|ca(?:se|tch)|d(?:ef|o)|e(?:lse|xtends)|'
r'f(?:inal(?:ly)?|or(?:Some)?)|i(?:f|mplicit)|'
r'lazy|match|new|override|pr(?:ivate|otected)'
r'|re(?:quires|turn)|s(?:ealed|uper)|'
r't(?:h(?:is|row)|ry)|va[lr]|w(?:hile|ith)|yield)\b|'
'(<[%:-]|=>|>:|[#=@_\u21D2\u2190])(\\b|(?=\\s)|$)', Keyword),
(r':(?!%s)' % op, Keyword, 'type'),
(r'%s%s\b' % (upper, idrest), Name.Class),
(r'(true|false|null)\b', Keyword.Constant),
(r'(import|package)(\s+)', bygroups(Keyword, Text), 'import'),
(r'(type)(\s+)', bygroups(Keyword, Text), 'type'),
(r'""".*?"""(?!")', String),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
# (ur'(\.)(%s|%s|`[^`]+`)' % (idrest, op), bygroups(Operator,
# Name.Attribute)),
(idrest, Name),
(r'`[^`]+`', Name),
(r'\[', Operator, 'typeparam'),
(r'[\(\)\{\};,.#]', Operator),
(op, Operator),
(r'([0-9][0-9]*\.[0-9]*|\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?',
Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'(%s|%s|`[^`]+`)(\s*)(\[)' % (idrest, op),
bygroups(Name.Class, Text, Operator), 'typeparam'),
(r'\s+', Text),
(r'{', Operator, '#pop'),
(r'\(', Operator, '#pop'),
(r'//.*?\n', Comment.Single, '#pop'),
(r'%s|%s|`[^`]+`' % (idrest, op), Name.Class, '#pop'),
],
'type': [
(r'\s+', Text),
('<[%:]|>:|[#_\u21D2]|forSome|type', Keyword),
(r'([,\);}]|=>|=)(\s*)', bygroups(Operator, Text), '#pop'),
(r'[\(\{]', Operator, '#push'),
(r'((?:%s|%s|`[^`]+`)(?:\.(?:%s|%s|`[^`]+`))*)(\s*)(\[)' %
(idrest, op, idrest, op),
bygroups(Keyword.Type, Text, Operator), ('#pop', 'typeparam')),
(r'((?:%s|%s|`[^`]+`)(?:\.(?:%s|%s|`[^`]+`))*)(\s*)$' %
(idrest, op, idrest, op),
bygroups(Keyword.Type, Text), '#pop'),
(r'//.*?\n', Comment.Single, '#pop'),
(r'\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
],
'typeparam': [
(r'[\s,]+', Text),
('<[%:]|=>|>:|[#_\u21D2]|forSome|type', Keyword),
(r'([\]\)\}])', Operator, '#pop'),
(r'[\(\[\{]', Operator, '#push'),
(r'\.|%s|%s|`[^`]+`' % (idrest, op), Keyword.Type)
],
'comment': [
(r'[^/\*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
],
'import': [
(r'(%s|\.)+' % idrest, Name.Namespace, '#pop')
],
}
class GosuLexer(RegexLexer):
"""
For Gosu source code.
*New in Pygments 1.5.*
"""
name = 'Gosu'
aliases = ['gosu']
filenames = ['*.gs', '*.gsx', '*.gsp', '*.vark']
mimetypes = ['text/x-gosu']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?)' # modifiers etc.
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator),
(r'(in|as|typeof|statictypeof|typeis|typeas|if|else|foreach|for|'
r'index|while|do|continue|break|return|try|catch|finally|this|'
r'throw|new|switch|case|default|eval|super|outer|classpath|'
r'using)\b', Keyword),
(r'(var|delegate|construct|function|private|internal|protected|'
r'public|abstract|override|final|static|extends|transient|'
r'implements|represents|readonly)\b', Keyword.Declaration),
(r'(property\s+)(get|set)?', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void|block)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null|NaN|Infinity)\b', Keyword.Constant),
(r'(class|interface|enhancement|enum)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Keyword.Declaration, Text, Name.Class)),
(r'(uses)(\s+)([a-zA-Z0-9_.]+\*?)',
bygroups(Keyword.Namespace, Text, Name.Namespace)),
(r'"', String, 'string'),
(r'(\??[\.#])([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Operator, Name.Attribute)),
(r'(:)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name),
(r'and|or|not|[\\~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\n', Text)
],
'templateText': [
(r'(\\<)|(\\\$)', String),
(r'(<%@\s+)(extends|params)',
bygroups(Operator, Name.Decorator), 'stringTemplate'),
(r'<%!--.*?--%>', Comment.Multiline),
(r'(<%)|(<%=)', Operator, 'stringTemplate'),
(r'\$\{', Operator, 'stringTemplateShorthand'),
(r'.', String)
],
'string': [
(r'"', String, '#pop'),
include('templateText')
],
'stringTemplate': [
(r'"', String, 'string'),
(r'%>', Operator, '#pop'),
include('root')
],
'stringTemplateShorthand': [
(r'"', String, 'string'),
(r'\{', Operator, 'stringTemplateShorthand'),
(r'\}', Operator, '#pop'),
include('root')
],
}
class GosuTemplateLexer(Lexer):
"""
For Gosu templates.
*New in Pygments 1.5.*
"""
name = 'Gosu Template'
aliases = ['gst']
filenames = ['*.gst']
mimetypes = ['text/x-gosu-template']
lexer = GosuLexer()
def get_tokens_unprocessed(self, text):
stack = ['templateText']
for item in self.lexer.get_tokens_unprocessed(text, stack):
yield item
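# The trick above works because RegexLexer.get_tokens_unprocessed accepts an
# initial state stack, so the template lexer reuses GosuLexer but starts in
# 'templateText' instead of 'root'. The same idea as a one-off sketch:
#
#     tokens = GosuLexer().get_tokens_unprocessed('<%= user %>',
#                                                 stack=['templateText'])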
class GroovyLexer(RegexLexer):
"""
For `Groovy <http://groovy.codehaus.org/>`_ source code.
*New in Pygments 1.5.*
"""
name = 'Groovy'
aliases = ['groovy']
filenames = ['*.groovy']
mimetypes = ['text/x-groovy']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator),
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|goto|instanceof|new|return|switch|this|throw|try|while|in|as)\b',
Keyword),
(r'(abstract|const|enum|extends|final|implements|native|private|'
r'protected|public|static|strictfp|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(def|boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text),
'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'\$/((?!/\$).)*/\$', String),
(r'/(\\\\|\\"|[^/])*/', String),
(r"'\\.'|'[^\\]'|'\\u[0-9a-fA-F]{4}'", String.Char),
(r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label),
(r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name),
(r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'import': [
(r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop')
],
}
class IokeLexer(RegexLexer):
"""
For `Ioke <http://ioke.org/>`_ (a strongly typed, dynamic,
prototype based programming language) source.
*New in Pygments 1.4.*
"""
name = 'Ioke'
filenames = ['*.ik']
aliases = ['ioke', 'ik']
mimetypes = ['text/x-iokesrc']
tokens = {
'interpolatableText': [
(r'(\\b|\\e|\\t|\\n|\\f|\\r|\\"|\\\\|\\#|\\\Z|\\u[0-9a-fA-F]{1,4}'
r'|\\[0-3]?[0-7]?[0-7])', String.Escape),
(r'#{', Punctuation, 'textInterpolationRoot')
],
'text': [
(r'(?<!\\)"', String, '#pop'),
include('interpolatableText'),
(r'[^"]', String)
],
'documentation': [
(r'(?<!\\)"', String.Doc, '#pop'),
include('interpolatableText'),
(r'[^"]', String.Doc)
],
'textInterpolationRoot': [
(r'}', Punctuation, '#pop'),
include('root')
],
'slashRegexp': [
(r'(?<!\\)/[oxpniums]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\/', String.Regex),
(r'[^/]', String.Regex)
],
'squareRegexp': [
(r'(?<!\\)][oxpniums]*', String.Regex, '#pop'),
include('interpolatableText'),
(r'\\]', String.Regex),
(r'[^\]]', String.Regex)
],
'squareText': [
(r'(?<!\\)]', String, '#pop'),
include('interpolatableText'),
(r'[^\]]', String)
],
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r';(.*?)\n', Comment),
(r'\A#!(.*?)\n', Comment),
#Regexps
(r'#/', String.Regex, 'slashRegexp'),
(r'#r\[', String.Regex, 'squareRegexp'),
#Symbols
(r':[a-zA-Z0-9_!:?]+', String.Symbol),
(r'[a-zA-Z0-9_!:?]+:(?![a-zA-Z0-9_!?])', String.Other),
(r':"(\\\\|\\"|[^"])*"', String.Symbol),
#Documentation
(r'((?<=fn\()|(?<=fnx\()|(?<=method\()|(?<=macro\()|(?<=lecro\()'
r'|(?<=syntax\()|(?<=dmacro\()|(?<=dlecro\()|(?<=dlecrox\()'
r'|(?<=dsyntax\())\s*"', String.Doc, 'documentation'),
#Text
(r'"', String, 'text'),
(r'#\[', String, 'squareText'),
#Mimic
(r'[a-zA-Z0-9_][a-zA-Z0-9!?_:]+(?=\s*=.*mimic\s)', Name.Entity),
#Assignment
(r'[a-zA-Z_][a-zA-Z0-9_!:?]*(?=[\s]*[+*/-]?=[^=].*($|\.))',
Name.Variable),
# keywords
(r'(break|cond|continue|do|ensure|for|for:dict|for:set|if|let|'
r'loop|p:for|p:for:dict|p:for:set|return|unless|until|while|'
r'with)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
# Origin
(r'(eval|mimic|print|println)(?![a-zA-Z0-9!:_?])', Keyword),
# Base
(r'(cell\?|cellNames|cellOwner\?|cellOwner|cells|cell|'
r'documentation|hash|identity|mimic|removeCell\!|undefineCell\!)'
r'(?![a-zA-Z0-9!:_?])', Keyword),
# Ground
(r'(stackTraceAsText)(?![a-zA-Z0-9!:_?])', Keyword),
#DefaultBehaviour Literals
(r'(dict|list|message|set)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
#DefaultBehaviour Case
(r'(case|case:and|case:else|case:nand|case:nor|case:not|case:or|'
r'case:otherwise|case:xor)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
#DefaultBehaviour Reflection
(r'(asText|become\!|derive|freeze\!|frozen\?|in\?|is\?|kind\?|'
r'mimic\!|mimics|mimics\?|prependMimic\!|removeAllMimics\!|'
r'removeMimic\!|same\?|send|thaw\!|uniqueHexId)'
r'(?![a-zA-Z0-9!:_?])', Keyword),
#DefaultBehaviour Aspects
(r'(after|around|before)(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
# DefaultBehaviour
(r'(kind|cellDescriptionDict|cellSummary|genSym|inspect|notice)'
r'(?![a-zA-Z0-9!:_?])', Keyword),
(r'(use|destructuring)', Keyword.Reserved),
#DefaultBehavior BaseBehavior
(r'(cell\?|cellOwner\?|cellOwner|cellNames|cells|cell|'
r'documentation|identity|removeCell!|undefineCell)'
r'(?![a-zA-Z0-9!:_?])', Keyword),
#DefaultBehavior Internal
(r'(internal:compositeRegexp|internal:concatenateText|'
r'internal:createDecimal|internal:createNumber|'
r'internal:createRegexp|internal:createText)'
r'(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
#DefaultBehaviour Conditions
(r'(availableRestarts|bind|error\!|findRestart|handle|'
r'invokeRestart|rescue|restart|signal\!|warn\!)'
r'(?![a-zA-Z0-9!:_?])', Keyword.Reserved),
# constants
(r'(nil|false|true)(?![a-zA-Z0-9!:_?])', Name.Constant),
# names
(r'(Arity|Base|Call|Condition|DateTime|Aspects|Pointcut|'
r'Assignment|BaseBehavior|Boolean|Case|AndCombiner|Else|'
r'NAndCombiner|NOrCombiner|NotCombiner|OrCombiner|XOrCombiner|'
r'Conditions|Definitions|FlowControl|Internal|Literals|'
r'Reflection|DefaultMacro|DefaultMethod|DefaultSyntax|Dict|'
r'FileSystem|Ground|Handler|Hook|IO|IokeGround|Struct|'
r'LexicalBlock|LexicalMacro|List|Message|Method|Mixins|'
r'NativeMethod|Number|Origin|Pair|Range|Reflector|Regexp Match|'
r'Regexp|Rescue|Restart|Runtime|Sequence|Set|Symbol|'
r'System|Text|Tuple)(?![a-zA-Z0-9!:_?])', Name.Builtin),
# functions
(r'(generateMatchMethod|aliasMethod|\u03bb|\u028E|fnx|fn|method|'
             r'dmacro|dlecro|macro|dlecrox|lecrox|lecro|syntax)'
r'(?![a-zA-Z0-9!:_?])', Name.Function),
# Numbers
(r'-?0[xX][0-9a-fA-F]+', Number.Hex),
(r'-?(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'-?\d+', Number.Integer),
(r'#\(', Punctuation),
# Operators
(r'(&&>>|\|\|>>|\*\*>>|:::|::|\.\.\.|===|\*\*>|\*\*=|&&>|&&=|'
r'\|\|>|\|\|=|\->>|\+>>|!>>|<>>>|<>>|&>>|%>>|#>>|@>>|/>>|\*>>|'
r'\?>>|\|>>|\^>>|~>>|\$>>|=>>|<<=|>>=|<=>|<\->|=~|!~|=>|\+\+|'
r'\-\-|<=|>=|==|!=|&&|\.\.|\+=|\-=|\*=|\/=|%=|&=|\^=|\|=|<\-|'
r'\+>|!>|<>|&>|%>|#>|\@>|\/>|\*>|\?>|\|>|\^>|~>|\$>|<\->|\->|'
r'<<|>>|\*\*|\?\||\?&|\|\||>|<|\*|\/|%|\+|\-|&|\^|\||=|\$|!|~|'
r'\?|#|\u2260|\u2218|\u2208|\u2209)', Operator),
(r'(and|nand|or|xor|nor|return|import)(?![a-zA-Z0-9_!?])',
Operator),
# Punctuation
(r'(\`\`|\`|\'\'|\'|\.|\,|@@|@|\[|\]|\(|\)|{|})', Punctuation),
#kinds
(r'[A-Z][a-zA-Z0-9_!:?]*', Name.Class),
#default cellnames
(r'[a-z_][a-zA-Z0-9_!:?]*', Name)
]
}
class ClojureLexer(RegexLexer):
"""
Lexer for `Clojure <http://clojure.org/>`_ source code.
*New in Pygments 0.11.*
"""
name = 'Clojure'
aliases = ['clojure', 'clj']
filenames = ['*.clj']
mimetypes = ['text/x-clojure', 'application/x-clojure']
special_forms = [
'.', 'def', 'do', 'fn', 'if', 'let', 'new', 'quote', 'var', 'loop'
]
# It's safe to consider 'ns' a declaration thing because it defines a new
# namespace.
declarations = [
'def-', 'defn', 'defn-', 'defmacro', 'defmulti', 'defmethod',
'defstruct', 'defonce', 'declare', 'definline', 'definterface',
'defprotocol', 'defrecord', 'deftype', 'defproject', 'ns'
]
builtins = [
'*', '+', '-', '->', '/', '<', '<=', '=', '==', '>', '>=', '..',
'accessor', 'agent', 'agent-errors', 'aget', 'alength', 'all-ns',
'alter', 'and', 'append-child', 'apply', 'array-map', 'aset',
'aset-boolean', 'aset-byte', 'aset-char', 'aset-double', 'aset-float',
'aset-int', 'aset-long', 'aset-short', 'assert', 'assoc', 'await',
'await-for', 'bean', 'binding', 'bit-and', 'bit-not', 'bit-or',
'bit-shift-left', 'bit-shift-right', 'bit-xor', 'boolean', 'branch?',
'butlast', 'byte', 'cast', 'char', 'children', 'class',
'clear-agent-errors', 'comment', 'commute', 'comp', 'comparator',
'complement', 'concat', 'conj', 'cons', 'constantly', 'cond', 'if-not',
'construct-proxy', 'contains?', 'count', 'create-ns', 'create-struct',
'cycle', 'dec', 'deref', 'difference', 'disj', 'dissoc', 'distinct',
'doall', 'doc', 'dorun', 'doseq', 'dosync', 'dotimes', 'doto',
'double', 'down', 'drop', 'drop-while', 'edit', 'end?', 'ensure',
'eval', 'every?', 'false?', 'ffirst', 'file-seq', 'filter', 'find',
'find-doc', 'find-ns', 'find-var', 'first', 'float', 'flush', 'for',
'fnseq', 'frest', 'gensym', 'get-proxy-class', 'get',
'hash-map', 'hash-set', 'identical?', 'identity', 'if-let', 'import',
'in-ns', 'inc', 'index', 'insert-child', 'insert-left', 'insert-right',
'inspect-table', 'inspect-tree', 'instance?', 'int', 'interleave',
'intersection', 'into', 'into-array', 'iterate', 'join', 'key', 'keys',
'keyword', 'keyword?', 'last', 'lazy-cat', 'lazy-cons', 'left',
'lefts', 'line-seq', 'list*', 'list', 'load', 'load-file',
'locking', 'long', 'loop', 'macroexpand', 'macroexpand-1',
'make-array', 'make-node', 'map', 'map-invert', 'map?', 'mapcat',
'max', 'max-key', 'memfn', 'merge', 'merge-with', 'meta', 'min',
'min-key', 'name', 'namespace', 'neg?', 'new', 'newline', 'next',
'nil?', 'node', 'not', 'not-any?', 'not-every?', 'not=', 'ns-imports',
'ns-interns', 'ns-map', 'ns-name', 'ns-publics', 'ns-refers',
'ns-resolve', 'ns-unmap', 'nth', 'nthrest', 'or', 'parse', 'partial',
'path', 'peek', 'pop', 'pos?', 'pr', 'pr-str', 'print', 'print-str',
'println', 'println-str', 'prn', 'prn-str', 'project', 'proxy',
'proxy-mappings', 'quot', 'rand', 'rand-int', 'range', 're-find',
're-groups', 're-matcher', 're-matches', 're-pattern', 're-seq',
'read', 'read-line', 'reduce', 'ref', 'ref-set', 'refer', 'rem',
'remove', 'remove-method', 'remove-ns', 'rename', 'rename-keys',
'repeat', 'replace', 'replicate', 'resolve', 'rest', 'resultset-seq',
'reverse', 'rfirst', 'right', 'rights', 'root', 'rrest', 'rseq',
'second', 'select', 'select-keys', 'send', 'send-off', 'seq',
'seq-zip', 'seq?', 'set', 'short', 'slurp', 'some', 'sort',
'sort-by', 'sorted-map', 'sorted-map-by', 'sorted-set',
'special-symbol?', 'split-at', 'split-with', 'str', 'string?',
'struct', 'struct-map', 'subs', 'subvec', 'symbol', 'symbol?',
'sync', 'take', 'take-nth', 'take-while', 'test', 'time', 'to-array',
'to-array-2d', 'tree-seq', 'true?', 'union', 'up', 'update-proxy',
'val', 'vals', 'var-get', 'var-set', 'var?', 'vector', 'vector-zip',
'vector?', 'when', 'when-first', 'when-let', 'when-not',
'with-local-vars', 'with-meta', 'with-open', 'with-out-str',
'xml-seq', 'xml-zip', 'zero?', 'zipmap', 'zipper']
    # valid names for identifiers
    # strictly speaking, a name just can't consist entirely of numbers,
    # but this approximation should be good enough for now
# TODO / should divide keywords/symbols into namespace/rest
# but that's hard, so just pretend / is part of the name
valid_name = r'(?!#)[\w!$%*+<=>?/.#-]+'
def _multi_escape(entries):
return '(%s)' % ('|'.join(re.escape(entry) + ' ' for entry in entries))
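    # Note: _multi_escape appends a space to every alternative, so the
    # resulting pattern only matches a word followed by a space, e.g.
    # _multi_escape(['def', 'do']) -> '(def |do )'.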
tokens = {
'root': [
# the comments - always starting with semicolon
# and going to the end of the line
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'[,\s]+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
(r'0x-?[abcdef\d]+', Number.Hex),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"\\(.|[a-z]+)", String.Char),
# keywords
(r'::?' + valid_name, String.Symbol),
# special operators
(r'~@|[`\'#^~&@]', Operator),
# highlight the special forms
(_multi_escape(special_forms), Keyword),
            # Technically, only the special forms are 'keywords'. The problem
            # is that treating only those as keywords would leave things like
            # 'defn' and 'ns' highlighted as builtins, which looks ugly and
            # weird in most styles. So, as a compromise, we highlight them as
            # Keyword.Declaration.
(_multi_escape(declarations), Keyword.Declaration),
# highlight the builtins
(_multi_escape(builtins), Name.Builtin),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# Clojure accepts vector notation
(r'(\[|\])', Punctuation),
# Clojure accepts map notation
(r'(\{|\})', Punctuation),
# the famous parentheses!
(r'(\(|\))', Punctuation),
],
}
class TeaLangLexer(RegexLexer):
"""
For `Tea <http://teatrove.org/>`_ source code. Only used within a
TeaTemplateLexer.
*New in Pygments 1.5.*
"""
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator),
(r'(and|break|else|foreach|if|in|not|or|reverse)\b',
Keyword),
(r'(as|call|define)\b', Keyword.Declaration),
(r'(true|false|null)\b', Keyword.Constant),
(r'(template)(\s+)', bygroups(Keyword.Declaration, Text), 'template'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\'(\\\\|\\\'|[^\'])*\'', String),
(r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label),
(r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name),
(r'(isa|[.]{3}|[.]{2}|[=#!<>+-/%&;,.\*\\\(\)\[\]\{\}])', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'template': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'import': [
(r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop')
],
}
class CeylonLexer(RegexLexer):
"""
For `Ceylon <http://ceylon-lang.org/>`_ source code.
*New in Pygments 1.6.*
"""
name = 'Ceylon'
aliases = ['ceylon']
filenames = ['*.ceylon']
mimetypes = ['text/x-ceylon']
flags = re.MULTILINE | re.DOTALL
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'(variable|shared|abstract|doc|by|formal|actual|late|native)',
Name.Decorator),
(r'(break|case|catch|continue|default|else|finally|for|in|'
r'variable|if|return|switch|this|throw|try|while|is|exists|dynamic|'
r'nonempty|then|outer|assert)\b', Keyword),
(r'(abstracts|extends|satisfies|adapts|'
r'super|given|of|out|assign|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(function|value|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface|object|alias)(\s+)',
bygroups(Keyword.Declaration, Text), 'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'\\.'|'[^\\]'|'\\\{#[0-9a-fA-F]{4}\}'", String.Char),
(r'".*``.*``.*"', String.Interpol),
(r'(\.)([a-z_][a-zA-Z0-9_]*)',
bygroups(Operator, Name.Attribute)),
(r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator),
(r'\d{1,3}(_\d{3})+\.\d{1,3}(_\d{3})+[kMGTPmunpf]?', Number.Float),
(r'\d{1,3}(_\d{3})+\.[0-9]+([eE][+-]?[0-9]+)?[kMGTPmunpf]?',
Number.Float),
(r'[0-9][0-9]*\.\d{1,3}(_\d{3})+[kMGTPmunpf]?', Number.Float),
(r'[0-9][0-9]*\.[0-9]+([eE][+-]?[0-9]+)?[kMGTPmunpf]?',
Number.Float),
(r'#([0-9a-fA-F]{4})(_[0-9a-fA-F]{4})+', Number.Hex),
(r'#[0-9a-fA-F]+', Number.Hex),
(r'\$([01]{4})(_[01]{4})+', Number.Integer),
(r'\$[01]+', Number.Integer),
(r'\d{1,3}(_\d{3})+[kMGTP]?', Number.Integer),
(r'[0-9]+[kMGTP]?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'[A-Za-z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'import': [
(r'[a-z][a-zA-Z0-9_.]*',
Name.Namespace, '#pop')
],
}
class KotlinLexer(RegexLexer):
"""
For `Kotlin <http://confluence.jetbrains.net/display/Kotlin/>`_
source code.
Additional options accepted:
`unicodelevel`
Determines which Unicode characters this lexer allows for identifiers.
The possible values are:
* ``none`` -- only the ASCII letters and numbers are allowed. This
is the fastest selection.
* ``basic`` -- all Unicode characters from the specification except
category ``Lo`` are allowed.
* ``full`` -- all Unicode characters as specified in the C# specs
are allowed. Note that this means a considerable slowdown since the
``Lo`` category has more than 40,000 characters in it!
The default value is ``basic``.
*New in Pygments 1.5.*
"""
name = 'Kotlin'
aliases = ['kotlin']
filenames = ['*.kt']
mimetypes = ['text/x-kotlin'] # inferred
flags = re.MULTILINE | re.DOTALL | re.UNICODE
# for the range of allowed unicode characters in identifiers,
# see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
levels = {
'none': '@?[_a-zA-Z][a-zA-Z0-9_]*',
'basic': ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
'[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl +
uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'),
'full': ('@?(?:_|[^' +
uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
+ '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
}
tokens = {}
token_variants = True
for levelname, cs_ident in list(levels.items()):
tokens[levelname] = {
'root': [
# method names
(r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
r'(' + cs_ident + ')' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Punctuation)),
(r'^\s*\[.*?\]', Name.Attribute),
(r'[^\S\n]+', Text),
(r'\\\n', Text), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'\n', Text),
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
(r'"(\\\\|\\"|[^"\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
(r'#[ \t]*(if|endif|else|elif|define|undef|'
r'line|error|warning|region|endregion|pragma)\b.*?\n',
Comment.Preproc),
(r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
Keyword)),
(r'(abstract|as|break|catch|'
r'fun|continue|default|delegate|'
r'do|else|enum|extern|false|finally|'
r'fixed|for|goto|if|implicit|in|interface|'
r'internal|is|lock|null|'
r'out|override|private|protected|public|readonly|'
r'ref|return|sealed|sizeof|'
r'when|this|throw|true|try|typeof|'
r'unchecked|unsafe|virtual|void|while|'
r'get|set|new|partial|yield|val|var)\b', Keyword),
(r'(global)(::)', bygroups(Keyword, Punctuation)),
(r'(bool|byte|char|decimal|double|dynamic|float|int|long|'
r'short)\b\??', Keyword.Type),
(r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'),
(r'(package|using)(\s+)', bygroups(Keyword, Text), 'package'),
(cs_ident, Name),
],
'class': [
(cs_ident, Name.Class, '#pop')
],
'package': [
(r'(?=\()', Text, '#pop'), # using (resource)
('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
]
}
def __init__(self, **options):
level = get_choice_opt(options, 'unicodelevel', list(self.tokens.keys()),
'basic')
if level not in self._all_tokens:
# compile the regexes now
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
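# A minimal usage sketch (illustrative, not part of the original file); the
# `unicodelevel` option selects one of the identifier regexes defined above:
#
#     lexer = KotlinLexer(unicodelevel='none')  # ASCII-only identifiers, fastest
#     tokens = list(lexer.get_tokens('fun main() { val x = 1 }'))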
class XtendLexer(RegexLexer):
"""
For `Xtend <http://xtend-lang.org/>`_ source code.
*New in Pygments 1.6.*
"""
name = 'Xtend'
aliases = ['xtend']
filenames = ['*.xtend']
mimetypes = ['text/x-xtend']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
# method names
(r'^(\s*(?:[a-zA-Z_][a-zA-Z0-9_\.\[\]]*\s+)+?)' # return arguments
r'([a-zA-Z_$][a-zA-Z0-9_$]*)' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Operator)),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator),
(r'(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|goto|instanceof|new|return|switch|this|throw|try|while|IF|'
r'ELSE|ELSEIF|ENDIF|FOR|ENDFOR|SEPARATOR|BEFORE|AFTER)\b',
Keyword),
(r'(def|abstract|const|enum|extends|final|implements|native|private|'
r'protected|public|static|strictfp|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Declaration),
(r'(boolean|byte|char|double|float|int|long|short|void)\b',
Keyword.Type),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text),
'class'),
(r'(import)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
(r"(''')", String, 'template'),
(r"(\u00BB)", String, 'template'),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'(\\\\|\\'|[^'])*'", String),
(r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label),
(r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name),
(r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?-]', Operator),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+L?', Number.Integer),
(r'\n', Text)
],
'class': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'import': [
(r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop')
],
'template': [
(r"'''", String, '#pop'),
(r"\u00AB", String, '#pop'),
(r'.', String)
],
}
|
GunoH/intellij-community
|
refs/heads/master
|
python/testData/copyPaste/EmptyLineInList.dst.py
|
166
|
# original function # this is line 1 of the code.
def foo():
print 'f00'
def bar(num):
for _ in range(num):
print 'bar'
bar(7)
<caret> <selection></selection>
|
tkaitchuck/nupic
|
refs/heads/master
|
examples/opf/experiments/spatial_classification/auto_generated/searchDef.py
|
1
|
#! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
###############################################################################
def getSearch(rootDir):
""" This method returns search description. See the following file for the
schema of the dictionary this method returns:
py/grokengine/frameworks/opf/expGenerator/experimentDescriptionSchema.json
The streamDef element defines the stream for this model. The schema for this
element can be found at:
py/grokengine/cluster/database/StreamDef.json
"""
# Form the stream definition
dataPath = os.path.abspath(os.path.join(rootDir, 'datasets', 'scalar_1.csv'))
streamDef = dict(
version = 1,
info = "testSpatialClassification",
streams = [
dict(source="file://%s" % (dataPath),
info="scalar_1.csv",
columns=["*"],
),
],
)
# Generate the experiment description
expDesc = {
"environment": 'grok',
"inferenceArgs":{
"predictedField":"classification",
"predictionSteps": [0],
},
"inferenceType": "MultiStep",
"streamDef": streamDef,
"includedFields": [
{ "fieldName": "field1",
"fieldType": "float",
},
{ "fieldName": "classification",
"fieldType": "string",
},
{ "fieldName": "randomData",
"fieldType": "float",
},
],
"iterationCount": -1,
}
return expDesc
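# A hedged usage sketch (the call site is hypothetical): build the search
# description relative to this file's directory and inspect the stream source.
#
#     search = getSearch(os.path.dirname(os.path.abspath(__file__)))
#     print(search['streamDef']['streams'][0]['source'])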
|
takeshineshiro/django
|
refs/heads/master
|
django/conf/locale/ro/formats.py
|
619
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j F Y, H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y, H:i'
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
|
nadavge/notejam
|
refs/heads/master
|
django/notejam/notes/forms.py
|
6
|
from django import forms
from notes.models import Note
class NoteForm(forms.ModelForm):
class Meta:
model = Note
exclude = ('created_at', 'updated_at', 'user')
|
xen0l/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/heroku/heroku_collaborator.py
|
74
|
#!/usr/bin/python
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: heroku_collaborator
short_description: "Add or delete app collaborators on Heroku"
version_added: "2.6"
description:
- Manages collaborators for Heroku apps.
- If set to C(present) and heroku user is already collaborator, then do nothing.
- If set to C(present) and heroku user is not collaborator, then add user to app.
- If set to C(absent) and heroku user is collaborator, then delete user from app.
author:
- Marcel Arns (@marns93)
requirements:
- heroku3
options:
api_key:
description:
- Heroku API key
apps:
description:
- List of Heroku App names
required: true
suppress_invitation:
description:
- Suppress email invitation when creating collaborator
type: bool
default: "no"
user:
description:
- User ID or e-mail
required: true
state:
description:
- Create or remove the heroku collaborator
choices: ["present", "absent"]
default: "present"
notes:
  - C(HEROKU_API_KEY) and C(TF_VAR_HEROKU_API_KEY) environment variables can be used instead of setting C(api_key).
- If you use I(--check), you can also pass the I(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"].
'''
EXAMPLES = '''
- heroku_collaborator:
api_key: YOUR_API_KEY
user: max.mustermann@example.com
apps: heroku-example-app
state: present
- heroku_collaborator:
api_key: YOUR_API_KEY
user: '{{ item.user }}'
apps: '{{ item.apps | default(apps) }}'
suppress_invitation: '{{ item.suppress_invitation | default(suppress_invitation) }}'
state: '{{ item.state | default("present") }}'
with_items:
- { user: 'a.b@example.com' }
- { state: 'absent', user: 'b.c@example.com', suppress_invitation: false }
- { user: 'x.y@example.com', apps: ["heroku-example-app"] }
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.heroku import HerokuHelper
def add_or_delete_heroku_collaborator(module, client):
user = module.params['user']
state = module.params['state']
affected_apps = []
result_state = False
for app in module.params['apps']:
if app not in client.apps():
module.fail_json(msg='App {0} does not exist'.format(app))
heroku_app = client.apps()[app]
heroku_collaborator_list = [collaborator.user.email for collaborator in heroku_app.collaborators()]
if state == 'absent' and user in heroku_collaborator_list:
if not module.check_mode:
heroku_app.remove_collaborator(user)
affected_apps += [app]
result_state = True
elif state == 'present' and user not in heroku_collaborator_list:
if not module.check_mode:
heroku_app.add_collaborator(user_id_or_email=user, silent=module.params['suppress_invitation'])
affected_apps += [app]
result_state = True
return result_state, affected_apps
def main():
argument_spec = HerokuHelper.heroku_argument_spec()
argument_spec.update(
user=dict(required=True, type='str'),
apps=dict(required=True, type='list'),
suppress_invitation=dict(default=False, type='bool'),
state=dict(default='present', type='str', choices=['present', 'absent']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
client = HerokuHelper(module).get_heroku_client()
has_changed, msg = add_or_delete_heroku_collaborator(module, client)
module.exit_json(changed=has_changed, msg=msg)
if __name__ == '__main__':
main()
|
pre-commit/pre-commit
|
refs/heads/master
|
pre_commit/logging_handler.py
|
1
|
import contextlib
import logging
from typing import Generator
from pre_commit import color
from pre_commit import output
logger = logging.getLogger('pre_commit')
LOG_LEVEL_COLORS = {
'DEBUG': '',
'INFO': '',
'WARNING': color.YELLOW,
'ERROR': color.RED,
}
class LoggingHandler(logging.Handler):
def __init__(self, use_color: bool) -> None:
super().__init__()
self.use_color = use_color
def emit(self, record: logging.LogRecord) -> None:
level_msg = color.format_color(
f'[{record.levelname}]',
LOG_LEVEL_COLORS[record.levelname],
self.use_color,
)
output.write_line(f'{level_msg} {record.getMessage()}')
@contextlib.contextmanager
def logging_handler(use_color: bool) -> Generator[None, None, None]:
handler = LoggingHandler(use_color)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
try:
yield
finally:
logger.removeHandler(handler)
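# A minimal usage sketch (illustrative): install the handler for the duration
# of a block; a warning then prints as "[WARNING] something happened".
#
#     with logging_handler(use_color=True):
#         logger.warning('something happened')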
|
ManageIQ/integration_tests
|
refs/heads/master
|
cfme/utils/__init__.py
|
2
|
import atexit
import os
import re
import subprocess
import threading
from contextlib import contextmanager
from functools import partial
import diaper
from cached_property import cached_property
from werkzeug.local import LocalProxy
# import diaper for backward compatibility
on_rtd = os.environ.get('READTHEDOCS') == 'True'
class TriesExceeded(Exception):
"""Default exception raised when tries() method doesn't catch a func exception"""
pass
class FakeObject:
def __init__(self, **kwargs):
self.__dict__ = kwargs
def fakeobject_or_object(obj, attr, default=None):
if isinstance(obj, str):
return FakeObject(**{attr: obj})
elif not obj:
return FakeObject(**{attr: default})
else:
return obj
def clear_property_cache(obj, *names):
"""
    Clear a cached property regardless of whether it was previously cached.
"""
if isinstance(obj, LocalProxy):
obj = obj._get_current_object()
for name in names:
assert isinstance(getattr(type(obj), name), cached_property)
obj.__dict__.pop(name, None)
class _classproperty(property):
"""Subclass property to make classmethod properties possible"""
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
def classproperty(f):
"""Enables properties for whole classes:
Usage:
>>> class Foo(object):
... @classproperty
... def bar(cls):
... return "bar"
...
>>> print(Foo.bar)
        bar
"""
return _classproperty(classmethod(f))
def at_exit(f, *args, **kwargs):
"""Diaper-protected atexit handler registering. Same syntax as atexit.register()"""
return atexit.register(lambda: diaper(f, *args, **kwargs))
def _prenormalize_text(text):
"""Makes the text lowercase and removes all characters that are not digits, alphas, or spaces"""
# _'s represent spaces so convert those to spaces too
return re.sub(r"[^a-z0-9 ]", "", text.strip().lower().replace('_', ' '))
def _replace_spaces_with(text, delim):
"""Contracts spaces into one character and replaces it with a custom character."""
return re.sub(r"\s+", delim, text)
def normalize_text(text):
"""Converts a string to a lowercase string containing only letters, digits and spaces.
The space is always one character long if it is present.
"""
return _replace_spaces_with(_prenormalize_text(text), ' ')
def attributize_string(text):
"""Converts a string to a lowercase string containing only letters, digits and underscores.
Usable for eg. generating object key names.
The underscore is always one character long if it is present.
"""
return _replace_spaces_with(_prenormalize_text(text), '_')
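# Illustrative examples (derived from the regexes above):
#
#     >>> normalize_text('Hello_World  42!')
#     'hello world 42'
#     >>> attributize_string('Hello_World  42!')
#     'hello_world_42'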
def normalize_space(text):
"""Works in accordance with the XPath's normalize-space() operator.
`Description <https://developer.mozilla.org/en-US/docs/Web/XPath/Functions/normalize-space>`_:
*The normalize-space function strips leading and trailing white-space from a string,
replaces sequences of whitespace characters by a single space, and returns the resulting
string.*
"""
return _replace_spaces_with(text.strip(), ' ')
def tries(num_tries, exceptions, f, *args, **kwargs):
""" Tries to call the function multiple times if specific exceptions occur.
Args:
num_tries: How many times to try if exception is raised
        exceptions: Tuple (or a single exception type) of exceptions that should trigger a retry.
f: Callable to be called.
*args: Arguments to be passed through to the callable
**kwargs: Keyword arguments to be passed through to the callable
Returns:
What ``f`` returns.
Raises:
What ``f`` raises if the try count is exceeded.
"""
caught_exception = TriesExceeded('Tries were exhausted without a func exception')
tries = 0
while tries < num_tries:
tries += 1
try:
return f(*args, **kwargs)
except exceptions as e:
caught_exception = e
pass
else:
raise caught_exception
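# A minimal usage sketch (the callable is hypothetical): retry a flaky call up
# to three times, re-raising the last IOError if all attempts fail.
#
#     result = tries(3, IOError, flaky_read, '/tmp/maybe-there')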
# There are some environment variables that get smuggled in anyway.
# If there is yet another one that will be possibly smuggled in, update this entry.
READ_ENV_UNWANTED = {'SHLVL', '_', 'PWD'}
def read_env(file):
"""Given a :py:class:`py.path.Local` file name, return a dict of exported shell vars and their
values.
Args:
file: A :py:class:`py.path.Local` instance.
Note:
This will only include shell variables that are exported from the file being parsed
Returns:
A :py:class:`dict` of key/value pairs. If the file does not exist or bash could not
parse the file, this dict will be empty.
"""
env_vars = {}
if file.check():
# parse the file with bash, since it's pretty good at it, and dump the env
        # Use env -i to clean up the env (except the very few variables provided by bash itself)
command = ['env', '-i', 'bash', '-c', f'source {file.strpath} && env']
proc = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=1)
# filter out the remaining unwanted things
        # proc.stdout yields bytes; decode each line before splitting on the str "="
        for line in iter(proc.stdout.readline, b''):
            line = line.decode('utf-8', 'replace')
            try:
                key, value = line.split("=", 1)
except ValueError:
continue
if key not in READ_ENV_UNWANTED:
try:
value = int(value.strip())
except (ValueError, TypeError):
value = value.strip()
env_vars[key] = value
stdout, stderr = proc.communicate()
return env_vars
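# A hedged usage sketch (the path is hypothetical):
#
#     import py
#     env = read_env(py.path.local('conf/env.sh'))
#     appliance_ip = env.get('APPLIANCE_IP')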
def safe_string(o):
"""This will make string out of ANYTHING without having to worry about the stupid Unicode errors
    It converts ``o`` to str unless it already is one, and then processes it so that
    in the end there is a harmless ascii string.
Args:
o: Anything.
"""
if not isinstance(o, str):
o = str(o)
if isinstance(o, bytes):
o = o.decode('utf-8', "ignore")
if not isinstance(o, str):
o = o.encode("ascii", "xmlcharrefreplace")
else:
o = o.encode("ascii", "xmlcharrefreplace").decode('ascii')
return o
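# Examples (derived from the branches above; non-ascii characters become
# XML character references):
#
#     >>> safe_string(None)
#     'None'
#     >>> safe_string('ěšč')
#     '&#283;&#353;&#269;'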
def process_pytest_path(path):
# Processes the path elements with regards to []
path = path.lstrip("/")
if len(path) == 0:
return []
try:
seg_end = path.index("/")
except ValueError:
seg_end = None
try:
param_start = path.index("[")
except ValueError:
param_start = None
try:
param_end = path.index("]")
except ValueError:
param_end = None
if seg_end is None:
# Definitely a final segment
return [path]
else:
if (param_start is not None and param_end is not None and seg_end > param_start and
seg_end < param_end):
# The / inside []
segment = path[:param_end + 1]
rest = path[param_end + 1:]
return [segment] + process_pytest_path(rest)
else:
# The / that is not inside []
segment = path[:seg_end]
rest = path[seg_end + 1:]
return [segment] + process_pytest_path(rest)
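# Example (derived from the logic above): a '/' inside [] stays in its segment.
#
#     >>> process_pytest_path('cfme/tests/test_x.py/test_y[a/b]')
#     ['cfme', 'tests', 'test_x.py', 'test_y[a/b]']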
def process_shell_output(value):
"""This function allows you to unify the behaviour when you putput some values to stdout.
You can check the code of the function how exactly does it behave for the particular types of
variables. If no output is expected, it returns None.
Args:
value: Value to be outputted.
Returns:
A tuple consisting of returncode and the output to be printed.
"""
result_lines = []
exit = 0
if isinstance(value, (list, tuple, set)):
for entry in sorted(value):
result_lines.append(entry)
elif isinstance(value, dict):
for key, value in value.items():
result_lines.append(f'{key}={value}')
elif isinstance(value, str):
result_lines.append(value)
elif isinstance(value, bool):
        # a True value maps to exit code 0, and False to exit code 1
exit = int(not value)
else:
# Unknown type, print it
result_lines.append(str(value))
return exit, '\n'.join(result_lines) if result_lines else None
def iterate_pairs(iterable):
"""Iterates over iterable, always taking two items at time.
Eg. ``[1, 2, 3, 4, 5, 6]`` will yield ``(1, 2)``, then ``(3, 4)`` ...
Must have even number of items.
Args:
iterable: An iterable with even number of items to be iterated over.
"""
if len(iterable) % 2 != 0:
raise ValueError('Iterable must have even number of items.')
it = iter(iterable)
for i in it:
yield i, next(it)
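# Example (derived from the docstring above):
#
#     >>> list(iterate_pairs([1, 2, 3, 4]))
#     [(1, 2), (3, 4)]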
def icastmap(t, i, *args, **kwargs):
"""Works like the map() but is made specially to map classes on iterables. A generator version.
This function only applies the ``t`` to the item of ``i`` if it is not of that type.
Args:
t: The class that you want all the yielded items to be type of.
i: Iterable with items to be cast.
Returns:
A generator.
"""
for item in i:
if isinstance(item, t):
yield item
else:
yield t(item, *args, **kwargs)
def castmap(t, i, *args, **kwargs):
"""Works like the map() but is made specially to map classes on iterables.
This function only applies the ``t`` to the item of ``i`` if it is not of that type.
Args:
        t: The class that you want all the items in the list to be type of.
i: Iterable with items to be cast.
Returns:
A list.
"""
return list(icastmap(t, i, *args, **kwargs))
class InstanceClassMethod:
""" Decorator-descriptor that enables you to use any method both as class and instance one
Usage:
.. code-block:: python
class SomeClass(object):
@InstanceClassMethod
def a_method(self):
the_instance_variant()
@a_method.classmethod
def a_method(cls):
the_class_variant()
i = SomeClass()
i.a_method()
SomeClass.a_method()
# Both are possible
If you don't pass ``classmethod`` the "instance" method, the one that was passed first will
be called for both kinds of invocation.
"""
def __init__(self, instance_or_class_method):
self.instance_or_class_method = instance_or_class_method
self.class_method = None
def classmethod(self, class_method):
self.class_method = class_method
return self
def __get__(self, o, t):
if o is None:
# classmethod
return partial(self.class_method or self.instance_or_class_method, t)
else:
# instancemethod
return partial(self.instance_or_class_method, o)
class ParamClassName:
""" ParamClassName is a Descriptor to help when using classes and instances as parameters
Note: This descriptor is a hack until collections are implemented everywhere
Usage:
.. code-block:: python
class Provider(object):
_param_name = ParamClassName('name')
def __init__(self, name):
self.name = name
When accessing the ``_param_name`` on the class object it will return the ``__name__`` of the
class by default. When accessing the ``_param_name`` on an instance of the class, it will return
the attribute that is passed in.
"""
def __init__(self, instance_attr, class_attr='__name__'):
self.instance_attr = instance_attr
self.class_attr = class_attr
def __get__(self, instance, owner):
if instance:
return getattr(instance, self.instance_attr)
else:
return getattr(owner, self.class_attr)
@contextmanager
def periodic_call(period_seconds, call, args=None, kwargs=None):
timer = None
args = args or []
kwargs = kwargs or {}
def timer_event():
call(*args, **kwargs)
reschedule()
def reschedule():
nonlocal timer
timer = threading.Timer(period_seconds, timer_event)
timer.start()
reschedule()
try:
yield
finally:
timer.cancel()
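# A minimal usage sketch (names are illustrative): call heartbeat() every 30
# seconds while the block runs; the timer is cancelled on exit.
#
#     with periodic_call(30, heartbeat, args=[session]):
#         do_long_running_work()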
|
jendap/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/kernel_tests/estimator_test.py
|
81
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimator.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.contrib.distributions.python.ops import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_metrics
from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_no_variables
from tensorflow.contrib.learn.python.learn.estimators.head_test import _assert_summary_tags
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
class EstimatorHeadDistributionRegressionTest(test.TestCase):
def _assert_output_alternatives(self, model_fn_ops):
    self.assertEqual({
None: constants.ProblemType.LINEAR_REGRESSION
}, {
k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
})
def testNormalLocScaleLogits(self):
# We will bias logits[..., 1] so that: logits[..., 1]=0 implies scale=1.
scale_bias = np.log(np.expm1(1.))
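    # Added note: softplus(x) = log(1 + exp(x)), so softplus(scale_bias)
    # = log(1 + expm1(1)) = log(e) = 1; a zero logit therefore gives scale=1.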
def softplus(x):
return np.log1p(np.exp(x))
def actual_loss(logits, labels):
mu = actual_mean(logits)
sigma = actual_stddev(logits)
labels = np.squeeze(labels, -1)
z = (labels - mu) / sigma
loss = 0.5 * (z**2. + np.log(2. * np.pi)) + np.log(sigma)
return loss.mean()
def actual_mean(logits):
return logits[..., 0]
def actual_stddev(logits):
return softplus(logits[..., 1] + scale_bias)
def make_distribution_fn(logits):
return normal_lib.Normal(
loc=logits[..., 0],
scale=nn_ops.softplus(logits[..., 1] + scale_bias))
head = estimator_lib.estimator_head_distribution_regression(
make_distribution_fn,
logits_dimension=2)
labels = np.float32([[-1.],
[0.],
[1.]])
logits = np.float32([[0., -1],
[1, 0.5],
[-1, 1]])
with ops.Graph().as_default(), session.Session():
# Convert to tensor so we can index into head.distributions.
tflogits = ops.convert_to_tensor(logits, name="logits")
model_fn_ops = head.create_model_fn_ops(
{},
labels=labels,
mode=model_fn.ModeKeys.TRAIN,
train_op_fn=head_lib.no_op_train_fn,
logits=tflogits)
self._assert_output_alternatives(model_fn_ops)
_assert_summary_tags(self, ["loss"])
_assert_no_variables(self)
loss = actual_loss(logits, labels)
_assert_metrics(self, loss, {"loss": loss}, model_fn_ops)
# Now we verify the underlying distribution was correctly constructed.
expected_mean = logits[..., 0]
self.assertAllClose(
expected_mean,
head.distribution(tflogits).mean().eval(),
rtol=1e-6, atol=0.)
expected_stddev = softplus(logits[..., 1] + scale_bias)
self.assertAllClose(
expected_stddev,
head.distribution(tflogits).stddev().eval(),
rtol=1e-6, atol=0.)
# Should have created only one distribution.
self.assertEqual(1, len(head.distributions))
if __name__ == "__main__":
test.main()
|
CydarLtd/ansible
|
refs/heads/devel
|
lib/ansible/modules/storage/infinidat/infini_export.py
|
69
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Gregory Shulov (gregory.shulov@gmail.com)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: infini_export
version_added: 2.3
short_description: Create, Delete or Modify NFS Exports on Infinibox
description:
- This module creates, deletes or modifies NFS exports on Infinibox.
author: Gregory Shulov (@GR360RY)
options:
name:
description:
- Export name. Should always start with C(/). (ex. name=/data)
aliases: ['export', 'path']
required: true
state:
description:
- Creates/Modifies export when present and removes when absent.
required: false
default: "present"
choices: [ "present", "absent" ]
inner_path:
description:
- Internal path of the export.
default: "/"
client_list:
description:
- List of dictionaries with client entries. See examples.
Check infini_export_client module to modify individual NFS client entries for export.
default: "All Hosts(*), RW, no_root_squash: True"
required: false
filesystem:
description:
- Name of exported file system.
required: true
extends_documentation_fragment:
- infinibox
'''
EXAMPLES = '''
- name: Export bar filesystem under foo pool as /data
infini_export:
name: /data01
filesystem: foo
user: admin
password: secret
system: ibox001
- name: Export and specify client list explicitly
infini_export:
name: /data02
filesystem: foo
client_list:
- client: 192.168.0.2
access: RW
no_root_squash: True
- client: 192.168.0.100
access: RO
no_root_squash: False
- client: 192.168.0.10-192.168.0.20
access: RO
no_root_squash: False
system: ibox001
user: admin
password: secret
'''
RETURN = '''
'''
HAS_INFINISDK = True
try:
from infinisdk import InfiniBox, core
except ImportError:
HAS_INFINISDK = False
from ansible.module_utils.infinibox import *
from munch import unmunchify
def transform(d):
return frozenset(d.items())
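# Added note: frozenset(d.items()) makes each client-entry dict hashable, so
# permission lists can be compared order-insensitively as sets in update_export.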
@api_wrapper
def get_filesystem(module, system):
"""Return Filesystem or None"""
try:
return system.filesystems.get(name=module.params['filesystem'])
    except Exception:
return None
@api_wrapper
def get_export(module, filesystem, system):
"""Retrun export if found. When not found return None"""
export = None
exports_to_list = system.exports.to_list()
for e in exports_to_list:
if e.get_export_path() == module.params['name']:
export = e
break
return export
@api_wrapper
def update_export(module, export, filesystem, system):
""" Create new filesystem or update existing one"""
changed = False
name = module.params['name']
client_list = module.params['client_list']
if export is None:
if not module.check_mode:
export = system.exports.create(export_path=name, filesystem=filesystem)
if client_list:
export.update_permissions(client_list)
changed = True
else:
if client_list:
if set(map(transform, unmunchify(export.get_permissions()))) != set(map(transform, client_list)):
if not module.check_mode:
export.update_permissions(client_list)
changed = True
module.exit_json(changed=changed)
@api_wrapper
def delete_export(module, export):
""" Delete file system"""
if not module.check_mode:
export.delete()
module.exit_json(changed=True)
def main():
argument_spec = infinibox_argument_spec()
argument_spec.update(
dict(
name = dict(required=True),
state = dict(default='present', choices=['present', 'absent']),
filesystem = dict(required=True),
client_list = dict(type='list')
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
if not HAS_INFINISDK:
module.fail_json(msg='infinisdk is required for this module')
state = module.params['state']
system = get_system(module)
filesystem = get_filesystem(module, system)
export = get_export(module, filesystem, system)
if filesystem is None:
module.fail_json(msg='Filesystem {} not found'.format(module.params['filesystem']))
if state == 'present':
update_export(module, export, filesystem, system)
elif export and state == 'absent':
delete_export(module, export)
elif export is None and state == 'absent':
module.exit_json(changed=False)
# Import Ansible Utilities
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
alexsmx/djangoAppengineSrcTemplate
|
refs/heads/master
|
django/contrib/webdesign/lorem_ipsum.py
|
439
|
"""
Utility functions for generating "lorem ipsum" Latin text.
"""
import random
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti')
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
"""
Returns a randomly generated sentence of lorem ipsum text.
The first word is capitalized, and the sentence ends in either a period or
question mark. Commas are added at random.
"""
# Determine the number of comma-separated sections and number of words in
# each section for this sentence.
sections = [u' '.join(random.sample(WORDS, random.randint(3, 12))) for i in range(random.randint(1, 5))]
s = u', '.join(sections)
# Convert to sentence case and add end punctuation.
return u'%s%s%s' % (s[0].upper(), s[1:], random.choice('?.'))
def paragraph():
"""
Returns a randomly generated paragraph of lorem ipsum text.
The paragraph consists of between 1 and 4 sentences, inclusive.
"""
return u' '.join([sentence() for i in range(random.randint(1, 4))])
def paragraphs(count, common=True):
"""
Returns a list of paragraphs as returned by paragraph().
If `common` is True, then the first paragraph will be the standard
'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
Latin text. Either way, subsequent paragraphs will be random Latin text.
"""
paras = []
for i in range(count):
if common and i == 0:
paras.append(COMMON_P)
else:
paras.append(paragraph())
return paras
def words(count, common=True):
"""
Returns a string of `count` lorem ipsum words separated by a single space.
If `common` is True, then the first 19 words will be the standard
'lorem ipsum' words. Otherwise, all words will be selected randomly.
"""
if common:
word_list = list(COMMON_WORDS)
else:
word_list = []
c = len(word_list)
if count > c:
count -= c
while count > 0:
c = min(count, len(WORDS))
count -= c
word_list += random.sample(WORDS, c)
else:
word_list = word_list[:count]
return u' '.join(word_list)
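# Example (deterministic, since the first words come from COMMON_WORDS):
#
#     >>> words(7)
#     u'lorem ipsum dolor sit amet consectetur adipisicing'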
|
zhjunlang/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_dummy_threading.py
|
182
|
from test import support
import unittest
import dummy_threading as _threading
import time
class DummyThreadingTestCase(unittest.TestCase):
class TestThread(_threading.Thread):
def run(self):
global running
global sema
global mutex
# Uncomment if testing another module, such as the real 'threading'
# module.
#delay = random.random() * 2
delay = 0
if support.verbose:
print('task', self.name, 'will run for', delay, 'sec')
sema.acquire()
mutex.acquire()
running += 1
if support.verbose:
print(running, 'tasks are running')
mutex.release()
time.sleep(delay)
if support.verbose:
print('task', self.name, 'done')
mutex.acquire()
running -= 1
if support.verbose:
print(self.name, 'is finished.', running, 'tasks are running')
mutex.release()
sema.release()
def setUp(self):
self.numtasks = 10
global sema
sema = _threading.BoundedSemaphore(value=3)
global mutex
mutex = _threading.RLock()
global running
running = 0
self.threads = []
def test_tasks(self):
for i in range(self.numtasks):
t = self.TestThread(name="<thread %d>"%i)
self.threads.append(t)
t.start()
if support.verbose:
print('waiting for all tasks to complete')
for t in self.threads:
t.join()
if support.verbose:
print('all tasks done')
def test_main():
support.run_unittest(DummyThreadingTestCase)
if __name__ == '__main__':
test_main()
|
siggame/discuss
|
refs/heads/develop
|
discuss/converse/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
jonathanmz34/ztransfert
|
refs/heads/master
|
node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings_test.py
|
1446
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the MSVSSettings.py file."""
import StringIO
import unittest
import gyp.MSVSSettings as MSVSSettings
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def _ExpectedWarnings(self, expected):
"""Compares recorded lines to expected warnings."""
self.stderr.seek(0)
actual = self.stderr.read().split('\n')
actual = [line for line in actual if line]
self.assertEqual(sorted(expected), sorted(actual))
def testValidateMSVSSettings_tool_names(self):
"""Tests that only MSVS tool names are allowed."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {},
'VCLinkerTool': {},
'VCMIDLTool': {},
'foo': {},
'VCResourceCompilerTool': {},
'VCLibrarianTool': {},
'VCManifestTool': {},
'ClCompile': {}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized tool foo',
'Warning: unrecognized tool ClCompile'])
def testValidateMSVSSettings_settings(self):
"""Tests that for invalid MSVS settings."""
MSVSSettings.ValidateMSVSSettings(
{'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '5',
'BrowseInformation': 'fdkslj',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '-1',
'CompileAs': '1',
'DebugInformationFormat': '2',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': '1',
'ExceptionHandling': '1',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '1',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '1',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'string1;string2',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '1',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '1',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalDependencies_excluded': 'file3',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '2',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'CLRImageType': '2',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '2',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': '2',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'ErrorReporting': '2',
'FixedBaseAddress': '2',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '2',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '2',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '2',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '2',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '2',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'true',
'Version': 'a string1'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'CPreprocessOptions': 'a string1',
'DefaultCharType': '1',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '1',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'notgood': 'bogus',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'VCResourceCompilerTool': {
'AdditionalOptions': 'a string1',
'AdditionalIncludeDirectories': 'folder1;folder2',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'notgood2': 'bogus',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a string1',
'ManifestResourceFile': 'a_file_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'truel',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}},
self.stderr)
self._ExpectedWarnings([
'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
'index value (5) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/BrowseInformation, '
"invalid literal for int() with base 10: 'fdkslj'",
'Warning: for VCCLCompilerTool/CallingConvention, '
'index value (-1) not in expected range [0, 4)',
'Warning: for VCCLCompilerTool/DebugInformationFormat, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
'Warning: for VCLinkerTool/TargetMachine, '
'converted value for 2 not specified.',
'Warning: unrecognized setting VCMIDLTool/notgood',
'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
'Warning: for VCManifestTool/UpdateFileHashes, '
"expected bool; got 'truel'"
''])
def testValidateMSBuildSettings_settings(self):
"""Tests that for invalid MSBuild settings."""
MSVSSettings.ValidateMSBuildSettings(
{'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': ['string1', 'string2'],
'AdditionalUsingDirectories': 'folder1;folder2',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'false',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'BuildingInIDE': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'CompileAsManaged': 'true',
'CreateHotpatchableImage': 'true',
'DebugInformationFormat': 'ProgramDatabase',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'string1;string2',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'Enableprefast': 'bogus',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'SyncCThrow',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Precise',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2',
'ForcedUsingFiles': 'file1;file2',
'FunctionLevelLinking': 'false',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'false',
'MinimalRebuild': 'true',
'MultiProcessorCompilation': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Disabled',
'PrecompiledHeader': 'NotUsing',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'string1;string2',
'PreprocessOutputPath': 'a string1',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'false',
'ProcessorNumber': '33',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TreatSpecificWarningsAsErrors': 'string1;string2',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'string1;string2',
'UseFullPaths': 'true',
'UseUnicodeForAssemblerListing': 'true',
'WarningLevel': 'TurnOffAllWarnings',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name',
'ZZXYZ': 'bogus'},
'Link': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalManifestDependencies': 'file1;file2',
'AdditionalOptions': 'a string1',
'AddModuleNamesToAssembly': 'file1;file2',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2',
'BaseAddress': 'a string1',
'BuildingInIDE': 'true',
'CLRImageType': 'ForceIJWImage',
'CLRSupportLastError': 'Enabled',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'CreateHotPatchableImage': 'X86Image',
'DataExecutionPrevention': 'false',
'DelayLoadDLLs': 'file1;file2',
'DelaySign': 'true',
'Driver': 'NotSet',
'EmbedManagedResourceFile': 'file1;file2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'true',
'EntryPointSymbol': 'a string1',
'FixedBaseAddress': 'false',
'ForceFileOutput': 'Enabled',
'ForceSymbolReferences': 'file1;file2',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a string1',
'HeapReserveSize': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'a_file_list',
'ImageHasSafeExceptionHandlers': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'false',
'LinkDLL': 'true',
'LinkErrorReporting': 'SendErrorReport',
'LinkStatus': 'true',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a string1',
'MidlCommandFile': 'a_file_name',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'MSDOSStubFileName': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': 'false',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'PreventDllBinding': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SectionAlignment': '33',
'SetChecksum': 'true',
'ShowProgress': 'LinkVerboseREF',
'SpecifySectionAttributes': 'a string1',
'StackCommitSize': 'a string1',
'StackReserveSize': 'a string1',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Console',
'SupportNobindOfDelayLoadedDLL': 'true',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TrackerLogDirectory': 'a_folder',
'TreatLinkerWarningAsErrors': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'AsInvoker',
'UACUIAccess': 'true',
'Version': 'a string1'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'Culture': '0x236',
'IgnoreStandardIncludePath': 'true',
'NullTerminateStrings': 'true',
'PreprocessorDefinitions': 'string1;string2',
'ResourceOutputFileName': 'a string1',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'UndefinePreprocessorDefinitions': 'string1;string2'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'ApplicationConfigurationMode': 'true',
'ClientStubFile': 'a_file_name',
'CPreprocessOptions': 'a string1',
'DefaultCharType': 'Signed',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'EnableCustom',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateClientFiles': 'Stub',
'GenerateServerFiles': 'None',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'LocaleID': '33',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a string1',
'PreprocessorDefinitions': 'string1;string2',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'ServerStubFile': 'a_file_name',
'StructMemberAlignment': 'NotSet',
'SuppressCompilerWarnings': 'true',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Itanium',
'TrackerLogDirectory': 'a_folder',
'TypeLibFormat': 'NewFormat',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'string1;string2',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '1'},
'Lib': {
'AdditionalDependencies': 'file1;file2',
'AdditionalLibraryDirectories': 'folder1;folder2',
'AdditionalOptions': 'a string1',
'DisplayLibrary': 'a string1',
'ErrorReporting': 'PromptImmediately',
'ExportNamedFunctions': 'string1;string2',
'ForceSymbolReferences': 'a string1',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2',
'LinkTimeCodeGeneration': 'true',
'MinimumRequiredVersion': 'a string1',
'ModuleDefinitionFile': 'a_file_name',
'Name': 'a_file_name',
'OutputFile': 'a_file_name',
'RemoveObjects': 'file1;file2',
'SubSystem': 'Console',
'SuppressStartupBanner': 'true',
'TargetMachine': 'MachineX86i',
'TrackerLogDirectory': 'a_folder',
'TreatLibWarningAsErrors': 'true',
'UseUnicodeResponseFiles': 'true',
'Verbose': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2',
'AdditionalOptions': 'a string1',
'AssemblyIdentity': 'a string1',
'ComponentFileName': 'a_file_name',
'EnableDPIAwareness': 'fal',
'GenerateCatalogFiles': 'truel',
'GenerateCategoryTags': 'true',
'InputResourceManifests': 'a string1',
'ManifestFromManagedAssembly': 'a_file_name',
'notgood3': 'bogus',
'OutputManifestFile': 'a_file_name',
'OutputResourceManifests': 'a string1',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressDependencyElement': 'true',
'SuppressStartupBanner': 'true',
'TrackerLogDirectory': 'a_folder',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'a_file_name'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}},
self.stderr)
self._ExpectedWarnings([
'Warning: unrecognized setting ClCompile/Enableprefast',
'Warning: unrecognized setting ClCompile/ZZXYZ',
'Warning: unrecognized setting Manifest/notgood3',
'Warning: for Manifest/GenerateCatalogFiles, '
"expected bool; got 'truel'",
'Warning: for Lib/TargetMachine, unrecognized enumerated value '
'MachineX86i',
"Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"])
def testConvertToMSBuildSettings_empty(self):
"""Tests an empty conversion."""
msvs_settings = {}
expected_msbuild_settings = {}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_minimal(self):
"""Tests a minimal conversion."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': '0',
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': '1',
'ErrorReporting': '1',
'DataExecutionPrevention': '2',
},
}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/foo',
'BasicRuntimeChecks': 'Default',
},
'Link': {
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'LinkErrorReporting': 'PromptImmediately',
'DataExecutionPrevention': 'true',
},
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_warnings(self):
"""Tests conversion that generates warnings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2',
# These are incorrect values:
'BasicRuntimeChecks': '12',
'BrowseInformation': '21',
'UsePrecompiledHeader': '13',
'GeneratePreprocessedFile': '14'},
'VCLinkerTool': {
# These are incorrect values:
'Driver': '10',
'LinkTimeCodeGeneration': '31',
'ErrorReporting': '21',
'FixedBaseAddress': '6'},
'VCResourceCompilerTool': {
            # Custom: decimal culture ID; converted to hex below.
'Culture': '1003'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': '1',
'AdditionalOptions': '2'},
'Link': {},
'ResourceCompile': {
            # Custom: 1003 decimal == 0x03eb hex.
'Culture': '0x03eb'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([
'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
'MSBuild, index value (12) not in expected range [0, 4)',
'Warning: while converting VCCLCompilerTool/BrowseInformation to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
'MSBuild, index value (13) not in expected range [0, 3)',
'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
'MSBuild, value must be one of [0, 1, 2]; got 14',
'Warning: while converting VCLinkerTool/Driver to '
'MSBuild, index value (10) not in expected range [0, 4)',
'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
'MSBuild, index value (31) not in expected range [0, 5)',
'Warning: while converting VCLinkerTool/ErrorReporting to '
'MSBuild, index value (21) not in expected range [0, 3)',
'Warning: while converting VCLinkerTool/FixedBaseAddress to '
'MSBuild, index value (6) not in expected range [0, 3)',
])
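  # Note: enumerated MSVS settings are stored as small integer indices into
  # fixed tables of MSBuild names (e.g. VCLinkerTool/SubSystem '1' maps to
  # 'Console'); as the warnings above show, out-of-range indices are dropped
  # from the converted output instead of being translated.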
def testConvertToMSBuildSettings_full_synthetic(self):
"""Tests conversion of all the MSBuild settings."""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': '0',
'BasicRuntimeChecks': '1',
'BrowseInformation': '2',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': '0',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': '0',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '1',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '0',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'GeneratePreprocessedFile': '1',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '2',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderThrough': 'a_file_name',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': '0',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1',
'SuppressStartupBanner': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '1',
'UseUnicodeResponseFiles': 'true',
'WarnAsError': 'true',
'WarningLevel': '2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'VCLinkerTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '0',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': '1',
'CLRThreadAttribute': '2',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': '1',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '0',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'ErrorReporting': '0',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'file1;file2;file3',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': '2',
'LinkIncremental': '1',
'LinkLibraryDependencies': 'true',
'LinkTimeCodeGeneration': '2',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'OptimizeForWindows98': '1',
'OptimizeReferences': '0',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'true',
'ShowProgress': '0',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': '2',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '3',
'TerminalServerAware': '2',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': '1',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'false',
'UseUnicodeResponseFiles': 'true',
'Version': 'a_string'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '1003',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'VCMIDLTool': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': '0',
'DLLDataFileName': 'a_file_name',
'EnableErrorChecks': '2',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'true',
'TargetEnvironment': '1',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'VCLibrarianTool': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'LinkLibraryDependencies': 'true',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'DependencyInformationFile': 'a_file_name',
'EmbedManifest': 'true',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'ManifestResourceFile': 'my_name',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'true',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string /J',
'AdditionalUsingDirectories': 'folder1;folder2;folder3',
'AssemblerListingLocation': 'a_file_name',
'AssemblerOutput': 'NoListing',
'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
'BrowseInformation': 'true',
'BrowseInformationFile': 'a_file_name',
'BufferSecurityCheck': 'true',
'CallingConvention': 'Cdecl',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'd1;d2;d3',
'EnableEnhancedInstructionSet': 'NotSet',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Prompt',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Neither',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'true',
'ForcedIncludeFiles': 'file1;file2;file3',
'ForcedUsingFiles': 'file1;file2;file3',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'AnySuitable',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': 'a_file_name',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'Create',
'PrecompiledHeaderFile': 'a_file_name',
'PrecompiledHeaderOutputFile': 'a_file_name',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'PreprocessSuppressLineNumbers': 'false',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': 'a_file_name',
'RuntimeLibrary': 'MultiThreaded',
'RuntimeTypeInfo': 'true',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '1Byte',
'SuppressStartupBanner': 'true',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'true',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'UseFullPaths': 'true',
'WarningLevel': 'Level2',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': 'a_file_name'},
'Link': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalManifestDependencies': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AddModuleNamesToAssembly': 'file1;file2;file3',
'AllowIsolation': 'true',
'AssemblyDebug': '',
'AssemblyLinkResource': 'file1;file2;file3',
'BaseAddress': 'a_string',
'CLRImageType': 'ForceIJWImage',
'CLRThreadAttribute': 'STAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'file1;file2;file3',
'DelaySign': 'true',
'Driver': 'Driver',
'EmbedManagedResourceFile': 'file1;file2;file3',
'EnableCOMDATFolding': '',
'EnableUAC': 'true',
'EntryPointSymbol': 'a_string',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'file1;file2;file3',
'FunctionOrder': 'a_file_name',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': 'a_string',
'HeapReserveSize': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ImportLibrary': 'a_file_name',
'KeyContainer': 'a_file_name',
'KeyFile': 'a_file_name',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'NoErrorReport',
'LinkTimeCodeGeneration': 'PGInstrument',
'ManifestFile': 'a_file_name',
'MapExports': 'true',
'MapFileName': 'a_file_name',
'MergedIDLBaseFileName': 'a_file_name',
'MergeSections': 'a_string',
'MidlCommandFile': 'a_file_name',
'ModuleDefinitionFile': 'a_file_name',
'NoEntryPoint': 'true',
'OptimizeReferences': '',
'OutputFile': 'a_file_name',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': 'a_file_name',
'ProgramDatabaseFile': 'a_file_name',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'true',
'ShowProgress': 'NotSet',
'StackCommitSize': 'a_string',
'StackReserveSize': 'a_string',
'StripPrivateSymbols': 'a_file_name',
'SubSystem': 'Windows',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'true',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineARM',
'TerminalServerAware': 'true',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'a_file_name',
'TypeLibraryResourceID': '33',
'UACExecutionLevel': 'HighestAvailable',
'UACUIAccess': 'true',
'Version': 'a_string'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'Culture': '0x03eb',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': 'd1;d2;d3',
'ResourceOutputFileName': 'a_string',
'ShowProgress': 'true',
'SuppressStartupBanner': 'true',
'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
'Midl': {
'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'CPreprocessOptions': 'a_string',
'DefaultCharType': 'Unsigned',
'DllDataFileName': 'a_file_name',
'EnableErrorChecks': 'All',
'ErrorCheckAllocations': 'true',
'ErrorCheckBounds': 'true',
'ErrorCheckEnumRange': 'true',
'ErrorCheckRefPointers': 'true',
'ErrorCheckStubData': 'true',
'GenerateStublessProxies': 'true',
'GenerateTypeLibrary': 'true',
'HeaderFileName': 'a_file_name',
'IgnoreStandardIncludePath': 'true',
'InterfaceIdentifierFileName': 'a_file_name',
'MkTypLibCompatible': 'true',
'OutputDirectory': 'a_string',
'PreprocessorDefinitions': 'd1;d2;d3',
'ProxyFileName': 'a_file_name',
'RedirectOutputAndErrors': 'a_file_name',
'StructMemberAlignment': '4',
'SuppressStartupBanner': 'true',
'TargetEnvironment': 'Win32',
'TypeLibraryName': 'a_file_name',
'UndefinePreprocessorDefinitions': 'd1;d2;d3',
'ValidateAllParameters': 'true',
'WarnAsError': 'true',
'WarningLevel': '4'},
'Lib': {
'AdditionalDependencies': 'file1;file2;file3',
'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
'AdditionalOptions': 'a_string',
'ExportNamedFunctions': 'd1;d2;d3',
'ForceSymbolReferences': 'a_string',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
'ModuleDefinitionFile': 'a_file_name',
'OutputFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'UseUnicodeResponseFiles': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'file1;file2;file3',
'AdditionalOptions': 'a_string',
'AssemblyIdentity': 'a_string',
'ComponentFileName': 'a_file_name',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'a_string',
'OutputManifestFile': 'a_file_name',
'RegistrarScriptFile': 'a_file_name',
'ReplacementsFile': 'a_file_name',
'SuppressStartupBanner': 'true',
'TypeLibraryFile': 'a_file_name',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'a_file_name',
'VerboseOutput': 'true'},
'ManifestResourceCompile': {
'ResourceOutputFileName': 'my_name'},
'ProjectReference': {
'LinkLibraryDependencies': 'true',
'UseLibraryDependencyInputs': 'false'},
'': {
'EmbedManifest': 'true',
'GenerateManifest': 'true',
'IgnoreImportLibrary': 'true',
'LinkIncremental': 'false'}}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
def testConvertToMSBuildSettings_actual(self):
"""Tests the conversion of an actual project.
A VS2008 project with most of the options defined was created through the
VS2008 IDE. It was then converted to VS2010. The tool settings found in
the .vcproj and .vcxproj files were converted to the two dictionaries
msvs_settings and expected_msbuild_settings.
Note that for many settings, the VS2010 converter adds macros like
    %(AdditionalIncludeDirectories) to make sure that inherited values are
included. Since the Gyp projects we generate do not use inheritance,
we removed these macros. They were:
ClCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
AdditionalOptions: ' %(AdditionalOptions)'
AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
ForcedUsingFiles: ';%(ForcedUsingFiles)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
UndefinePreprocessorDefinitions:
';%(UndefinePreprocessorDefinitions)',
Link:
AdditionalDependencies: ';%(AdditionalDependencies)',
AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
AdditionalManifestDependencies:
';%(AdditionalManifestDependencies)',
AdditionalOptions: ' %(AdditionalOptions)',
AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
AssemblyLinkResource: ';%(AssemblyLinkResource)',
DelayLoadDLLs: ';%(DelayLoadDLLs)',
EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
ForceSymbolReferences: ';%(ForceSymbolReferences)',
IgnoreSpecificDefaultLibraries:
';%(IgnoreSpecificDefaultLibraries)',
ResourceCompile:
AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
AdditionalOptions: ' %(AdditionalOptions)',
PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
Manifest:
AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
AdditionalOptions: ' %(AdditionalOptions)',
InputResourceManifests: ';%(InputResourceManifests)',
"""
msvs_settings = {
'VCCLCompilerTool': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)\\a',
'AssemblerOutput': '1',
'BasicRuntimeChecks': '3',
'BrowseInformation': '1',
'BrowseInformationFile': '$(IntDir)\\e',
'BufferSecurityCheck': 'false',
'CallingConvention': '1',
'CompileAs': '1',
'DebugInformationFormat': '4',
'DefaultCharIsUnsigned': 'true',
'Detect64BitPortabilityProblems': 'true',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': '1',
'EnableFiberSafeOptimizations': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'EnablePREfast': 'true',
'ErrorReporting': '2',
'ExceptionHandling': '2',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': '2',
'FloatingPointExceptions': 'true',
'FloatingPointModel': '1',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'GeneratePreprocessedFile': '2',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': '1',
'KeepComments': 'true',
'MinimalRebuild': 'true',
'ObjectFile': '$(IntDir)\\b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMP': 'true',
'Optimization': '3',
'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
'PrecompiledHeaderThrough': 'StdAfx.hd',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
'RuntimeLibrary': '3',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '3',
'SuppressStartupBanner': 'false',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'UsePrecompiledHeader': '0',
'UseUnicodeResponseFiles': 'false',
'WarnAsError': 'true',
'WarningLevel': '3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)\\c'},
'VCLinkerTool': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': '1',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': '3',
'CLRThreadAttribute': '1',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '0',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': '2',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': '1',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'ErrorReporting': '2',
'FixedBaseAddress': '1',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateManifest': 'false',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreDefaultLibraryNames': 'flob;flok',
'IgnoreEmbeddedIDL': 'true',
'IgnoreImportLibrary': 'true',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': '2',
'LinkIncremental': '0',
'LinkLibraryDependencies': 'false',
'LinkTimeCodeGeneration': '1',
'ManifestFile':
'$(IntDir)\\$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'OptimizeForWindows98': '2',
'OptimizeReferences': '2',
'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': '1',
'RegisterOutput': 'true',
'ResourceOnlyDLL': 'true',
'SetChecksum': 'false',
'ShowProgress': '1',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': '1',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNet': 'true',
'TargetMachine': '1',
'TerminalServerAware': '1',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': '2',
'UACUIAccess': 'true',
'UseLibraryDependencyInputs': 'true',
'UseUnicodeResponseFiles': 'false',
'Version': '333'},
'VCResourceCompilerTool': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '3084',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
'ShowProgress': 'true'},
'VCManifestTool': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
'EmbedManifest': 'false',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'ManifestResourceFile':
'$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'UseFAT32Workaround': 'true',
'UseUnicodeResponseFiles': 'false',
'VerboseOutput': 'true'}}
expected_msbuild_settings = {
'ClCompile': {
'AdditionalIncludeDirectories': 'dir1',
'AdditionalOptions': '/more /J',
'AdditionalUsingDirectories': 'test',
'AssemblerListingLocation': '$(IntDir)a',
'AssemblerOutput': 'AssemblyCode',
'BasicRuntimeChecks': 'EnableFastChecks',
'BrowseInformation': 'true',
'BrowseInformationFile': '$(IntDir)e',
'BufferSecurityCheck': 'false',
'CallingConvention': 'FastCall',
'CompileAs': 'CompileAsC',
'DebugInformationFormat': 'EditAndContinue',
'DisableLanguageExtensions': 'true',
'DisableSpecificWarnings': 'abc',
'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
'EnableFiberSafeOptimizations': 'true',
'EnablePREfast': 'true',
'ErrorReporting': 'Queue',
'ExceptionHandling': 'Async',
'ExpandAttributedSource': 'true',
'FavorSizeOrSpeed': 'Size',
'FloatingPointExceptions': 'true',
'FloatingPointModel': 'Strict',
'ForceConformanceInForLoopScope': 'false',
'ForcedIncludeFiles': 'def',
'ForcedUsingFiles': 'ge',
'FunctionLevelLinking': 'true',
'GenerateXMLDocumentationFiles': 'true',
'IgnoreStandardIncludePath': 'true',
'InlineFunctionExpansion': 'OnlyExplicitInline',
'IntrinsicFunctions': 'true',
'MinimalRebuild': 'true',
'ObjectFileName': '$(IntDir)b',
'OmitDefaultLibName': 'true',
'OmitFramePointers': 'true',
'OpenMPSupport': 'true',
'Optimization': 'Full',
'PrecompiledHeader': 'NotUsing', # Actual conversion gives ''
'PrecompiledHeaderFile': 'StdAfx.hd',
'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
'PreprocessKeepComments': 'true',
'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
'PreprocessSuppressLineNumbers': 'true',
'PreprocessToFile': 'true',
'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
'RuntimeLibrary': 'MultiThreadedDebugDLL',
'RuntimeTypeInfo': 'false',
'ShowIncludes': 'true',
'SmallerTypeCheck': 'true',
'StringPooling': 'true',
'StructMemberAlignment': '4Bytes',
'SuppressStartupBanner': 'false',
'TreatWarningAsError': 'true',
'TreatWChar_tAsBuiltInType': 'false',
'UndefineAllPreprocessorDefinitions': 'true',
'UndefinePreprocessorDefinitions': 'wer',
'UseFullPaths': 'true',
'WarningLevel': 'Level3',
'WholeProgramOptimization': 'true',
'XMLDocumentationFileName': '$(IntDir)c'},
'Link': {
'AdditionalDependencies': 'zx',
'AdditionalLibraryDirectories': 'asd',
'AdditionalManifestDependencies': 's2',
'AdditionalOptions': '/mor2',
'AddModuleNamesToAssembly': 'd1',
'AllowIsolation': 'false',
'AssemblyDebug': 'true',
'AssemblyLinkResource': 'd5',
'BaseAddress': '23423',
'CLRImageType': 'ForceSafeILImage',
'CLRThreadAttribute': 'MTAThreadingAttribute',
'CLRUnmanagedCodeCheck': 'true',
'DataExecutionPrevention': '',
'DelayLoadDLLs': 'd4',
'DelaySign': 'true',
'Driver': 'UpOnly',
'EmbedManagedResourceFile': 'd2',
'EnableCOMDATFolding': 'false',
'EnableUAC': 'false',
'EntryPointSymbol': 'f5',
'FixedBaseAddress': 'false',
'ForceSymbolReferences': 'd3',
'FunctionOrder': 'fssdfsd',
'GenerateDebugInformation': 'true',
'GenerateMapFile': 'true',
'HeapCommitSize': '13',
'HeapReserveSize': '12',
'IgnoreAllDefaultLibraries': 'true',
'IgnoreEmbeddedIDL': 'true',
'IgnoreSpecificDefaultLibraries': 'flob;flok',
'ImportLibrary': 'f4',
'KeyContainer': 'f7',
'KeyFile': 'f6',
'LargeAddressAware': 'true',
'LinkErrorReporting': 'QueueForNextLogin',
'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
'MapExports': 'true',
'MapFileName': 'd5',
'MergedIDLBaseFileName': 'f2',
'MergeSections': 'f5',
'MidlCommandFile': 'f1',
'ModuleDefinitionFile': 'sdsd',
'NoEntryPoint': 'true',
'OptimizeReferences': 'true',
'OutputFile': '$(OutDir)$(ProjectName)2.exe',
'PerUserRedirection': 'true',
'Profile': 'true',
'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
'ProgramDatabaseFile': 'Flob.pdb',
'RandomizedBaseAddress': 'false',
'RegisterOutput': 'true',
'SetChecksum': 'false',
'ShowProgress': 'LinkVerbose',
'StackCommitSize': '15',
'StackReserveSize': '14',
'StripPrivateSymbols': 'd3',
'SubSystem': 'Console',
'SupportUnloadOfDelayLoadedDLL': 'true',
'SuppressStartupBanner': 'false',
'SwapRunFromCD': 'true',
'SwapRunFromNET': 'true',
'TargetMachine': 'MachineX86',
'TerminalServerAware': 'false',
'TurnOffAssemblyGeneration': 'true',
'TypeLibraryFile': 'f3',
'TypeLibraryResourceID': '12',
'UACExecutionLevel': 'RequireAdministrator',
'UACUIAccess': 'true',
'Version': '333'},
'ResourceCompile': {
'AdditionalIncludeDirectories': 'f3',
'AdditionalOptions': '/more3',
'Culture': '0x0c0c',
'IgnoreStandardIncludePath': 'true',
'PreprocessorDefinitions': '_UNICODE;UNICODE2',
'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
'ShowProgress': 'true'},
'Manifest': {
'AdditionalManifestFiles': 'sfsdfsd',
'AdditionalOptions': 'afdsdafsd',
'AssemblyIdentity': 'sddfdsadfsa',
'ComponentFileName': 'fsdfds',
'GenerateCatalogFiles': 'true',
'InputResourceManifests': 'asfsfdafs',
'OutputManifestFile': '$(TargetPath).manifestdfs',
'RegistrarScriptFile': 'sdfsfd',
'ReplacementsFile': 'sdffsd',
'SuppressStartupBanner': 'false',
'TypeLibraryFile': 'sfsd',
'UpdateFileHashes': 'true',
'UpdateFileHashesSearchPath': 'sfsd',
'VerboseOutput': 'true'},
'ProjectReference': {
'LinkLibraryDependencies': 'false',
'UseLibraryDependencyInputs': 'true'},
'': {
'EmbedManifest': 'false',
'GenerateManifest': 'false',
'IgnoreImportLibrary': 'true',
'LinkIncremental': ''
},
'ManifestResourceCompile': {
'ResourceOutputFileName':
'$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
}
actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
msvs_settings,
self.stderr)
self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
self._ExpectedWarnings([])
if __name__ == '__main__':
unittest.main()
|
coderbone/SickRage-alt
|
refs/heads/master
|
lib/imdb/__init__.py
|
11
|
"""
imdb package.
This package can be used to retrieve information about a movie or
a person from the IMDb database.
It can fetch data through different media (e.g.: the IMDb web pages,
a SQL database, etc.)
Copyright 2004-2016 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
__all__ = ['IMDb', 'IMDbError', 'Movie', 'Person', 'Character', 'Company',
'available_access_systems']
__version__ = VERSION = '5.2dev20161118'
# Import compatibility module (importing it is enough).
import _compat
import sys, os, ConfigParser, logging
from types import MethodType
from imdb import Movie, Person, Character, Company
import imdb._logging
from imdb._exceptions import IMDbError, IMDbDataAccessError, IMDbParserError
from imdb.utils import build_title, build_name, build_company_name
_aux_logger = logging.getLogger('imdbpy.aux')
# URLs of the main pages for movies, persons, characters and queries.
imdbURL_base = 'http://akas.imdb.com/'
# NOTE: the urls below will be removed in a future version.
# please use the values in the 'urls' attribute
# of the IMDbBase subclass instance.
# http://akas.imdb.com/title/
imdbURL_movie_base = '%stitle/' % imdbURL_base
# http://akas.imdb.com/title/tt%s/
imdbURL_movie_main = imdbURL_movie_base + 'tt%s/'
# http://akas.imdb.com/name/
imdbURL_person_base = '%sname/' % imdbURL_base
# http://akas.imdb.com/name/nm%s/
imdbURL_person_main = imdbURL_person_base + 'nm%s/'
# http://akas.imdb.com/character/
imdbURL_character_base = '%scharacter/' % imdbURL_base
# http://akas.imdb.com/character/ch%s/
imdbURL_character_main = imdbURL_character_base + 'ch%s/'
# http://akas.imdb.com/company/
imdbURL_company_base = '%scompany/' % imdbURL_base
# http://akas.imdb.com/company/co%s/
imdbURL_company_main = imdbURL_company_base + 'co%s/'
# http://akas.imdb.com/keyword/%s/
imdbURL_keyword_main = imdbURL_base + 'keyword/%s/'
# http://akas.imdb.com/chart/top
imdbURL_top250 = imdbURL_base + 'chart/top'
# http://akas.imdb.com/chart/bottom
imdbURL_bottom100 = imdbURL_base + 'chart/bottom'
# http://akas.imdb.com/find?%s
imdbURL_find = imdbURL_base + 'find?%s'
# Name of the configuration file.
confFileName = 'imdbpy.cfg'
class ConfigParserWithCase(ConfigParser.ConfigParser):
"""A case-sensitive parser for configuration files."""
def __init__(self, defaults=None, confFile=None, *args, **kwds):
"""Initialize the parser.
        *defaults* -- default values.
*confFile* -- the file (or list of files) to parse."""
ConfigParser.ConfigParser.__init__(self, defaults=defaults)
if confFile is None:
dotFileName = '.' + confFileName
# Current and home directory.
confFile = [os.path.join(os.getcwd(), confFileName),
os.path.join(os.getcwd(), dotFileName),
os.path.join(os.path.expanduser('~'), confFileName),
os.path.join(os.path.expanduser('~'), dotFileName)]
if os.name == 'posix':
sep = getattr(os.path, 'sep', '/')
# /etc/ and /etc/conf.d/
confFile.append(os.path.join(sep, 'etc', confFileName))
confFile.append(os.path.join(sep, 'etc', 'conf.d',
confFileName))
else:
# etc subdirectory of sys.prefix, for non-unix systems.
confFile.append(os.path.join(sys.prefix, 'etc', confFileName))
for fname in confFile:
try:
self.read(fname)
except (ConfigParser.MissingSectionHeaderError,
ConfigParser.ParsingError), e:
                _aux_logger.warn('Trouble reading config file: %s' % e)
# Stop at the first valid file.
if self.has_section('imdbpy'):
break
def optionxform(self, optionstr):
"""Option names are case sensitive."""
return optionstr
def _manageValue(self, value):
"""Custom substitutions for values."""
if not isinstance(value, (str, unicode)):
return value
vlower = value.lower()
if vlower in self._boolean_states:
return self._boolean_states[vlower]
elif vlower == 'none':
return None
return value
def get(self, section, option, *args, **kwds):
"""Return the value of an option from a given section."""
value = ConfigParser.ConfigParser.get(self, section, option,
*args, **kwds)
return self._manageValue(value)
def items(self, section, *args, **kwds):
"""Return a list of (key, value) tuples of items of the
given section."""
if section != 'DEFAULT' and not self.has_section(section):
return []
keys = ConfigParser.ConfigParser.options(self, section)
return [(k, self.get(section, k, *args, **kwds)) for k in keys]
def getDict(self, section):
"""Return a dictionary of items of the specified section."""
return dict(self.items(section))
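# A minimal usage sketch (the file name and option values are illustrative):
# an imdbpy.cfg containing
#     [imdbpy]
#     accessSystem = http
#     loggingLevel = info
# is parsed case-sensitively, and its section can be handed to IMDb() as
# keyword arguments:
#     cfg = ConfigParserWithCase(confFile='imdbpy.cfg')
#     kwds = cfg.getDict('imdbpy')   # e.g. {'accessSystem': 'http', ...}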
def IMDb(accessSystem=None, *arguments, **keywords):
"""Return an instance of the appropriate class.
    The accessSystem parameter is used to specify the preferred
    access system."""
if accessSystem is None or accessSystem in ('auto', 'config'):
try:
cfg_file = ConfigParserWithCase(*arguments, **keywords)
# Parameters set by the code take precedence.
kwds = cfg_file.getDict('imdbpy')
if 'accessSystem' in kwds:
accessSystem = kwds['accessSystem']
del kwds['accessSystem']
else:
accessSystem = 'http'
kwds.update(keywords)
keywords = kwds
except Exception, e:
import logging
logging.getLogger('imdbpy').warn('Unable to read configuration' \
' file; complete error: %s' % e)
# It just LOOKS LIKE a bad habit: we tried to read config
            # options from some files, but something has gone horribly
# wrong: ignore everything and pretend we were called with
# the 'http' accessSystem.
accessSystem = 'http'
if 'loggingLevel' in keywords:
imdb._logging.setLevel(keywords['loggingLevel'])
del keywords['loggingLevel']
if 'loggingConfig' in keywords:
logCfg = keywords['loggingConfig']
del keywords['loggingConfig']
try:
import logging.config
logging.config.fileConfig(os.path.expanduser(logCfg))
except Exception, e:
logging.getLogger('imdbpy').warn('unable to read logger ' \
'config: %s' % e)
if accessSystem in ('httpThin', 'webThin', 'htmlThin'):
        logging.warn('httpThin was removed in IMDbPY 4.8')
accessSystem = 'http'
if accessSystem in ('http', 'web', 'html'):
from parser.http import IMDbHTTPAccessSystem
return IMDbHTTPAccessSystem(*arguments, **keywords)
elif accessSystem in ('mobile',):
from parser.mobile import IMDbMobileAccessSystem
return IMDbMobileAccessSystem(*arguments, **keywords)
elif accessSystem in ('local', 'files'):
        # The local access system was removed in IMDbPY 4.2.
        raise IMDbError('the local access system was removed in IMDbPY 4.2')
elif accessSystem in ('sql', 'db', 'database'):
try:
from parser.sql import IMDbSqlAccessSystem
except ImportError:
raise IMDbError('the sql access system is not installed')
return IMDbSqlAccessSystem(*arguments, **keywords)
else:
raise IMDbError('unknown kind of data access system: "%s"' \
% accessSystem)
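# A sketch of typical calls (behavior follows the branches above):
#     ia = IMDb()                      # access system from imdbpy.cfg, else 'http'
#     ia = IMDb(loggingLevel='debug')  # consumed here, before class creation
#     ia = IMDb('sql')                 # IMDbError if sql support is missing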
def available_access_systems():
"""Return the list of available data access systems."""
asList = []
    # XXX: is trying to import modules a good thing?
try:
from parser.http import IMDbHTTPAccessSystem
asList.append('http')
except ImportError:
pass
try:
from parser.mobile import IMDbMobileAccessSystem
asList.append('mobile')
except ImportError:
pass
try:
from parser.sql import IMDbSqlAccessSystem
asList.append('sql')
except ImportError:
pass
return asList
# XXX: I'm not sure this is a good guess.
# I suppose that an argument of the IMDb function can be used to
# set a default encoding for the output, and then Movie, Person and
# Character objects can use this default encoding, returning strings.
# Anyway, passing unicode strings to search_movie(), search_person()
# and search_character() methods is always safer.
encoding = getattr(sys.stdin, 'encoding', '') or sys.getdefaultencoding()
class IMDbBase:
"""The base class used to search for a movie/person/character and
to get a Movie/Person/Character object.
    This class cannot directly fetch data of any kind, so you
    have to look for the "real" code in a subclass."""
# The name of the preferred access system (MUST be overridden
# in the subclasses).
accessSystem = 'UNKNOWN'
# Top-level logger for IMDbPY.
_imdb_logger = logging.getLogger('imdbpy')
# Whether to re-raise caught exceptions or not.
_reraise_exceptions = False
def __init__(self, defaultModFunct=None, results=20, keywordsResults=100,
*arguments, **keywords):
"""Initialize the access system.
If specified, defaultModFunct is the function used by
default by the Person, Movie and Character objects, when
accessing their text fields.
"""
# The function used to output the strings that need modification (the
# ones containing references to movie titles and person names).
self._defModFunct = defaultModFunct
# Number of results to get.
try:
results = int(results)
except (TypeError, ValueError):
results = 20
if results < 1:
results = 20
self._results = results
try:
keywordsResults = int(keywordsResults)
except (TypeError, ValueError):
keywordsResults = 100
if keywordsResults < 1:
keywordsResults = 100
self._keywordsResults = keywordsResults
self._reraise_exceptions = keywords.get('reraiseExceptions') or False
self.set_imdb_urls(keywords.get('imdbURL_base') or imdbURL_base)
def set_imdb_urls(self, imdbURL_base):
"""Set the urls used accessing the IMDb site."""
imdbURL_base = imdbURL_base.strip().strip('"\'')
if not imdbURL_base.startswith('http://'):
imdbURL_base = 'http://%s' % imdbURL_base
if not imdbURL_base.endswith('/'):
imdbURL_base = '%s/' % imdbURL_base
# http://akas.imdb.com/title/
        imdbURL_movie_base = '%stitle/' % imdbURL_base
        # http://akas.imdb.com/title/tt%s/
        imdbURL_movie_main = imdbURL_movie_base + 'tt%s/'
        # http://akas.imdb.com/name/
        imdbURL_person_base = '%sname/' % imdbURL_base
        # http://akas.imdb.com/name/nm%s/
        imdbURL_person_main = imdbURL_person_base + 'nm%s/'
        # http://akas.imdb.com/character/
        imdbURL_character_base = '%scharacter/' % imdbURL_base
        # http://akas.imdb.com/character/ch%s/
        imdbURL_character_main = imdbURL_character_base + 'ch%s/'
        # http://akas.imdb.com/company/
        imdbURL_company_base = '%scompany/' % imdbURL_base
        # http://akas.imdb.com/company/co%s/
        imdbURL_company_main = imdbURL_company_base + 'co%s/'
        # http://akas.imdb.com/keyword/%s/
        imdbURL_keyword_main = imdbURL_base + 'keyword/%s/'
        # http://akas.imdb.com/chart/top
        imdbURL_top250 = imdbURL_base + 'chart/top'
        # http://akas.imdb.com/chart/bottom
        imdbURL_bottom100 = imdbURL_base + 'chart/bottom'
        # http://akas.imdb.com/find?%s
        imdbURL_find = imdbURL_base + 'find?%s'
self.urls = dict(
movie_base=imdbURL_movie_base,
movie_main=imdbURL_movie_main,
person_base=imdbURL_person_base,
person_main=imdbURL_person_main,
character_base=imdbURL_character_base,
character_main=imdbURL_character_main,
company_base=imdbURL_company_base,
company_main=imdbURL_company_main,
keyword_main=imdbURL_keyword_main,
top250=imdbURL_top250,
bottom100=imdbURL_bottom100,
find=imdbURL_find)
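        # For example, set_imdb_urls('www.imdb.com') normalizes the base to
        # 'http://www.imdb.com/', after which self.urls['movie_main'] % '0133093'
        # yields 'http://www.imdb.com/title/tt0133093/' (movieID illustrative).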
def _normalize_movieID(self, movieID):
"""Normalize the given movieID."""
# By default, do nothing.
return movieID
def _normalize_personID(self, personID):
"""Normalize the given personID."""
# By default, do nothing.
return personID
def _normalize_characterID(self, characterID):
"""Normalize the given characterID."""
# By default, do nothing.
return characterID
def _normalize_companyID(self, companyID):
"""Normalize the given companyID."""
# By default, do nothing.
return companyID
def _get_real_movieID(self, movieID):
"""Handle title aliases."""
# By default, do nothing.
return movieID
def _get_real_personID(self, personID):
"""Handle name aliases."""
# By default, do nothing.
return personID
def _get_real_characterID(self, characterID):
"""Handle character name aliases."""
# By default, do nothing.
return characterID
def _get_real_companyID(self, companyID):
"""Handle company name aliases."""
# By default, do nothing.
return companyID
def _get_infoset(self, prefname):
"""Return methods with the name starting with prefname."""
infoset = []
excludes = ('%sinfoset' % prefname,)
preflen = len(prefname)
for name in dir(self.__class__):
if name.startswith(prefname) and name not in excludes:
member = getattr(self.__class__, name)
if isinstance(member, MethodType):
infoset.append(name[preflen:].replace('_', ' '))
return infoset
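    # For instance, a subclass defining get_movie_plot() and
    # get_movie_alternate_versions() makes _get_infoset('get_movie_') return
    # ['alternate versions', 'plot']: the prefix is stripped and underscores
    # become spaces (the method names here are illustrative).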
def get_movie_infoset(self):
"""Return the list of info set available for movies."""
return self._get_infoset('get_movie_')
def get_person_infoset(self):
"""Return the list of info set available for persons."""
return self._get_infoset('get_person_')
def get_character_infoset(self):
"""Return the list of info set available for characters."""
return self._get_infoset('get_character_')
def get_company_infoset(self):
"""Return the list of info set available for companies."""
return self._get_infoset('get_company_')
def get_movie(self, movieID, info=Movie.Movie.default_info, modFunct=None):
"""Return a Movie object for the given movieID.
The movieID is something used to univocally identify a movie;
it can be the imdbID used by the IMDb web server, a file
pointer, a line number in a file, an ID in a database, etc.
info is the list of sets of information to retrieve.
If specified, modFunct will be the function used by the Movie
object when accessing its text fields (like 'plot')."""
movieID = self._normalize_movieID(movieID)
movieID = self._get_real_movieID(movieID)
movie = Movie.Movie(movieID=movieID, accessSystem=self.accessSystem)
modFunct = modFunct or self._defModFunct
if modFunct is not None:
movie.set_mod_funct(modFunct)
self.update(movie, info)
return movie
get_episode = get_movie
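    # Sketch of typical use (the movieID and info set are illustrative):
    #     ia = IMDb('http')
    #     movie = ia.get_movie('0133093')                 # default info sets
    #     movie = ia.get_movie('0133093', info=('plot',)) # just the plot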
def _search_movie(self, title, results):
"""Return a list of tuples (movieID, {movieData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_movie(self, title, results=None, _episodes=False):
"""Return a list of Movie objects for a query for the given title.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
# XXX: I suppose it will be much safer if the user provides
        # a unicode string... this is just a guess.
if not isinstance(title, unicode):
title = unicode(title, encoding, 'replace')
if not _episodes:
res = self._search_movie(title, results)
else:
res = self._search_episode(title, results)
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res][:results]
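    # For instance (the query and result handling are illustrative):
    #     for movie in ia.search_movie(u'the matrix', results=5):
    #         print movie.movieID, movie['title']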
def _search_episode(self, title, results):
"""Return a list of tuples (movieID, {movieData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_episode(self, title, results=None):
"""Return a list of Movie objects for a query for the given title.
The results argument is the maximum number of results to return;
this method searches only for titles of tv (mini) series' episodes."""
return self.search_movie(title, results=results, _episodes=True)
def get_person(self, personID, info=Person.Person.default_info,
modFunct=None):
"""Return a Person object for the given personID.
The personID is something used to univocally identify a person;
it can be the imdbID used by the IMDb web server, a file
pointer, a line number in a file, an ID in a database, etc.
info is the list of sets of information to retrieve.
If specified, modFunct will be the function used by the Person
object when accessing its text fields (like 'mini biography')."""
personID = self._normalize_personID(personID)
personID = self._get_real_personID(personID)
person = Person.Person(personID=personID,
accessSystem=self.accessSystem)
modFunct = modFunct or self._defModFunct
if modFunct is not None:
person.set_mod_funct(modFunct)
self.update(person, info)
return person
def _search_person(self, name, results):
"""Return a list of tuples (personID, {personData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_person(self, name, results=None):
"""Return a list of Person objects for a query for the given name.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
if not isinstance(name, unicode):
name = unicode(name, encoding, 'replace')
res = self._search_person(name, results)
return [Person.Person(personID=self._get_real_personID(pi),
data=pd, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for pi, pd in res][:results]
def get_character(self, characterID, info=Character.Character.default_info,
modFunct=None):
"""Return a Character object for the given characterID.
The characterID is something used to univocally identify a character;
it can be the imdbID used by the IMDb web server, a file
pointer, a line number in a file, an ID in a database, etc.
info is the list of sets of information to retrieve.
If specified, modFunct will be the function used by the Character
object when accessing its text fields (like 'biography')."""
characterID = self._normalize_characterID(characterID)
characterID = self._get_real_characterID(characterID)
character = Character.Character(characterID=characterID,
accessSystem=self.accessSystem)
modFunct = modFunct or self._defModFunct
if modFunct is not None:
character.set_mod_funct(modFunct)
self.update(character, info)
return character
def _search_character(self, name, results):
"""Return a list of tuples (characterID, {characterData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_character(self, name, results=None):
"""Return a list of Character objects for a query for the given name.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
if not isinstance(name, unicode):
name = unicode(name, encoding, 'replace')
res = self._search_character(name, results)
return [Character.Character(characterID=self._get_real_characterID(pi),
data=pd, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for pi, pd in res][:results]
def get_company(self, companyID, info=Company.Company.default_info,
modFunct=None):
"""Return a Company object for the given companyID.
The companyID is something used to univocally identify a company;
it can be the imdbID used by the IMDb web server, a file
pointer, a line number in a file, an ID in a database, etc.
info is the list of sets of information to retrieve.
If specified, modFunct will be the function used by the Company
object when accessing its text fields (none, so far)."""
companyID = self._normalize_companyID(companyID)
companyID = self._get_real_companyID(companyID)
company = Company.Company(companyID=companyID,
accessSystem=self.accessSystem)
modFunct = modFunct or self._defModFunct
if modFunct is not None:
company.set_mod_funct(modFunct)
self.update(company, info)
return company
def _search_company(self, name, results):
"""Return a list of tuples (companyID, {companyData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_company(self, name, results=None):
"""Return a list of Company objects for a query for the given name.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
if not isinstance(name, unicode):
name = unicode(name, encoding, 'replace')
res = self._search_company(name, results)
return [Company.Company(companyID=self._get_real_companyID(pi),
data=pd, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for pi, pd in res][:results]
def _search_keyword(self, keyword, results):
"""Return a list of 'keyword' strings."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def search_keyword(self, keyword, results=None):
"""Search for existing keywords, similar to the given one."""
if results is None:
results = self._keywordsResults
try:
results = int(results)
except (ValueError, OverflowError):
results = 100
if not isinstance(keyword, unicode):
keyword = unicode(keyword, encoding, 'replace')
return self._search_keyword(keyword, results)
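    # Keyword searches and lookups chain naturally (keyword illustrative):
    #     for kw in ia.search_keyword(u'dystopia'):
    #         movies = ia.get_keyword(kw, results=10)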
def _get_keyword(self, keyword, results):
"""Return a list of tuples (movieID, {movieData})"""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def get_keyword(self, keyword, results=None):
"""Return a list of movies for the given keyword."""
if results is None:
results = self._keywordsResults
try:
results = int(results)
except (ValueError, OverflowError):
results = 100
# XXX: I suppose it will be much safer if the user provides
        # a unicode string... this is just a guess.
if not isinstance(keyword, unicode):
keyword = unicode(keyword, encoding, 'replace')
res = self._get_keyword(keyword, results)
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res][:results]
def _get_top_bottom_movies(self, kind):
"""Return the list of the top 250 or bottom 100 movies."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
# This method must return a list of (movieID, {movieDict})
# tuples. The kind parameter can be 'top' or 'bottom'.
raise NotImplementedError('override this method')
def get_top250_movies(self):
"""Return the list of the top 250 movies."""
res = self._get_top_bottom_movies('top')
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res]
def get_bottom100_movies(self):
"""Return the list of the bottom 100 movies."""
res = self._get_top_bottom_movies('bottom')
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res]
def new_movie(self, *arguments, **keywords):
"""Return a Movie object."""
# XXX: not really useful...
if 'title' in keywords:
if not isinstance(keywords['title'], unicode):
keywords['title'] = unicode(keywords['title'],
encoding, 'replace')
elif len(arguments) > 1:
            if not isinstance(arguments[1], unicode):
                # A tuple is immutable: copy to a list before reassigning.
                arguments = list(arguments)
                arguments[1] = unicode(arguments[1], encoding, 'replace')
return Movie.Movie(accessSystem=self.accessSystem,
*arguments, **keywords)
def new_person(self, *arguments, **keywords):
"""Return a Person object."""
# XXX: not really useful...
if 'name' in keywords:
if not isinstance(keywords['name'], unicode):
keywords['name'] = unicode(keywords['name'],
encoding, 'replace')
elif len(arguments) > 1:
if not isinstance(arguments[1], unicode):
arguments[1] = unicode(arguments[1], encoding, 'replace')
return Person.Person(accessSystem=self.accessSystem,
*arguments, **keywords)
def new_character(self, *arguments, **keywords):
"""Return a Character object."""
# XXX: not really useful...
if 'name' in keywords:
if not isinstance(keywords['name'], unicode):
keywords['name'] = unicode(keywords['name'],
encoding, 'replace')
elif len(arguments) > 1:
if not isinstance(arguments[1], unicode):
arguments[1] = unicode(arguments[1], encoding, 'replace')
return Character.Character(accessSystem=self.accessSystem,
*arguments, **keywords)
def new_company(self, *arguments, **keywords):
"""Return a Company object."""
# XXX: not really useful...
if 'name' in keywords:
if not isinstance(keywords['name'], unicode):
keywords['name'] = unicode(keywords['name'],
encoding, 'replace')
elif len(arguments) > 1:
if not isinstance(arguments[1], unicode):
arguments[1] = unicode(arguments[1], encoding, 'replace')
return Company.Company(accessSystem=self.accessSystem,
*arguments, **keywords)
def update(self, mop, info=None, override=0):
"""Given a Movie, Person, Character or Company object with only
partial information, retrieve the required set of information.
info is the list of sets of information to retrieve.
        If override is set, the information is retrieved and updated
        even if it's already in the object."""
# XXX: should this be a method of the Movie/Person/Character/Company
        # classes? NO! What about instances created by external functions?
mopID = None
prefix = ''
if isinstance(mop, Movie.Movie):
mopID = mop.movieID
prefix = 'movie'
elif isinstance(mop, Person.Person):
mopID = mop.personID
prefix = 'person'
elif isinstance(mop, Character.Character):
mopID = mop.characterID
prefix = 'character'
elif isinstance(mop, Company.Company):
mopID = mop.companyID
prefix = 'company'
else:
raise IMDbError('object ' + repr(mop) + \
' is not a Movie, Person, Character or Company instance')
if mopID is None:
        # XXX: enough? It's obvious that there are Character
        # objects without a characterID, so I think they should
        # just do nothing when an i.update(character) is tried.
if prefix == 'character':
return
raise IMDbDataAccessError( \
'the supplied object has null movieID, personID or companyID')
if mop.accessSystem == self.accessSystem:
aSystem = self
else:
aSystem = IMDb(mop.accessSystem)
if info is None:
info = mop.default_info
elif info == 'all':
if isinstance(mop, Movie.Movie):
info = self.get_movie_infoset()
elif isinstance(mop, Person.Person):
info = self.get_person_infoset()
elif isinstance(mop, Character.Character):
info = self.get_character_infoset()
else:
info = self.get_company_infoset()
if not isinstance(info, (tuple, list)):
info = (info,)
res = {}
for i in info:
if i in mop.current_info and not override:
continue
if not i:
continue
self._imdb_logger.debug('retrieving "%s" info set', i)
try:
method = getattr(aSystem, 'get_%s_%s' %
(prefix, i.replace(' ', '_')))
except AttributeError:
self._imdb_logger.error('unknown information set "%s"', i)
# Keeps going.
method = lambda *x: {}
try:
ret = method(mopID)
except Exception, e:
self._imdb_logger.critical('caught an exception retrieving ' \
'or parsing "%s" info set for mopID ' \
'"%s" (accessSystem: %s)',
i, mopID, mop.accessSystem, exc_info=True)
ret = {}
# If requested by the user, reraise the exception.
if self._reraise_exceptions:
raise
keys = None
if 'data' in ret:
res.update(ret['data'])
if isinstance(ret['data'], dict):
keys = ret['data'].keys()
if 'info sets' in ret:
for ri in ret['info sets']:
mop.add_to_current_info(ri, keys, mainInfoset=i)
else:
mop.add_to_current_info(i, keys)
if 'titlesRefs' in ret:
mop.update_titlesRefs(ret['titlesRefs'])
if 'namesRefs' in ret:
mop.update_namesRefs(ret['namesRefs'])
if 'charactersRefs' in ret:
mop.update_charactersRefs(ret['charactersRefs'])
mop.set_data(res, override=0)
def get_imdbMovieID(self, movieID):
"""Translate a movieID in an imdbID (the ID used by the IMDb
web server); must be overridden by the subclass."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def get_imdbPersonID(self, personID):
"""Translate a personID in a imdbID (the ID used by the IMDb
web server); must be overridden by the subclass."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def get_imdbCharacterID(self, characterID):
"""Translate a characterID in a imdbID (the ID used by the IMDb
web server); must be overridden by the subclass."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def get_imdbCompanyID(self, companyID):
"""Translate a companyID in a imdbID (the ID used by the IMDb
web server); must be overridden by the subclass."""
# XXX: for the real implementation, see the method of the
# subclass, somewhere under the imdb.parser package.
raise NotImplementedError('override this method')
def _searchIMDb(self, kind, ton, title_kind=None):
"""Search the IMDb akas server for the given title or name."""
# The Exact Primary search system has gone AWOL, so we resort
# to the mobile search. :-/
if not ton:
return None
ton = ton.strip('"')
aSystem = IMDb('mobile')
if kind == 'tt':
searchFunct = aSystem.search_movie
check = 'long imdb title'
elif kind == 'nm':
searchFunct = aSystem.search_person
check = 'long imdb name'
elif kind == 'char':
searchFunct = aSystem.search_character
check = 'long imdb name'
        elif kind == 'co':
            # XXX: are [COUNTRY] codes included in the results?
            searchFunct = aSystem.search_company
            check = 'long imdb name'
        else:
            # Unknown kind: there is nothing sensible to search for.
            return None
try:
searchRes = searchFunct(ton)
except IMDbError:
return None
# When only one result is returned, assume it was from an
# exact match.
if len(searchRes) == 1:
return searchRes[0].getID()
title_only_matches = []
for item in searchRes:
# Return the first perfect match.
if item[check].strip('"') == ton:
# For titles do additional check for kind
if kind != 'tt' or title_kind == item['kind']:
return item.getID()
elif kind == 'tt':
title_only_matches.append(item.getID())
        # imdbpy2sql.py could have detected the wrong kind, so if no
        # title-and-kind match is found, collect all results that match
        # on the title alone.  Return a list of IDs if there are
        # multiple matches (this can happen when searching titles with
        # no title_kind specified).
        # Example: DB: Band of Brothers "tv series" vs "tv mini-series"
if title_only_matches:
if len(title_only_matches) == 1:
return title_only_matches[0]
else:
return title_only_matches
return None
def title2imdbID(self, title, kind=None):
"""Translate a movie title (in the plain text data files format)
to an imdbID.
        Try an Exact Primary Title search on IMDb;
        return None if it's unable to get the imdbID.
        Always specify kind (movie, tv series, video game, etc.);
        otherwise the search can return a list of IDs when multiple
        matches are found.
        """
return self._searchIMDb('tt', title, kind)
def name2imdbID(self, name):
"""Translate a person name in an imdbID.
Try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID."""
return self._searchIMDb('nm', name)
def character2imdbID(self, name):
"""Translate a character name in an imdbID.
Try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID."""
return self._searchIMDb('char', name)
def company2imdbID(self, name):
"""Translate a company name in an imdbID.
Try an Exact Primary Name search on IMDb;
return None if it's unable to get the imdbID."""
return self._searchIMDb('co', name)
def get_imdbID(self, mop):
"""Return the imdbID for the given Movie, Person, Character or Company
object."""
imdbID = None
if mop.accessSystem == self.accessSystem:
aSystem = self
else:
aSystem = IMDb(mop.accessSystem)
if isinstance(mop, Movie.Movie):
if mop.movieID is not None:
imdbID = aSystem.get_imdbMovieID(mop.movieID)
else:
imdbID = aSystem.title2imdbID(build_title(mop, canonical=0,
ptdf=0, appendKind=False),
mop['kind'])
elif isinstance(mop, Person.Person):
if mop.personID is not None:
imdbID = aSystem.get_imdbPersonID(mop.personID)
else:
imdbID = aSystem.name2imdbID(build_name(mop, canonical=1))
elif isinstance(mop, Character.Character):
if mop.characterID is not None:
imdbID = aSystem.get_imdbCharacterID(mop.characterID)
else:
# canonical=0 ?
imdbID = aSystem.character2imdbID(build_name(mop, canonical=1))
elif isinstance(mop, Company.Company):
if mop.companyID is not None:
imdbID = aSystem.get_imdbCompanyID(mop.companyID)
else:
imdbID = aSystem.company2imdbID(build_company_name(mop))
else:
            raise IMDbError('object ' + repr(mop) + \
                    ' is not a Movie, Person, Character or Company instance')
return imdbID
def get_imdbURL(self, mop):
"""Return the main IMDb URL for the given Movie, Person,
Character or Company object, or None if unable to get it."""
imdbID = self.get_imdbID(mop)
if imdbID is None:
return None
if isinstance(mop, Movie.Movie):
url_firstPart = imdbURL_movie_main
elif isinstance(mop, Person.Person):
url_firstPart = imdbURL_person_main
elif isinstance(mop, Character.Character):
url_firstPart = imdbURL_character_main
elif isinstance(mop, Company.Company):
url_firstPart = imdbURL_company_main
else:
raise IMDbError('object ' + repr(mop) + \
' is not a Movie, Person, Character or Company instance')
return url_firstPart % imdbID
def get_special_methods(self):
"""Return the special methods defined by the subclass."""
sm_dict = {}
base_methods = []
for name in dir(IMDbBase):
member = getattr(IMDbBase, name)
if isinstance(member, MethodType):
base_methods.append(name)
for name in dir(self.__class__):
if name.startswith('_') or name in base_methods or \
name.startswith('get_movie_') or \
name.startswith('get_person_') or \
name.startswith('get_company_') or \
name.startswith('get_character_'):
continue
member = getattr(self.__class__, name)
if isinstance(member, MethodType):
sm_dict.update({name: member.__doc__})
return sm_dict
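# Hedged usage sketch (not part of the original module): a typical flow with
# the methods documented above, assuming a working 'http' data access system:
#
#   ia = IMDb('http')
#   results = ia.search_movie(u'the matrix')
#   movie = results[0]
#   ia.update(movie, info=['main', 'plot'])   # fetch extra info sets
#   print ia.get_imdbURL(movie)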
|
mtp1376/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/howcast.py
|
38
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class HowcastIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?howcast\.com/videos/(?P<id>\d+)'
_TEST = {
'url': 'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly',
'md5': '8b743df908c42f60cf6496586c7f12c3',
'info_dict': {
'id': '390161',
'ext': 'mp4',
'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.',
'title': 'How to Tie a Square Knot Properly',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
self.report_extraction(video_id)
video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)',
webpage, 'video URL')
video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'',
webpage, 'description', fatal=False)
return {
'id': video_id,
'url': video_url,
'title': self._og_search_title(webpage),
'description': video_description,
'thumbnail': self._og_search_thumbnail(webpage),
}
|
chachan/nodeshot
|
refs/heads/master
|
nodeshot/community/notifications/urls.py
|
5
|
from django.conf.urls import patterns, url
urlpatterns = patterns('nodeshot.community.notifications.views', # noqa
url(r'^account/notifications/$', 'notification_list', name='api_notification_list'),
url(r'^account/notifications/(?P<pk>[0-9]+)/$', 'notification_detail', name='api_notification_detail'),
# email settings
url(r'^account/notifications/email-settings/$',
'notification_email_settings',
name='api_notification_email_settings'),
# web settings
url(r'^account/notifications/web-settings/$',
'notification_web_settings',
name='api_notification_web_settings'),
)
|
mitchrule/Miscellaneous
|
refs/heads/master
|
Django_Project/django/Lib/site-packages/django/conf/urls/i18n.py
|
113
|
import warnings
from django.conf import settings
from django.conf.urls import patterns, url
from django.core.urlresolvers import LocaleRegexURLResolver
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.views.i18n import set_language
def i18n_patterns(prefix, *args):
"""
Adds the language code prefix to every URL pattern within this
function. This may only be used in the root URLconf, not in an included
URLconf.
"""
if isinstance(prefix, six.string_types):
warnings.warn(
"Calling i18n_patterns() with the `prefix` argument and with tuples "
"instead of django.conf.urls.url() instances is deprecated and "
"will no longer work in Django 2.0. Use a list of "
"django.conf.urls.url() instances instead.",
RemovedInDjango20Warning, stacklevel=2
)
pattern_list = patterns(prefix, *args)
else:
pattern_list = [prefix] + list(args)
if not settings.USE_I18N:
return pattern_list
return [LocaleRegexURLResolver(pattern_list)]
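# Hedged usage sketch (root URLconf assumed; `about` is an invented view):
#
#   from django.conf.urls import url
#   urlpatterns = i18n_patterns(
#       url(r'^about/$', about, name='about'),
#   )
#
# Each pattern is then served under a language-code prefix such as /en/about/.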
urlpatterns = [
url(r'^setlang/$', set_language, name='set_language'),
]
|
andir/ipv6.watch
|
refs/heads/master
|
generate.py
|
1
|
#!/usr/bin/env python3
from typing import Dict, Any
import argparse
import asyncio
import datetime
import logging
import os
import time
from pprint import pformat
import aiodns
import jsonschema
import yaml
from jinja2 import Environment, FileSystemLoader, Template
from prometheus_client import Gauge, CollectorRegistry
from prometheus_client.exposition import generate_latest as prometheus_generate_latest
logger = logging.getLogger(__name__)
config_schema = {
    "type": "object",
    "properties": {
        "nameservers": {"type": "object"},
        "targets": {"type": "object"},
        "messages": {"type": "object"},
    },
}
def writeable_dir(values):
prospective_dir = values
if not os.path.isdir(prospective_dir):
raise argparse.ArgumentTypeError(
"writeable_dir:{0} is not a valid path".format(prospective_dir)
)
if not os.access(prospective_dir, os.W_OK | os.R_OK):
raise argparse.ArgumentTypeError(
"writeable_dir:{0} is not a writeable dir".format(prospective_dir)
)
return prospective_dir
def prepare_resolvers(nameservers, loop=None):
if not loop:
loop = asyncio.get_event_loop()
resolvers = {}
for name, servers in nameservers.items():
resolvers[name] = list(
(server, aiodns.DNSResolver(loop=loop, nameservers=[server]))
for server in servers
)
return resolvers
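# Hedged sketch of the expected `nameservers` mapping (provider names and
# addresses are illustrative):
#
#   nameservers = {
#       "google": ["8.8.8.8", "8.8.4.4"],
#       "cloudflare": ["1.1.1.1"],
#   }
#   resolvers = prepare_resolvers(nameservers)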
async def resolve_host(target, resolver, context=None):
try:
response = await resolver.query(target, "AAAA")
except aiodns.error.DNSError:
return False, context
if len(response) == 0:
return False, context
return True, context
async def resolve_target(target, resolvers):
tasks = []
for host in target["hosts"]:
for name, r in resolvers.items():
for resolver in r:
resolver_params = (host, name, resolver[0])
tasks.append(resolve_host(host, resolver[1], resolver_params))
results = {}
r = await asyncio.wait(tasks)
for l in r:
for task in l:
response, context = task.result()
host, resolver_name, nameserver = context
if response:
logger.info("\033[0;32m✓\033[0m\t%s @ %s", host, nameserver)
else:
logger.info("\033[0;31m✗\033[0m\t%s @ %s", host, nameserver)
h = results[host] = results.get(host, {})
r = h[resolver_name] = h.get(resolver_name, {})
r[nameserver] = response
return results
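# Hedged sketch of the nested result shape produced by resolve_target()
# (host, provider and server values are invented):
#
#   {"www.example.com": {"google": {"8.8.8.8": True, "8.8.4.4": False}}}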
def generate_message(media, target, conf, result):
    # `conf` maps each media name to a dict of result name -> template string.
    if media in conf:
        template = Template(conf[media].get(result))
        return template.render(target=target,
                               conf=conf,
                               result=result,
                               media=media)
    else:
        raise RuntimeError("Invalid media {} for {}".format(media, target))
async def handle_target(resolvers, name, target):
result = await resolve_target(target, resolvers)
msg = "none"
if any(
success
for host, rs in result.items()
for resolver, servers in rs.items()
for server, success in servers.items()
):
msg = "some"
if all(
success
for host, rs in result.items()
for resolver, servers in rs.items()
for server, success in servers.items()
):
msg = "all"
return name, dict(hosts=result, summary=msg)
def generate_prometheus_metrics(results) -> bytes:
"""
    Generate the Prometheus text representation of our measurements
"""
registry = CollectorRegistry()
    has_ipv6_gauge = Gauge(
        "ipv6_watch_has_ipv6",
        "AAAA resolve status",
        labelnames=("resolver", "resolver_provider", "site", "host"),
        registry=registry,
    )
    summary_gauge = Gauge(
        "ipv6_watch_summary",
        "AAAA resolve status",
        labelnames=("site",),
        registry=registry,
    )
update_timestamp = Gauge(
"ipv6_watch_last_update",
"Unix timestamp of last update",
registry=registry
)
update_timestamp.set(int(time.time()))
for site, site_results in results.items():
summary_value = -1
if site_results["summary"] == "none":
summary_value = 0
elif site_results["summary"] == "some":
summary_value = 0.5
elif site_results["summary"] == "all":
summary_value = 1
        summary_gauge.labels(site=site).set(summary_value)
for host, host_results in site_results["hosts"].items():
for resolver_provider, resolve_results in host_results.items():
for resolver, res in resolve_results.items():
                    has_ipv6_gauge.labels(
site=site,
host=host,
resolver_provider=resolver_provider,
resolver=resolver,
).set(res)
return prometheus_generate_latest(registry)
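# Hedged note: the bytes returned above are in the Prometheus text exposition
# format; main() writes them to "<dest>/metrics", which can then be served by
# any static web server and scraped (the serving setup is an assumption).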
async def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c",
"--config",
dest="config",
default="conf.yaml",
type=argparse.FileType("r"),
)
parser.add_argument(
"-l",
"--log-level",
dest="log_level",
choices=["DEBUG", "ERROR", "INFO", "WARN"],
help="Debug level",
default="INFO",
)
parser.add_argument("dest", default="dist", type=writeable_dir)
args = parser.parse_args()
log_level = getattr(logging, args.log_level)
logging.basicConfig(level=log_level)
    config = yaml.safe_load(args.config)
    # TODO: add item validation
    jsonschema.validate(config, config_schema)
nameservers = config["nameservers"]
targets = config["targets"]
loop = asyncio.get_event_loop()
resolvers = prepare_resolvers(nameservers, loop)
results: Dict[str, Any] = {}
tasks = []
for name, target in targets.items():
tasks.append(handle_target(resolvers, name, target))
tasks = await asyncio.wait(tasks)
for task_list in tasks:
for task in task_list:
name, result = task.result()
results[name] = result
prometheus_metrics = generate_prometheus_metrics(results)
results = sorted(results.items(), key=lambda x: x[0].lower())
logging.debug(pformat(results))
jinja_env = Environment(loader=FileSystemLoader("templates/"))
template = jinja_env.get_template("index.jinja2")
with open(os.path.join(args.dest, "index.html"), "w") as fh:
fh.write(
template.render(
long_date=datetime.datetime.now().strftime("%B %Y"),
results=results,
targets=targets,
messages=config["messages"],
date=datetime.datetime.utcnow(),
)
)
with open(os.path.join(args.dest, "metrics"), "wb") as fh:
fh.write(prometheus_metrics)
if __name__ == "__main__":
asyncio.get_event_loop().run_until_complete(main())
|
donghwicha/playgroundPython
|
refs/heads/master
|
designPattern/decorator/1.with-abc1.py
|
3
|
import abc
class Shape(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def draw(self):
"""need to be implemented"""
return
class Rectangle(Shape):
def draw(self):
print "this is rectangluar"
class Circle(Shape):
def draw(self):
print "this is circular"
class RedShapeDecor(Shape):
def __init__(self,shape):
self.shape = shape
def draw(self):
print "this is red thing"
self.shape.draw()
if __name__ == "__main__":
c = Circle()
r = Rectangle()
r1 = RedShapeDecor(c)
r1.draw()
r2 = RedShapeDecor(r)
r2.draw()
|
claws/AutobahnPython
|
refs/heads/master
|
examples/asyncio/wamp/basic/rpc/decorators/frontend.py
|
5
|
###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
try:
import asyncio
except ImportError:
## Trollius >= 0.3 was renamed
import trollius as asyncio
from autobahn.asyncio.wamp import ApplicationSession
class Component(ApplicationSession):
"""
An application component calling the different backend procedures.
"""
@asyncio.coroutine
def onJoin(self, details):
procs = [u'com.mathservice.add2',
u'com.mathservice.mul2',
u'com.mathservice.div2']
try:
for proc in procs:
res = yield from self.call(proc, 2, 3)
print("{}: {}".format(proc, res))
except Exception as e:
print("Something went wrong: {}".format(e))
self.leave()
def onDisconnect(self):
asyncio.get_event_loop().stop()
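## Hedged usage sketch (router URL and realm are assumptions):
##
##   from autobahn.asyncio.wamp import ApplicationRunner
##   runner = ApplicationRunner("ws://127.0.0.1:8080/ws", "realm1")
##   runner.run(Component)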
|
mrj1018/cyaron
|
refs/heads/master
|
cyaron/consts.py
|
2
|
from __future__ import absolute_import
import math
import string
"""Constants Package.
Constants:
ALPHABET_SMALL -> All the lowercase ASCII letters
ALPHABET_CAPITAL -> All the uppercase ASCII letters
ALPHABET -> All the ASCII letters
NUMBERS -> All the digits (0-9)
SENTENCE_SEPARATORS -> Includes 70% ",", 20% ";" and 10% ":"
SENTENCE_TERMINATORS -> Includes 80% "." and 20% "!"
"""
PI = math.pi
E = math.e
ALPHABET_SMALL = string.ascii_lowercase
ALPHABET_CAPITAL = string.ascii_uppercase
ALPHABET = ALPHABET_SMALL + ALPHABET_CAPITAL
NUMBERS = string.digits
SENTENCE_SEPARATORS = ',,,,,,,;;:' # 70% ',' 20% ';' 10% ':'
SENTENCE_TERMINATORS = '....!' # 80% '.' 20% '!'
DEFAULT_GRADER = "NOIPStyle"
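# Hedged usage sketch: drawing weighted punctuation from the constants above.
#
#   import random
#   random.choice(SENTENCE_SEPARATORS)   # ',' about 70% of the time
#   random.choice(SENTENCE_TERMINATORS)  # '.' about 80% of the time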
|
memo/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/check_ops.py
|
15
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Asserts and Boolean Checks.
See the @{$python/check_ops} guide.
@@assert_negative
@@assert_positive
@@assert_non_negative
@@assert_non_positive
@@assert_equal
@@assert_none_equal
@@assert_less
@@assert_less_equal
@@assert_greater
@@assert_greater_equal
@@assert_rank
@@assert_rank_at_least
@@assert_type
@@assert_integer
@@assert_proper_iterable
@@assert_same_float_dtype
@@assert_scalar
@@is_non_decreasing
@@is_numeric_tensor
@@is_strictly_increasing
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import compat
NUMERIC_TYPES = frozenset(
[dtypes.float32, dtypes.float64, dtypes.int8, dtypes.int16, dtypes.int32,
dtypes.int64, dtypes.uint8, dtypes.qint8, dtypes.qint32, dtypes.quint8,
dtypes.complex64])
__all__ = [
'assert_negative',
'assert_positive',
'assert_proper_iterable',
'assert_non_negative',
'assert_non_positive',
'assert_equal',
'assert_none_equal',
'assert_integer',
'assert_less',
'assert_less_equal',
'assert_greater',
'assert_greater_equal',
'assert_rank',
'assert_rank_at_least',
'assert_rank_in',
'assert_same_float_dtype',
'assert_scalar',
'assert_type',
'is_non_decreasing',
'is_numeric_tensor',
'is_strictly_increasing',
]
def _maybe_constant_value_string(t):
if not isinstance(t, ops.Tensor):
return str(t)
const_t = tensor_util.constant_value(t)
if const_t is not None:
return str(const_t)
return t
def _assert_static(condition, data):
"""Raises a static ValueError with as much information as possible."""
if not condition:
data_static = [_maybe_constant_value_string(x) for x in data]
raise ValueError('\n'.join(data_static))
def assert_proper_iterable(values):
"""Static assert that values is a "proper" iterable.
`Ops` that expect iterables of `Tensor` can call this to validate input.
  Useful since `Tensor`, `ndarray`, byte/text types are all iterables themselves.
Args:
values: Object to be checked.
Raises:
TypeError: If `values` is not iterable or is one of
`Tensor`, `SparseTensor`, `np.array`, `tf.compat.bytes_or_text_types`.
"""
unintentional_iterables = (
(ops.Tensor, sparse_tensor.SparseTensor, np.ndarray)
+ compat.bytes_or_text_types
)
if isinstance(values, unintentional_iterables):
raise TypeError(
'Expected argument "values" to be a "proper" iterable. Found: %s' %
type(values))
if not hasattr(values, '__iter__'):
raise TypeError(
'Expected argument "values" to be iterable. Found: %s' % type(values))
def assert_negative(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x < 0` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_negative(x)]):
output = tf.reduce_sum(x)
```
Negative means, for every element `x[i]` of `x`, we have `x[i] < 0`.
If `x` is empty this is trivially satisfied.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_negative".
Returns:
Op raising `InvalidArgumentError` unless `x` is all negative.
"""
message = message or ''
with ops.name_scope(name, 'assert_negative', [x, data]):
x = ops.convert_to_tensor(x, name='x')
if data is None:
data = [
message,
'Condition x < 0 did not hold element-wise:',
'x (%s) = ' % x.name, x]
zero = ops.convert_to_tensor(0, dtype=x.dtype)
return assert_less(x, zero, data=data, summarize=summarize)
def assert_positive(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x > 0` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_positive(x)]):
output = tf.reduce_sum(x)
```
Positive means, for every element `x[i]` of `x`, we have `x[i] > 0`.
If `x` is empty this is trivially satisfied.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_positive".
Returns:
Op raising `InvalidArgumentError` unless `x` is all positive.
"""
message = message or ''
with ops.name_scope(name, 'assert_positive', [x, data]):
x = ops.convert_to_tensor(x, name='x')
if data is None:
data = [
message, 'Condition x > 0 did not hold element-wise:',
'x (%s) = ' % x.name, x]
zero = ops.convert_to_tensor(0, dtype=x.dtype)
return assert_less(zero, x, data=data, summarize=summarize)
def assert_non_negative(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x >= 0` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_non_negative(x)]):
output = tf.reduce_sum(x)
```
Non-negative means, for every element `x[i]` of `x`, we have `x[i] >= 0`.
If `x` is empty this is trivially satisfied.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_non_negative".
Returns:
Op raising `InvalidArgumentError` unless `x` is all non-negative.
"""
message = message or ''
with ops.name_scope(name, 'assert_non_negative', [x, data]):
x = ops.convert_to_tensor(x, name='x')
if data is None:
data = [
message,
'Condition x >= 0 did not hold element-wise:',
'x (%s) = ' % x.name, x]
zero = ops.convert_to_tensor(0, dtype=x.dtype)
return assert_less_equal(zero, x, data=data, summarize=summarize)
def assert_non_positive(x, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x <= 0` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_non_positive(x)]):
output = tf.reduce_sum(x)
```
Non-positive means, for every element `x[i]` of `x`, we have `x[i] <= 0`.
If `x` is empty this is trivially satisfied.
Args:
x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_non_positive".
Returns:
Op raising `InvalidArgumentError` unless `x` is all non-positive.
"""
message = message or ''
with ops.name_scope(name, 'assert_non_positive', [x, data]):
x = ops.convert_to_tensor(x, name='x')
if data is None:
data = [
message,
          'Condition x <= 0 did not hold element-wise:',
'x (%s) = ' % x.name, x]
zero = ops.convert_to_tensor(0, dtype=x.dtype)
return assert_less_equal(x, zero, data=data, summarize=summarize)
def assert_equal(x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x == y` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_equal(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] == y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_equal".
Returns:
Op that raises `InvalidArgumentError` if `x == y` is False.
"""
message = message or ''
with ops.name_scope(name, 'assert_equal', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
'Condition x == y did not hold element-wise:',
'x (%s) = ' % x.name, x,
'y (%s) = ' % y.name, y
]
condition = math_ops.reduce_all(math_ops.equal(x, y))
x_static = tensor_util.constant_value(x)
y_static = tensor_util.constant_value(y)
if x_static is not None and y_static is not None:
condition_static = (x_static == y_static).all()
_assert_static(condition_static, data)
return control_flow_ops.Assert(condition, data, summarize=summarize)
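# Hedged illustration (values invented): when both inputs are statically
# known, assert_equal fails at graph-construction time via _assert_static,
# raising ValueError instead of deferring to a runtime Assert op:
#
#   assert_equal(ops.convert_to_tensor(1), ops.convert_to_tensor(2))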
def assert_none_equal(
x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x != y` holds for all elements.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_none_equal(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] != y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_none_equal".
Returns:
Op that raises `InvalidArgumentError` if `x != y` is ever False.
"""
message = message or ''
with ops.name_scope(name, 'assert_none_equal', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
          'Condition x != y did not hold for every single element:',
'x (%s) = ' % x.name, x,
'y (%s) = ' % y.name, y
]
condition = math_ops.reduce_all(math_ops.not_equal(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_less(x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x < y` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_less(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] < y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_less".
Returns:
Op that raises `InvalidArgumentError` if `x < y` is False.
"""
message = message or ''
with ops.name_scope(name, 'assert_less', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
          'Condition x < y did not hold element-wise:',
'x (%s) = ' % x.name, x, 'y (%s) = ' % y.name, y
]
condition = math_ops.reduce_all(math_ops.less(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_less_equal(x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x <= y` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_less_equal(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] <= y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_less_equal"
Returns:
Op that raises `InvalidArgumentError` if `x <= y` is False.
"""
message = message or ''
with ops.name_scope(name, 'assert_less_equal', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
          'Condition x <= y did not hold element-wise:',
'x (%s) = ' % x.name, x, 'y (%s) = ' % y.name, y
]
condition = math_ops.reduce_all(math_ops.less_equal(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_greater(x, y, data=None, summarize=None, message=None, name=None):
"""Assert the condition `x > y` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_greater(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] > y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_greater".
Returns:
Op that raises `InvalidArgumentError` if `x > y` is False.
"""
message = message or ''
with ops.name_scope(name, 'assert_greater', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
          'Condition x > y did not hold element-wise:',
'x (%s) = ' % x.name, x, 'y (%s) = ' % y.name, y
]
condition = math_ops.reduce_all(math_ops.greater(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_greater_equal(x, y, data=None, summarize=None, message=None,
name=None):
"""Assert the condition `x >= y` holds element-wise.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_greater_equal(x, y)]):
output = tf.reduce_sum(x)
```
This condition holds if for every pair of (possibly broadcast) elements
`x[i]`, `y[i]`, we have `x[i] >= y[i]`.
If both `x` and `y` are empty, this is trivially satisfied.
Args:
x: Numeric `Tensor`.
y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to
"assert_greater_equal"
Returns:
Op that raises `InvalidArgumentError` if `x >= y` is False.
"""
message = message or ''
with ops.name_scope(name, 'assert_greater_equal', [x, y, data]):
x = ops.convert_to_tensor(x, name='x')
y = ops.convert_to_tensor(y, name='y')
if data is None:
data = [
message,
          'Condition x >= y did not hold element-wise:',
'x (%s) = ' % x.name, x, 'y (%s) = ' % y.name, y
]
condition = math_ops.reduce_all(math_ops.greater_equal(x, y))
return control_flow_ops.Assert(condition, data, summarize=summarize)
def _assert_rank_condition(
x, rank, static_condition, dynamic_condition, data, summarize):
"""Assert `x` has a rank that satisfies a given condition.
Args:
x: Numeric `Tensor`.
rank: Scalar `Tensor`.
static_condition: A python function that takes `[actual_rank, given_rank]`
and returns `True` if the condition is satisfied, `False` otherwise.
dynamic_condition: An `op` that takes [actual_rank, given_rank]
and return `True` if the condition is satisfied, `False` otherwise.
data: The tensors to print out if the condition is false. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
Returns:
Op raising `InvalidArgumentError` if `x` fails dynamic_condition.
Raises:
ValueError: If static checks determine `x` fails static_condition.
"""
assert_type(rank, dtypes.int32)
  # Attempt to statically determine the rank.
rank_static = tensor_util.constant_value(rank)
if rank_static is not None:
if rank_static.ndim != 0:
raise ValueError('Rank must be a scalar.')
x_rank_static = x.get_shape().ndims
if x_rank_static is not None:
if not static_condition(x_rank_static, rank_static):
raise ValueError(
'Static rank condition failed', x_rank_static, rank_static)
return control_flow_ops.no_op(name='static_checks_determined_all_ok')
condition = dynamic_condition(array_ops.rank(x), rank)
# Add the condition that `rank` must have rank zero. Prevents the bug where
# someone does assert_rank(x, [n]), rather than assert_rank(x, n).
if rank_static is None:
this_data = ['Rank must be a scalar. Received rank: ', rank]
rank_check = assert_rank(rank, 0, data=this_data)
condition = control_flow_ops.with_dependencies([rank_check], condition)
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_rank(x, rank, data=None, summarize=None, message=None, name=None):
"""Assert `x` has rank equal to `rank`.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_rank(x, 2)]):
output = tf.reduce_sum(x)
```
Args:
x: Numeric `Tensor`.
rank: Scalar integer `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_rank".
Returns:
Op raising `InvalidArgumentError` unless `x` has specified rank.
If static checks determine `x` has correct rank, a `no_op` is returned.
Raises:
ValueError: If static checks determine `x` has wrong rank.
"""
with ops.name_scope(name, 'assert_rank', (x, rank) + tuple(data or [])):
x = ops.convert_to_tensor(x, name='x')
rank = ops.convert_to_tensor(rank, name='rank')
message = message or ''
static_condition = lambda actual_rank, given_rank: actual_rank == given_rank
dynamic_condition = math_ops.equal
if data is None:
data = [
message,
'Tensor %s must have rank' % x.name, rank, 'Received shape: ',
array_ops.shape(x)
]
try:
assert_op = _assert_rank_condition(x, rank, static_condition,
dynamic_condition, data, summarize)
except ValueError as e:
if e.args[0] == 'Static rank condition failed':
raise ValueError(
'%s. Tensor %s must have rank %d. Received rank %d, shape %s' %
(message, x.name, e.args[2], e.args[1], x.get_shape()))
else:
raise
return assert_op
def assert_rank_at_least(
x, rank, data=None, summarize=None, message=None, name=None):
"""Assert `x` has rank equal to `rank` or higher.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_rank_at_least(x, 2)]):
output = tf.reduce_sum(x)
```
Args:
x: Numeric `Tensor`.
rank: Scalar `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_rank_at_least".
Returns:
Op raising `InvalidArgumentError` unless `x` has specified rank or higher.
If static checks determine `x` has correct rank, a `no_op` is returned.
Raises:
ValueError: If static checks determine `x` has wrong rank.
"""
with ops.name_scope(
name, 'assert_rank_at_least', (x, rank) + tuple(data or [])):
x = ops.convert_to_tensor(x, name='x')
rank = ops.convert_to_tensor(rank, name='rank')
message = message or ''
static_condition = lambda actual_rank, given_rank: actual_rank >= given_rank
dynamic_condition = math_ops.greater_equal
if data is None:
data = [
message,
'Tensor %s must have rank at least' % x.name, rank,
'Received shape: ', array_ops.shape(x)
]
try:
assert_op = _assert_rank_condition(x, rank, static_condition,
dynamic_condition, data, summarize)
except ValueError as e:
if e.args[0] == 'Static rank condition failed':
raise ValueError(
'%s. Tensor %s must have rank at least %d. Received rank %d, '
'shape %s' % (message, x.name, e.args[2], e.args[1], x.get_shape()))
else:
raise
return assert_op
def _static_rank_in(actual_rank, given_ranks):
return actual_rank in given_ranks
def _dynamic_rank_in(actual_rank, given_ranks):
if len(given_ranks) < 1:
return ops.convert_to_tensor(False)
result = math_ops.equal(given_ranks[0], actual_rank)
for given_rank in given_ranks[1:]:
result = math_ops.logical_or(
result, math_ops.equal(given_rank, actual_rank))
return result
def _assert_ranks_condition(
x, ranks, static_condition, dynamic_condition, data, summarize):
"""Assert `x` has a rank that satisfies a given condition.
Args:
x: Numeric `Tensor`.
ranks: Scalar `Tensor`.
static_condition: A python function that takes
`[actual_rank, given_ranks]` and returns `True` if the condition is
satisfied, `False` otherwise.
dynamic_condition: An `op` that takes [actual_rank, given_ranks]
and return `True` if the condition is satisfied, `False` otherwise.
data: The tensors to print out if the condition is false. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
Returns:
Op raising `InvalidArgumentError` if `x` fails dynamic_condition.
Raises:
ValueError: If static checks determine `x` fails static_condition.
"""
for rank in ranks:
assert_type(rank, dtypes.int32)
  # Attempt to statically determine the ranks.
ranks_static = tuple([tensor_util.constant_value(rank) for rank in ranks])
if None not in ranks_static:
for rank_static in ranks_static:
if rank_static.ndim != 0:
raise ValueError('Rank must be a scalar.')
x_rank_static = x.get_shape().ndims
if x_rank_static is not None:
if not static_condition(x_rank_static, ranks_static):
raise ValueError(
'Static rank condition failed', x_rank_static, ranks_static)
return control_flow_ops.no_op(name='static_checks_determined_all_ok')
condition = dynamic_condition(array_ops.rank(x), ranks)
# Add the condition that `rank` must have rank zero. Prevents the bug where
# someone does assert_rank(x, [n]), rather than assert_rank(x, n).
for rank, rank_static in zip(ranks, ranks_static):
if rank_static is None:
this_data = ['Rank must be a scalar. Received rank: ', rank]
rank_check = assert_rank(rank, 0, data=this_data)
condition = control_flow_ops.with_dependencies([rank_check], condition)
return control_flow_ops.Assert(condition, data, summarize=summarize)
def assert_rank_in(
x, ranks, data=None, summarize=None, message=None, name=None):
"""Assert `x` has rank in `ranks`.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_rank_in(x, (2, 4))]):
output = tf.reduce_sum(x)
```
Args:
x: Numeric `Tensor`.
ranks: Iterable of scalar `Tensor` objects.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Defaults to "assert_rank_in".
Returns:
Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`.
If static checks determine `x` has matching rank, a `no_op` is returned.
Raises:
ValueError: If static checks determine `x` has mismatched rank.
"""
with ops.name_scope(
name, 'assert_rank_in', (x,) + tuple(ranks) + tuple(data or [])):
x = ops.convert_to_tensor(x, name='x')
ranks = tuple([ops.convert_to_tensor(rank, name='rank') for rank in ranks])
message = message or ''
if data is None:
data = [
message, 'Tensor %s must have rank in' % x.name
] + list(ranks) + [
'Received shape: ', array_ops.shape(x)
]
try:
assert_op = _assert_ranks_condition(x, ranks, _static_rank_in,
_dynamic_rank_in, data, summarize)
except ValueError as e:
if e.args[0] == 'Static rank condition failed':
raise ValueError(
'%s. Tensor %s must have rank in %s. Received rank %d, '
'shape %s' % (message, x.name, e.args[2], e.args[1], x.get_shape()))
else:
raise
return assert_op
def assert_integer(x, message=None, name=None):
"""Assert that `x` is of integer dtype.
Example of adding a dependency to an operation:
```python
with tf.control_dependencies([tf.assert_integer(x)]):
output = tf.reduce_sum(x)
```
Args:
x: `Tensor` whose basetype is integer and is not quantized.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_integer".
Raises:
TypeError: If `x.dtype` is anything other than non-quantized integer.
Returns:
A `no_op` that does nothing. Type can be determined statically.
"""
message = message or ''
with ops.name_scope(name, 'assert_integer', [x]):
x = ops.convert_to_tensor(x, name='x')
if not x.dtype.is_integer:
err_msg = (
'%s Expected "x" to be integer type. Found: %s of dtype %s'
% (message, x.name, x.dtype))
raise TypeError(err_msg)
return control_flow_ops.no_op('statically_determined_was_integer')
def assert_type(tensor, tf_type, message=None, name=None):
"""Statically asserts that the given `Tensor` is of the specified type.
Args:
tensor: A tensorflow `Tensor`.
tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,
etc).
message: A string to prefix to the default message.
name: A name to give this `Op`. Defaults to "assert_type"
Raises:
    TypeError: If the tensor's data type doesn't match `tf_type`.
Returns:
A `no_op` that does nothing. Type can be determined statically.
"""
message = message or ''
with ops.name_scope(name, 'assert_type', [tensor]):
tensor = ops.convert_to_tensor(tensor, name='tensor')
if tensor.dtype != tf_type:
raise TypeError(
'%s %s must be of type %s' % (message, tensor.op.name, tf_type))
return control_flow_ops.no_op('statically_determined_correct_type')
# pylint: disable=line-too-long
def _get_diff_for_monotonic_comparison(x):
"""Gets the difference x[1:] - x[:-1]."""
x = array_ops.reshape(x, [-1])
if not is_numeric_tensor(x):
raise TypeError('Expected x to be numeric, instead found: %s' % x)
# If x has less than 2 elements, there is nothing to compare. So return [].
is_shorter_than_two = math_ops.less(array_ops.size(x), 2)
short_result = lambda: ops.convert_to_tensor([], dtype=x.dtype)
# With 2 or more elements, return x[1:] - x[:-1]
s_len = array_ops.shape(x) - 1
  diff = lambda: array_ops.strided_slice(x, [1], [1] + s_len) - array_ops.strided_slice(x, [0], s_len)
return control_flow_ops.cond(is_shorter_than_two, short_result, diff)
def is_numeric_tensor(tensor):
return isinstance(tensor, ops.Tensor) and tensor.dtype in NUMERIC_TYPES
def is_non_decreasing(x, name=None):
"""Returns `True` if `x` is non-decreasing.
Elements of `x` are compared in row-major order. The tensor `[x[0],...]`
is non-decreasing if for every adjacent pair we have `x[i] <= x[i+1]`.
If `x` has less than two elements, it is trivially non-decreasing.
See also: `is_strictly_increasing`
Args:
x: Numeric `Tensor`.
name: A name for this operation (optional). Defaults to "is_non_decreasing"
Returns:
Boolean `Tensor`, equal to `True` iff `x` is non-decreasing.
Raises:
TypeError: if `x` is not a numeric tensor.
"""
with ops.name_scope(name, 'is_non_decreasing', [x]):
diff = _get_diff_for_monotonic_comparison(x)
# When len(x) = 1, diff = [], less_equal = [], and reduce_all([]) = True.
zero = ops.convert_to_tensor(0, dtype=diff.dtype)
return math_ops.reduce_all(math_ops.less_equal(zero, diff))
def is_strictly_increasing(x, name=None):
"""Returns `True` if `x` is strictly increasing.
Elements of `x` are compared in row-major order. The tensor `[x[0],...]`
is strictly increasing if for every adjacent pair we have `x[i] < x[i+1]`.
If `x` has less than two elements, it is trivially strictly increasing.
See also: `is_non_decreasing`
Args:
x: Numeric `Tensor`.
name: A name for this operation (optional).
Defaults to "is_strictly_increasing"
Returns:
Boolean `Tensor`, equal to `True` iff `x` is strictly increasing.
Raises:
TypeError: if `x` is not a numeric tensor.
"""
with ops.name_scope(name, 'is_strictly_increasing', [x]):
diff = _get_diff_for_monotonic_comparison(x)
# When len(x) = 1, diff = [], less = [], and reduce_all([]) = True.
zero = ops.convert_to_tensor(0, dtype=diff.dtype)
return math_ops.reduce_all(math_ops.less(zero, diff))
def _assert_same_base_type(items, expected_type=None):
r"""Asserts all items are of the same base type.
Args:
items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
of the same base type.
Returns:
Validated type, or none if neither expected_type nor items provided.
Raises:
ValueError: If any types do not match.
"""
original_item_str = None
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
original_item_str = item.name if hasattr(item, 'name') else str(item)
elif expected_type != item_type:
raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (
item.name if hasattr(item, 'name') else str(item),
item_type, expected_type,
(' as %s' % original_item_str) if original_item_str else ''))
return expected_type
def assert_same_float_dtype(tensors=None, dtype=None):
"""Validate and return float type based on `tensors` and `dtype`.
For ops such as matrix multiplication, inputs and weights must be of the
same float type. This function validates that all `tensors` are the same type,
validates that type is `dtype` (if supplied), and returns the type. Type must
be a floating point type. If neither `tensors` nor `dtype` is supplied,
the function will return `dtypes.float32`.
Args:
tensors: Tensors of input values. Can include `None` elements, which will be
ignored.
dtype: Expected type.
Returns:
Validated type.
Raises:
ValueError: if neither `tensors` nor `dtype` is supplied, or result is not
float, or the common type of the inputs is not a floating point type.
"""
if tensors:
dtype = _assert_same_base_type(tensors, dtype)
if not dtype:
dtype = dtypes.float32
elif not dtype.is_floating:
raise ValueError('Expected floating point type, got %s.' % dtype)
return dtype
def assert_scalar(tensor, name=None):
with ops.name_scope(name, 'assert_scalar', [tensor]) as name_scope:
tensor = ops.convert_to_tensor(tensor, name=name_scope)
shape = tensor.get_shape()
if shape.ndims != 0:
raise ValueError('Expected scalar shape for %s, saw shape: %s.'
% (tensor.name, shape))
return tensor
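# Hedged usage sketch (values invented; assumes a default graph):
#
#   x = ops.convert_to_tensor([1.0, 2.0])
#   assert_same_float_dtype([x])                  # -> dtypes.float32
#   assert_scalar(ops.convert_to_tensor(3.0))     # passes
#   assert_scalar(x)                              # raises ValueError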
|
jordigh/mercurial-crew
|
refs/heads/master
|
mercurial/repoview.py
|
92
|
# repoview.py - Filtered view of a localrepo object
#
# Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
# Logilab SA <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import copy
import phases
import util
import obsolete, revset
def hideablerevs(repo):
"""Revisions candidates to be hidden
This is a standalone function to help extensions to wrap it."""
return obsolete.getrevs(repo, 'obsolete')
def computehidden(repo):
"""compute the set of hidden revision to filter
During most operation hidden should be filtered."""
assert not repo.changelog.filteredrevs
hideable = hideablerevs(repo)
if hideable:
cl = repo.changelog
firsthideable = min(hideable)
revs = cl.revs(start=firsthideable)
blockers = [r for r in revset._children(repo, revs, hideable)
if r not in hideable]
for par in repo[None].parents():
blockers.append(par.rev())
for bm in repo._bookmarks.values():
blockers.append(repo[bm].rev())
blocked = cl.ancestors(blockers, inclusive=True)
return frozenset(r for r in hideable if r not in blocked)
return frozenset()
def computeunserved(repo):
"""compute the set of revision that should be filtered when used a server
Secret and hidden changeset should not pretend to be here."""
assert not repo.changelog.filteredrevs
# fast path in simple case to avoid impact of non optimised code
hiddens = filterrevs(repo, 'visible')
if phases.hassecret(repo):
cl = repo.changelog
secret = phases.secret
getphase = repo._phasecache.phase
first = min(cl.rev(n) for n in repo._phasecache.phaseroots[secret])
revs = cl.revs(start=first)
secrets = set(r for r in revs if getphase(repo, r) >= secret)
return frozenset(hiddens | secrets)
else:
return hiddens
def computemutable(repo):
"""compute the set of revision that should be filtered when used a server
Secret and hidden changeset should not pretend to be here."""
assert not repo.changelog.filteredrevs
# fast check to avoid revset call on huge repo
if util.any(repo._phasecache.phaseroots[1:]):
getphase = repo._phasecache.phase
maymutable = filterrevs(repo, 'base')
return frozenset(r for r in maymutable if getphase(repo, r))
return frozenset()
def computeimpactable(repo):
"""Everything impactable by mutable revision
The immutable filter still have some chance to get invalidated. This will
happen when:
- you garbage collect hidden changeset,
- public phase is moved backward,
- something is changed in the filtering (this could be fixed)
This filter out any mutable changeset and any public changeset that may be
impacted by something happening to a mutable revision.
This is achieved by filtered everything with a revision number egal or
higher than the first mutable changeset is filtered."""
assert not repo.changelog.filteredrevs
cl = repo.changelog
firstmutable = len(cl)
for roots in repo._phasecache.phaseroots[1:]:
if roots:
firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
# protect from nullrev root
firstmutable = max(0, firstmutable)
return frozenset(xrange(firstmutable, len(cl)))
# function to compute filtered set
filtertable = {'visible': computehidden,
'served': computeunserved,
'immutable': computemutable,
'base': computeimpactable}
### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# The ordering may be partial.
subsettable = {None: 'visible',
'visible': 'served',
'served': 'immutable',
'immutable': 'base'}
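# A quick illustration (not part of the original module): following the
# relation from the unfiltered repo (key None) walks the whole chain
# None -> 'visible' -> 'served' -> 'immutable' -> 'base':
#
#   name = None
#   while name in subsettable:
#       name = subsettable[name]  # each step is the nearest subset
#   # name is now 'base'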
def filterrevs(repo, filtername):
"""returns set of filtered revision for this filter name"""
if filtername not in repo.filteredrevcache:
func = filtertable[filtername]
repo.filteredrevcache[filtername] = func(repo.unfiltered())
return repo.filteredrevcache[filtername]
class repoview(object):
"""Provide a read/write view of a repo through a filtered changelog
This object is used to access a filtered version of a repository without
altering the original repository object itself. We can not alter the
original object for two main reasons:
- It prevents the use of a repo with multiple filters at the same time. In
particular when multiple threads are involved.
- It makes scope of the filtering harder to control.
This object behaves very closely to the original repository. All attribute
operations are done on the original repository:
- An access to `repoview.someattr` actually returns `repo.someattr`,
- A write to `repoview.someattr` actually sets value of `repo.someattr`,
- A deletion of `repoview.someattr` actually drops `someattr`
from `repo.__dict__`.
The only exception is the `changelog` property. It is overridden to return
a (surface) copy of `repo.changelog` with some revisions filtered. The
`filtername` attribute of the view control the revisions that need to be
filtered. (the fact the changelog is copied is an implementation detail).
Unlike attributes, this object intercepts all method calls. This means that
all methods are run on the `repoview` object with the filtered `changelog`
property. For this purpose the simple `repoview` class must be mixed with
the actual class of the repository. This ensures that the resulting
`repoview` object have the very same methods than the repo object. This
leads to the property below.
repoview.method() --> repo.__class__.method(repoview)
The inheritance has to be done dynamically because `repo` can be of any
subclasses of `localrepo`. Eg: `bundlerepo` or `statichttprepo`.
"""
def __init__(self, repo, filtername):
object.__setattr__(self, '_unfilteredrepo', repo)
object.__setattr__(self, 'filtername', filtername)
object.__setattr__(self, '_clcachekey', None)
object.__setattr__(self, '_clcache', None)
# not a propertycache on purpose we shall implement a proper cache later
@property
def changelog(self):
"""return a filtered version of the changeset
this changelog must not be used for writing"""
# some cache may be implemented later
unfi = self._unfilteredrepo
unfichangelog = unfi.changelog
revs = filterrevs(unfi, self.filtername)
cl = self._clcache
newkey = (len(unfichangelog), unfichangelog.tip(), hash(revs))
if cl is not None:
            # we need to check curkey too for some obscure reason.
            # MQ tests show a corruption of the underlying repo (in _clcache)
            # without any change in the cache key.
oldfilter = cl.filteredrevs
try:
                cl.filteredrevs = ()  # disable filtering for tip
curkey = (len(cl), cl.tip(), hash(oldfilter))
finally:
cl.filteredrevs = oldfilter
if newkey != self._clcachekey or newkey != curkey:
cl = None
# could have been made None by the previous if
if cl is None:
cl = copy.copy(unfichangelog)
cl.filteredrevs = revs
object.__setattr__(self, '_clcache', cl)
object.__setattr__(self, '_clcachekey', newkey)
return cl
def unfiltered(self):
"""Return an unfiltered version of a repo"""
return self._unfilteredrepo
def filtered(self, name):
"""Return a filtered version of a repository"""
if name == self.filtername:
return self
return self.unfiltered().filtered(name)
    # every attribute access is forwarded to the proxied repo
def __getattr__(self, attr):
return getattr(self._unfilteredrepo, attr)
def __setattr__(self, attr, value):
return setattr(self._unfilteredrepo, attr, value)
def __delattr__(self, attr):
return delattr(self._unfilteredrepo, attr)
    # The `requirements` attribute is initialized during __init__. But
    # __getattr__ won't be called as it also exists on the class. We need
    # explicit forwarding to the main repo here.
@property
def requirements(self):
return self._unfilteredrepo.requirements
|
qedi-r/home-assistant
|
refs/heads/dev
|
homeassistant/components/luftdaten/config_flow.py
|
5
|
"""Config flow to configure the Luftdaten component."""
from collections import OrderedDict
from luftdaten import Luftdaten
from luftdaten.exceptions import LuftdatenConnectionError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_MONITORED_CONDITIONS,
CONF_SCAN_INTERVAL,
CONF_SENSORS,
CONF_SHOW_ON_MAP,
)
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
import homeassistant.helpers.config_validation as cv
from .const import CONF_SENSOR_ID, DEFAULT_SCAN_INTERVAL, DOMAIN
@callback
def configured_sensors(hass):
"""Return a set of configured Luftdaten sensors."""
return set(
entry.data[CONF_SENSOR_ID]
for entry in hass.config_entries.async_entries(DOMAIN)
)
@callback
def duplicate_stations(hass):
"""Return a set of duplicate configured Luftdaten stations."""
stations = [
int(entry.data[CONF_SENSOR_ID])
for entry in hass.config_entries.async_entries(DOMAIN)
]
return {x for x in stations if stations.count(x) > 1}
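# Illustrative only: with configured sensor IDs [123, 123, 456], the stations
# list counts 123 twice, so duplicate_stations() returns {123}.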
@config_entries.HANDLERS.register(DOMAIN)
class LuftDatenFlowHandler(config_entries.ConfigFlow):
"""Handle a Luftdaten config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
@callback
def _show_form(self, errors=None):
"""Show the form to the user."""
data_schema = OrderedDict()
data_schema[vol.Required(CONF_SENSOR_ID)] = cv.positive_int
data_schema[vol.Optional(CONF_SHOW_ON_MAP, default=False)] = bool
return self.async_show_form(
step_id="user", data_schema=vol.Schema(data_schema), errors=errors or {}
)
async def async_step_import(self, import_config):
"""Import a config entry from configuration.yaml."""
return await self.async_step_user(import_config)
async def async_step_user(self, user_input=None):
"""Handle the start of the config flow."""
if not user_input:
return self._show_form()
sensor_id = user_input[CONF_SENSOR_ID]
if sensor_id in configured_sensors(self.hass):
return self._show_form({CONF_SENSOR_ID: "sensor_exists"})
session = aiohttp_client.async_get_clientsession(self.hass)
luftdaten = Luftdaten(user_input[CONF_SENSOR_ID], self.hass.loop, session)
try:
await luftdaten.get_data()
valid = await luftdaten.validate_sensor()
except LuftdatenConnectionError:
return self._show_form({CONF_SENSOR_ID: "communication_error"})
if not valid:
return self._show_form({CONF_SENSOR_ID: "invalid_sensor"})
available_sensors = [
x for x in luftdaten.values if luftdaten.values[x] is not None
]
if available_sensors:
user_input.update(
{CONF_SENSORS: {CONF_MONITORED_CONDITIONS: available_sensors}}
)
scan_interval = user_input.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
user_input.update({CONF_SCAN_INTERVAL: scan_interval.seconds})
return self.async_create_entry(title=str(sensor_id), data=user_input)
|
horizontracy/rpi_tool
|
refs/heads/master
|
api/venv/lib/python2.7/site-packages/setuptools/site-patch.py
|
720
|
def __boot():
import sys
import os
PYTHONPATH = os.environ.get('PYTHONPATH')
if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH):
PYTHONPATH = []
else:
PYTHONPATH = PYTHONPATH.split(os.pathsep)
pic = getattr(sys,'path_importer_cache',{})
stdpath = sys.path[len(PYTHONPATH):]
mydir = os.path.dirname(__file__)
#print "searching",stdpath,sys.path
for item in stdpath:
if item==mydir or not item:
continue # skip if current dir. on Windows, or my own directory
importer = pic.get(item)
if importer is not None:
loader = importer.find_module('site')
if loader is not None:
# This should actually reload the current module
loader.load_module('site')
break
else:
try:
import imp # Avoid import loop in Python >= 3.3
stream, path, descr = imp.find_module('site',[item])
except ImportError:
continue
if stream is None:
continue
try:
# This should actually reload the current module
imp.load_module('site',stream,path,descr)
finally:
stream.close()
break
else:
raise ImportError("Couldn't find the real 'site' module")
#print "loaded", __file__
known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp
oldpos = getattr(sys,'__egginsert',0) # save old insertion position
sys.__egginsert = 0 # and reset the current one
for item in PYTHONPATH:
addsitedir(item)
sys.__egginsert += oldpos # restore effective old position
d, nd = makepath(stdpath[0])
insert_at = None
new_path = []
for item in sys.path:
p, np = makepath(item)
if np==nd and insert_at is None:
# We've hit the first 'system' path entry, so added entries go here
insert_at = len(new_path)
if np in known_paths or insert_at is None:
new_path.append(item)
else:
# new path after the insert point, back-insert it
new_path.insert(insert_at, item)
insert_at += 1
sys.path[:] = new_path
if __name__=='site':
__boot()
del __boot
|
lmazuel/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/local_network_gateway.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class LocalNetworkGateway(Resource):
"""A common class for general resource information.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param local_network_address_space: Local network site address space.
:type local_network_address_space:
~azure.mgmt.network.v2017_10_01.models.AddressSpace
:param gateway_ip_address: IP address of local network gateway.
:type gateway_ip_address: str
:param bgp_settings: Local network gateway's BGP speaker settings.
:type bgp_settings: ~azure.mgmt.network.v2017_10_01.models.BgpSettings
:param resource_guid: The resource GUID property of the
LocalNetworkGateway resource.
:type resource_guid: str
:ivar provisioning_state: The provisioning state of the
LocalNetworkGateway resource. Possible values are: 'Updating', 'Deleting',
and 'Failed'.
:vartype provisioning_state: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'local_network_address_space': {'key': 'properties.localNetworkAddressSpace', 'type': 'AddressSpace'},
'gateway_ip_address': {'key': 'properties.gatewayIpAddress', 'type': 'str'},
'bgp_settings': {'key': 'properties.bgpSettings', 'type': 'BgpSettings'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(LocalNetworkGateway, self).__init__(**kwargs)
self.local_network_address_space = kwargs.get('local_network_address_space', None)
self.gateway_ip_address = kwargs.get('gateway_ip_address', None)
self.bgp_settings = kwargs.get('bgp_settings', None)
self.resource_guid = kwargs.get('resource_guid', None)
self.provisioning_state = None
self.etag = kwargs.get('etag', None)
|
AsteroidOS/android_external_skia
|
refs/heads/master
|
gm/rebaseline_server/fix_pythonpath.py
|
66
|
#!/usr/bin/python
"""
Copyright 2014 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Adds [trunk]/gm and [trunk]/tools to PYTHONPATH, if they aren't already there.
"""
import os
import sys
TRUNK_DIRECTORY = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
GM_DIRECTORY = os.path.join(TRUNK_DIRECTORY, 'gm')
TOOLS_DIRECTORY = os.path.join(TRUNK_DIRECTORY, 'tools')
if GM_DIRECTORY not in sys.path:
sys.path.append(GM_DIRECTORY)
if TOOLS_DIRECTORY not in sys.path:
sys.path.append(TOOLS_DIRECTORY)
|
Lujeni/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/fortios/fortios_ips_global.py
|
14
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_ips_global
short_description: Configure IPS global parameter in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify ips feature and global category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
            - Ensures the FortiGate certificate is verified by a proper CA.
type: bool
default: true
version_added: 2.9
ips_global:
description:
- Configure IPS global parameter.
default: null
type: dict
suboptions:
anomaly_mode:
description:
- Global blocking mode for rate-based anomalies.
type: str
choices:
- periodical
- continuous
database:
description:
- Regular or extended IPS database. Regular protects against the latest common and in-the-wild attacks. Extended includes protection from
legacy attacks.
type: str
choices:
- regular
- extended
deep_app_insp_db_limit:
description:
- Limit on number of entries in deep application inspection database (1 - 2147483647, 0 = use recommended setting)
type: int
deep_app_insp_timeout:
description:
- Timeout for Deep application inspection (1 - 2147483647 sec., 0 = use recommended setting).
type: int
engine_count:
description:
- Number of IPS engines running. If set to the default value of 0, FortiOS sets the number to optimize performance depending on the number
of CPU cores.
type: int
exclude_signatures:
description:
- Excluded signatures.
type: str
choices:
- none
- industrial
fail_open:
description:
- Enable to allow traffic if the IPS process crashes. Default is disable and IPS traffic is blocked when the IPS process crashes.
type: str
choices:
- enable
- disable
intelligent_mode:
description:
- Enable/disable IPS adaptive scanning (intelligent mode). Intelligent mode optimizes the scanning method for the type of traffic.
type: str
choices:
- enable
- disable
session_limit_mode:
description:
- Method of counting concurrent sessions used by session limit anomalies. Choose between greater accuracy (accurate) or improved
performance (heuristics).
type: str
choices:
- accurate
- heuristic
skype_client_public_ipaddr:
description:
- Public IP addresses of your network that receive Skype sessions. Helps identify Skype sessions. Separate IP addresses with commas.
type: str
socket_size:
description:
- IPS socket buffer size (0 - 256 MB). Default depends on available memory. Can be changed to tune performance.
type: int
sync_session_ttl:
description:
- Enable/disable use of kernel session TTL for IPS sessions.
type: str
choices:
- enable
- disable
traffic_submit:
description:
- Enable/disable submitting attack data found by this FortiGate to FortiGuard.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure IPS global parameter.
fortios_ips_global:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
ips_global:
anomaly_mode: "periodical"
database: "regular"
deep_app_insp_db_limit: "5"
deep_app_insp_timeout: "6"
engine_count: "7"
exclude_signatures: "none"
fail_open: "enable"
intelligent_mode: "enable"
session_limit_mode: "accurate"
skype_client_public_ipaddr: "<your_own_value>"
socket_size: "13"
sync_session_ttl: "enable"
traffic_submit: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_ips_global_data(json):
option_list = ['anomaly_mode', 'database', 'deep_app_insp_db_limit',
'deep_app_insp_timeout', 'engine_count', 'exclude_signatures',
'fail_open', 'intelligent_mode', 'session_limit_mode',
'skype_client_public_ipaddr', 'socket_size', 'sync_session_ttl',
'traffic_submit']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
    if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
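# For instance (illustrative values only), a filtered payload such as
# {'fail_open': 'enable', 'deep_app_insp_timeout': 6} becomes
# {'fail-open': 'enable', 'deep-app-insp-timeout': 6}, the hyphenated key
# style the FortiOS configuration API expects.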
def ips_global(data, fos):
vdom = data['vdom']
ips_global_data = data['ips_global']
filtered_data = underscore_to_hyphen(filter_ips_global_data(ips_global_data))
return fos.set('ips',
'global',
data=filtered_data,
vdom=vdom)
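# A DELETE answered with 404 is treated as a success below: the object is
# already absent, so the requested end state has been reached.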
def is_successful_status(status):
    return status['status'] == "success" or \
        (status['http_method'] == "DELETE" and status['http_status'] == 404)
def fortios_ips(data, fos):
if data['ips_global']:
resp = ips_global(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"ips_global": {
"required": False, "type": "dict", "default": None,
"options": {
"anomaly_mode": {"required": False, "type": "str",
"choices": ["periodical", "continuous"]},
"database": {"required": False, "type": "str",
"choices": ["regular", "extended"]},
"deep_app_insp_db_limit": {"required": False, "type": "int"},
"deep_app_insp_timeout": {"required": False, "type": "int"},
"engine_count": {"required": False, "type": "int"},
"exclude_signatures": {"required": False, "type": "str",
"choices": ["none", "industrial"]},
"fail_open": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"intelligent_mode": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"session_limit_mode": {"required": False, "type": "str",
"choices": ["accurate", "heuristic"]},
"skype_client_public_ipaddr": {"required": False, "type": "str"},
"socket_size": {"required": False, "type": "int"},
"sync_session_ttl": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"traffic_submit": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_ips(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_ips(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
crs4/blast-python
|
refs/heads/master
|
BlastPython/IO_SRB.py
|
1
|
# BEGIN_COPYRIGHT
#
# Copyright (C) 2014 CRS4.
#
# This file is part of blast-python.
#
# blast-python is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# blast-python is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# blast-python. If not, see <http://www.gnu.org/licenses/>.
#
# END_COPYRIGHT
"""
Drivers to access data files -- SRB access.
"""
import srb
from buffered_IO import buffered_IO
class SRB_IO:
def __init__(self, path, filename, host, port, domain, user, passwd,
bufsize):
self.conn = srb.connect(
host, port, domain, "ENCRYPT1", user, passwd, ""
)
self.fd = srb.obj_open(self.conn, path, filename, 0)
self.buffer_obj = buffered_IO(self, bufsize)
def __iter__(self):
return self
def read(self, size):
return srb.obj_read(self.conn, self.fd, size)
def write(self):
pass
def close(self):
srb.disconnect(self.conn)
def next(self):
line = self.buffer_obj.readline()
if line:
return line
else:
raise StopIteration
|
reddraggone9/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/srf.py
|
102
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
parse_iso8601,
xpath_text,
)
class SrfIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.srf\.ch/play(?:er)?/tv/[^/]+/video/(?P<display_id>[^?]+)\?id=|tp\.srgssr\.ch/p/flash\?urn=urn:srf:ais:video:)(?P<id>[0-9a-f\-]{36})'
_TESTS = [{
'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
'md5': '4cd93523723beff51bb4bee974ee238d',
'info_dict': {
'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5',
'display_id': 'snowden-beantragt-asyl-in-russland',
'ext': 'm4v',
'upload_date': '20130701',
'title': 'Snowden beantragt Asyl in Russland',
'timestamp': 1372713995,
}
}, {
# No Speichern (Save) button
'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa',
'md5': 'd97e236e80d1d24729e5d0953d276a4f',
'info_dict': {
'id': '677f5829-e473-4823-ac83-a1087fe97faa',
'display_id': 'jaguar-xk120-shadow-und-tornado-dampflokomotive',
'ext': 'flv',
'upload_date': '20130710',
'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive',
'timestamp': 1373493600,
},
}, {
'url': 'http://www.srf.ch/player/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
'only_matching': True,
}, {
'url': 'https://tp.srgssr.ch/p/flash?urn=urn:srf:ais:video:28e1a57d-5b76-4399-8ab3-9097f071e6c5',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
display_id = re.match(self._VALID_URL, url).group('display_id') or video_id
video_data = self._download_xml(
'http://il.srgssr.ch/integrationlayer/1.0/ue/srf/video/play/%s.xml' % video_id,
display_id)
title = xpath_text(
video_data, './AssetMetadatas/AssetMetadata/title', fatal=True)
thumbnails = [{
'url': s.text
} for s in video_data.findall('.//ImageRepresentation/url')]
timestamp = parse_iso8601(xpath_text(video_data, './createdDate'))
# The <duration> field in XML is different from the exact duration, skipping
formats = []
for item in video_data.findall('./Playlists/Playlist') + video_data.findall('./Downloads/Download'):
for url_node in item.findall('url'):
quality = url_node.attrib['quality']
full_url = url_node.text
original_ext = determine_ext(full_url)
format_id = '%s-%s' % (quality, item.attrib['protocol'])
if original_ext == 'f4m':
formats.extend(self._extract_f4m_formats(
full_url + '?hdcore=3.4.0', display_id, f4m_id=format_id))
elif original_ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
full_url, display_id, 'mp4', m3u8_id=format_id))
else:
formats.append({
'url': full_url,
'ext': original_ext,
'format_id': format_id,
'quality': 0 if 'HD' in quality else -1,
'preference': 1,
})
self._sort_formats(formats)
subtitles = {}
subtitles_data = video_data.find('Subtitles')
if subtitles_data is not None:
subtitles_list = [{
'url': sub.text,
'ext': determine_ext(sub.text),
} for sub in subtitles_data]
if subtitles_list:
subtitles['de'] = subtitles_list
return {
'id': video_id,
'display_id': display_id,
'formats': formats,
'title': title,
'thumbnails': thumbnails,
'timestamp': timestamp,
'subtitles': subtitles,
}
|
Krylon360/vimeo-graphite-web
|
refs/heads/master
|
webapp/graphite/events/views.py
|
2
|
import datetime
import time
from django.http import HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.utils.timezone import localtime, now
from graphite.util import json
from graphite.events import models
from graphite.render.attime import parseATTime
from django.core.urlresolvers import get_script_prefix
def to_timestamp(dt):
return time.mktime(dt.timetuple())
class EventEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return to_timestamp(obj)
return json.JSONEncoder.default(self, obj)
def view_events(request):
if request.method == "GET":
context = { 'events' : fetch(request),
'slash' : get_script_prefix()
}
return render_to_response("events.html", context)
else:
return post_event(request)
def detail(request, event_id):
e = get_object_or_404(models.Event, pk=event_id)
context = { 'event' : e,
'slash' : get_script_prefix()
}
return render_to_response("event.html", context)
def post_event(request):
if request.method == 'POST':
event = json.loads(request.raw_post_data)
assert isinstance(event, dict)
values = {}
values["what"] = event["what"]
values["tags"] = event.get("tags", None)
values["when"] = datetime.datetime.fromtimestamp(
event.get("when", time.time()))
if "data" in event:
values["data"] = event["data"]
e = models.Event(**values)
e.save()
return HttpResponse(status=200)
else:
return HttpResponse(status=405)
def get_data(request):
if 'jsonp' in request.REQUEST:
response = HttpResponse(
"%s(%s)" % (request.REQUEST.get('jsonp'),
json.dumps(fetch(request), cls=EventEncoder)),
mimetype='text/javascript')
else:
response = HttpResponse(
json.dumps(fetch(request), cls=EventEncoder),
mimetype="application/json")
return response
def fetch(request):
#XXX we need to move to USE_TZ=True to get rid of localtime() conversions
if request.GET.get("from", None) is not None:
time_from = localtime(parseATTime(request.GET["from"])).replace(tzinfo=None)
else:
time_from = datetime.datetime.fromtimestamp(0)
if request.GET.get("until", None) is not None:
time_until = localtime(parseATTime(request.GET["until"])).replace(tzinfo=None)
else:
time_until = now()
tags = request.GET.get("tags", None)
if tags is not None:
tags = request.GET.get("tags").split(" ")
return [x.as_dict() for x in
models.Event.find_events(time_from, time_until, tags=tags)]
|
isandlaTech/cohorte-demos
|
refs/heads/dev
|
led/dump/led-demo-raspberry/cohorte/dist/cohorte-1.0.0-20141209.234423-41-python-distribution/repo/cohorte/composer/node/distributor_csp.py
|
4
|
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Node Composer: Isolate Distributor based on ``ortools``
Clusters the components of a composition into groups according to several
criteria.
:author: Thomas Calmant
:license: Apache Software License 2.0
:version: 1.0.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cohorte.composer.node.beans import EligibleIsolate
# Module version
__version_info__ = (1, 0, 0)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# OR-Tools Linear solver
from ortools.linear_solver import pywraplp as ortools
# Composer
import cohorte.composer
# iPOPO Decorators
from pelix.ipopo.decorators import ComponentFactory, Provides, \
Instantiate
# Standard library
import logging
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory()
@Provides(cohorte.composer.SERVICE_DISTRIBUTOR_ISOLATE)
@Instantiate('cohorte-composer-node-distributor')
class IsolateDistributor(object):
"""
Clusters components into groups. Each group corresponds to an isolate.
"""
def __init__(self):
"""
Sets up members
"""
# Number of calls to this distributor
self._nb_distribution = 0
# Names of components considered unstable
self.__unstable = set()
def distribute(self, components, existing_isolates):
"""
Computes the distribution of the given components
:param components: A list of RawComponent beans
:param existing_isolates: A set of pre-existing eligible isolates
:return: A tuple of tuples: updated and new EligibleIsolate beans
"""
# Prepare the lists of updated and new isolates
updated_isolates = set()
new_isolates = set()
# Create a map name -> isolate bean
map_isolates = {isolate.name: isolate for isolate in existing_isolates}
# 1. Predefined host isolates
reserved_isolates = set()
remaining = set()
for component in components:
if component.isolate:
isolate_name = component.isolate
reserved_isolates.add(isolate_name)
try:
# Use existing bean
isolate = map_isolates[isolate_name]
isolate.add_component(component)
updated_isolates.add(isolate)
except KeyError:
# Create a new bean
isolate = EligibleIsolate(component.isolate,
component.language,
[component])
map_isolates[isolate_name] = isolate
new_isolates.add(isolate)
else:
# Component must be treated afterwards
remaining.add(component)
# Hide reserved isolates
for isolate_name in reserved_isolates:
map_isolates.pop(isolate_name)
# 2. Unstable components must be isolated
# ... group remaining components by language
remaining_stable = {}
for component in remaining:
if component.name in self.__unstable:
# Component is known as unstable: isolate it
isolate = EligibleIsolate(None, component.language,
[component])
new_isolates.add(isolate)
else:
# Store stable component, grouped by language
remaining_stable.setdefault(component.language, set()) \
.add(component)
for language, components in remaining_stable.items():
# Gather components according to their compatibility
updated, added = self.__csp_dist(map_isolates, components,
language)
updated_isolates.update(updated)
new_isolates.update(added)
# Return tuples of updated and new isolates beans
return tuple(updated_isolates), tuple(new_isolates)
def __csp_dist(self, map_isolates, components, language):
"""
Gather components using OR-Tools
:param map_isolates: A Name -> EligibleIsolate bean map
:param components: Set of components to gather
:param language: Implementation language of components
:return: A tuple: (updated isolates, new isolates)
"""
# Normalize entries (components and isolates)
components_names = sorted(component.name for component in components)
nb_components = len(components_names)
isolates_names = sorted(map_isolates.keys())
# Compute boundaries
max_isolates = max(len(components_names), len(isolates_names)) + 1
# Prepare the incompatibility matrix
incompat_matrix = self.__make_incompatibility_matrix(components_names)
# Prepare the problem solver
solver = ortools.Solver("Components distribution",
ortools.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
# Declare variables
# ... component on isolate (Iso_i <=> Iso_i_j = 1)
iso = {}
for i, name in enumerate(components_names):
for j in range(max_isolates):
iso[i, j] = solver.IntVar(0, 1, "{0} on {1}".format(name, j))
# ... assigned isolates (for the objective)
assigned_isolates = [solver.IntVar(0, 1, "Isolate {0}".format(i))
for i in range(max_isolates)]
# ... number of isolates for a component (must be 1)
nb_component_isolate = [solver.Sum(iso[i, j]
for j in range(max_isolates))
for i in range(nb_components)]
# ... number of components for an isolate
nb_isolate_components = [solver.Sum(iso[i, j]
for i in range(nb_components))
for j in range(max_isolates)]
# Constraints:
# ... 1 isolate per component
for i in range(nb_components):
solver.Add(nb_component_isolate[i] == 1)
        # ... an isolate counts as assigned as soon as it hosts a component
for j in range(max_isolates):
solver.Add(assigned_isolates[j]
>= nb_isolate_components[j] / nb_components)
# ... Avoid incompatible components on the same isolate
for i in range(len(incompat_matrix)):
for j in range(max_isolates):
                # Both members of an incompatible pair on the same isolate
                # would sum to 2, which the bound below forbids
solver.Add(iso[incompat_matrix[i][0], j]
+ iso[incompat_matrix[i][1], j]
<= assigned_isolates[j])
# Define the objective: minimize the number of isolates
nb_assigned_isolates = solver.Sum(assigned_isolates)
solver.Minimize(nb_assigned_isolates)
# Solve the problem
solver.Solve()
# Print results
_logger.info("Number of isolates.: %s",
int(solver.Objective().Value()))
_logger.info("Isolates used......: %s",
[int(assigned_isolates[i].SolutionValue())
for i in range(max_isolates)])
for i in range(nb_components):
for j in range(max_isolates):
if int(iso[i, j].SolutionValue()) == 1:
break
else:
# No isolate associated ?
j = None
_logger.info("Component %s: Isolate %s", components_names[i], j)
_logger.info("WallTime...: %s", solver.WallTime())
_logger.info("Iterations.: %s", solver.Iterations())
# TODO: Prepare result isolates
updated_isolates = set()
added_isolates = [EligibleIsolate(None, language, components)]
return updated_isolates, added_isolates
def __make_incompatibility_matrix(self, components_names):
"""
Prepares the incompatibility matrix
:param components_names: List of components names.
:return: A sorted incompatibility matrix
"""
# The incompatibility dictionary: component -> incompatible
incompat = {'Component-A': ['Nemesis-A'],
'Component-B': ['Nemesis-B']}
# Prepare the matrix (set of pairs)
incompat_matrix = set()
for name, incompat_names in incompat.items():
idx_name = components_names.index(name)
for incompat_name in incompat_names:
try:
idx_incompat = components_names.index(incompat_name)
# Store a sorted tuple (hashable)
incompat_matrix.add(tuple(
sorted((idx_name, idx_incompat))))
except ValueError:
# An incompatible component is not in the composition
pass
        # Return a sorted tuple of sorted tuples
return tuple(sorted(incompat_matrix))
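    # Illustrative only: with components_names == ['Component-A', 'Nemesis-A'],
    # the method above returns ((0, 1),), marking indices 0 and 1 as an
    # incompatible pair.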
def handle_event(self, event):
"""
Handles a component/composition event
:param event: The event to handle
"""
# TODO: notify the crash and incompatibility stores
pass
|
yannickcr/Sick-Beard
|
refs/heads/development
|
lib/requests/packages/chardet2/euctwfreq.py
|
323
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio is about 25% of the ideal one, still much higher than RDR
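# (worked out: 25% of the ideal ratio 2.98 is 0.25 * 2.98 = 0.745, rounded to
# the 0.75 constant below, which is still well above the RDR of 0.105)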
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Char to FreqOrder table
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = ( \
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
# Everything below is of no interest for detection purposes
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
|
nocarryr/AV-Asset-Manager
|
refs/heads/master
|
avam/assets/migrations/0005_auto_20160130_1426.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-30 20:26
from __future__ import unicode_literals
from django.db import migrations
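# Completes a field consolidation: the temporary temp_* columns on Asset are
# renamed to their final names, and the redundant per-model copies of
# date_acquired/in_use/notes/retired are removed.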
class Migration(migrations.Migration):
dependencies = [
('assets', '0004_auto_20160130_1419'),
]
operations = [
migrations.RenameField(
model_name='asset',
old_name='temp_date_acquired',
new_name='date_acquired',
),
migrations.RenameField(
model_name='asset',
old_name='temp_notes',
new_name='notes',
),
migrations.RenameField(
model_name='asset',
old_name='temp_retired',
new_name='retired',
),
migrations.RemoveField(
model_name='genericaccessory',
name='date_acquired',
),
migrations.RemoveField(
model_name='genericaccessory',
name='in_use',
),
migrations.RemoveField(
model_name='genericaccessory',
name='notes',
),
migrations.RemoveField(
model_name='genericaccessory',
name='retired',
),
migrations.RemoveField(
model_name='genericasset',
name='date_acquired',
),
migrations.RemoveField(
model_name='genericasset',
name='in_use',
),
migrations.RemoveField(
model_name='genericasset',
name='notes',
),
migrations.RemoveField(
model_name='genericasset',
name='retired',
),
migrations.RemoveField(
model_name='ledlight',
name='date_acquired',
),
migrations.RemoveField(
model_name='ledlight',
name='in_use',
),
migrations.RemoveField(
model_name='ledlight',
name='notes',
),
migrations.RemoveField(
model_name='ledlight',
name='retired',
),
migrations.RemoveField(
model_name='movinglight',
name='date_acquired',
),
migrations.RemoveField(
model_name='movinglight',
name='in_use',
),
migrations.RemoveField(
model_name='movinglight',
name='notes',
),
migrations.RemoveField(
model_name='movinglight',
name='retired',
),
migrations.RemoveField(
model_name='movinglightlamp',
name='date_acquired',
),
migrations.RemoveField(
model_name='movinglightlamp',
name='in_use',
),
migrations.RemoveField(
model_name='movinglightlamp',
name='notes',
),
migrations.RemoveField(
model_name='movinglightlamp',
name='retired',
),
migrations.RemoveField(
model_name='projector',
name='date_acquired',
),
migrations.RemoveField(
model_name='projector',
name='in_use',
),
migrations.RemoveField(
model_name='projector',
name='notes',
),
migrations.RemoveField(
model_name='projector',
name='retired',
),
migrations.RemoveField(
model_name='projectorfilter',
name='date_acquired',
),
migrations.RemoveField(
model_name='projectorfilter',
name='in_use',
),
migrations.RemoveField(
model_name='projectorfilter',
name='notes',
),
migrations.RemoveField(
model_name='projectorfilter',
name='retired',
),
migrations.RemoveField(
model_name='projectorlamp',
name='date_acquired',
),
migrations.RemoveField(
model_name='projectorlamp',
name='in_use',
),
migrations.RemoveField(
model_name='projectorlamp',
name='notes',
),
migrations.RemoveField(
model_name='projectorlamp',
name='retired',
),
]
|
vinodkc/spark
|
refs/heads/master
|
python/pyspark/pandas/tests/test_reshape.py
|
15
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
from decimal import Decimal
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.utils import name_like_string
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class ReshapeTest(PandasOnSparkTestCase):
def test_get_dummies(self):
for pdf_or_ps in [
pd.Series([1, 1, 1, 2, 2, 1, 3, 4]),
# pd.Series([1, 1, 1, 2, 2, 1, 3, 4], dtype='category'),
# pd.Series(pd.Categorical([1, 1, 1, 2, 2, 1, 3, 4],
# categories=[4, 3, 2, 1])),
pd.DataFrame(
{
"a": [1, 2, 3, 4, 4, 3, 2, 1],
# 'b': pd.Categorical(list('abcdabcd')),
"b": list("abcdabcd"),
}
),
pd.DataFrame({10: [1, 2, 3, 4, 4, 3, 2, 1], 20: list("abcdabcd")}),
]:
psdf_or_psser = ps.from_pandas(pdf_or_ps)
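            # pandas-on-Spark emits int8 dummy columns, so the pandas
            # reference frame is built with dtype=np.int8 to match.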
self.assert_eq(ps.get_dummies(psdf_or_psser), pd.get_dummies(pdf_or_ps, dtype=np.int8))
psser = ps.Series([1, 1, 1, 2, 2, 1, 3, 4])
with self.assertRaisesRegex(
NotImplementedError, "get_dummies currently does not support sparse"
):
ps.get_dummies(psser, sparse=True)
def test_get_dummies_object(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 4, 3, 2, 1],
# 'a': pd.Categorical([1, 2, 3, 4, 4, 3, 2, 1]),
"b": list("abcdabcd"),
# 'c': pd.Categorical(list('abcdabcd')),
"c": list("abcdabcd"),
}
)
psdf = ps.from_pandas(pdf)
# Explicitly exclude object columns
self.assert_eq(
ps.get_dummies(psdf, columns=["a", "c"]),
pd.get_dummies(pdf, columns=["a", "c"], dtype=np.int8),
)
self.assert_eq(ps.get_dummies(psdf), pd.get_dummies(pdf, dtype=np.int8))
self.assert_eq(ps.get_dummies(psdf.b), pd.get_dummies(pdf.b, dtype=np.int8))
self.assert_eq(
ps.get_dummies(psdf, columns=["b"]), pd.get_dummies(pdf, columns=["b"], dtype=np.int8)
)
self.assertRaises(KeyError, lambda: ps.get_dummies(psdf, columns=("a", "c")))
self.assertRaises(TypeError, lambda: ps.get_dummies(psdf, columns="b"))
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 3, 4, 4, 3, 2, 1], 20: list("abcdabcd"), 30: list("abcdabcd")}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
ps.get_dummies(psdf, columns=[10, 30]),
pd.get_dummies(pdf, columns=[10, 30], dtype=np.int8),
)
self.assertRaises(TypeError, lambda: ps.get_dummies(psdf, columns=10))
def test_get_dummies_date_datetime(self):
pdf = pd.DataFrame(
{
"d": [
datetime.date(2019, 1, 1),
datetime.date(2019, 1, 2),
datetime.date(2019, 1, 1),
],
"dt": [
datetime.datetime(2019, 1, 1, 0, 0, 0),
datetime.datetime(2019, 1, 1, 0, 0, 1),
datetime.datetime(2019, 1, 1, 0, 0, 0),
],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(ps.get_dummies(psdf), pd.get_dummies(pdf, dtype=np.int8))
self.assert_eq(ps.get_dummies(psdf.d), pd.get_dummies(pdf.d, dtype=np.int8))
self.assert_eq(ps.get_dummies(psdf.dt), pd.get_dummies(pdf.dt, dtype=np.int8))
def test_get_dummies_boolean(self):
pdf = pd.DataFrame({"b": [True, False, True]})
psdf = ps.from_pandas(pdf)
self.assert_eq(ps.get_dummies(psdf), pd.get_dummies(pdf, dtype=np.int8))
self.assert_eq(ps.get_dummies(psdf.b), pd.get_dummies(pdf.b, dtype=np.int8))
def test_get_dummies_decimal(self):
pdf = pd.DataFrame({"d": [Decimal(1.0), Decimal(2.0), Decimal(1)]})
psdf = ps.from_pandas(pdf)
self.assert_eq(ps.get_dummies(psdf), pd.get_dummies(pdf, dtype=np.int8))
self.assert_eq(ps.get_dummies(psdf.d), pd.get_dummies(pdf.d, dtype=np.int8), almost=True)
def test_get_dummies_kwargs(self):
# pser = pd.Series([1, 1, 1, 2, 2, 1, 3, 4], dtype='category')
pser = pd.Series([1, 1, 1, 2, 2, 1, 3, 4])
psser = ps.from_pandas(pser)
self.assert_eq(
ps.get_dummies(psser, prefix="X", prefix_sep="-"),
pd.get_dummies(pser, prefix="X", prefix_sep="-", dtype=np.int8),
)
self.assert_eq(
ps.get_dummies(psser, drop_first=True),
pd.get_dummies(pser, drop_first=True, dtype=np.int8),
)
# nan
# pser = pd.Series([1, 1, 1, 2, np.nan, 3, np.nan, 5], dtype='category')
pser = pd.Series([1, 1, 1, 2, np.nan, 3, np.nan, 5])
psser = ps.from_pandas(pser)
self.assert_eq(ps.get_dummies(psser), pd.get_dummies(pser, dtype=np.int8), almost=True)
# dummy_na
self.assert_eq(
ps.get_dummies(psser, dummy_na=True), pd.get_dummies(pser, dummy_na=True, dtype=np.int8)
)
def test_get_dummies_prefix(self):
pdf = pd.DataFrame({"A": ["a", "b", "a"], "B": ["b", "a", "c"], "D": [0, 0, 1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(
ps.get_dummies(psdf, prefix=["foo", "bar"]),
pd.get_dummies(pdf, prefix=["foo", "bar"], dtype=np.int8),
)
self.assert_eq(
ps.get_dummies(psdf, prefix=["foo"], columns=["B"]),
pd.get_dummies(pdf, prefix=["foo"], columns=["B"], dtype=np.int8),
)
self.assert_eq(
ps.get_dummies(psdf, prefix={"A": "foo", "B": "bar"}),
pd.get_dummies(pdf, prefix={"A": "foo", "B": "bar"}, dtype=np.int8),
)
self.assert_eq(
ps.get_dummies(psdf, prefix={"B": "foo", "A": "bar"}),
pd.get_dummies(pdf, prefix={"B": "foo", "A": "bar"}, dtype=np.int8),
)
self.assert_eq(
ps.get_dummies(psdf, prefix={"A": "foo", "B": "bar"}, columns=["A", "B"]),
pd.get_dummies(pdf, prefix={"A": "foo", "B": "bar"}, columns=["A", "B"], dtype=np.int8),
)
with self.assertRaisesRegex(NotImplementedError, "string types"):
ps.get_dummies(psdf, prefix="foo")
with self.assertRaisesRegex(ValueError, "Length of 'prefix' \\(1\\) .* \\(2\\)"):
ps.get_dummies(psdf, prefix=["foo"])
with self.assertRaisesRegex(ValueError, "Length of 'prefix' \\(2\\) .* \\(1\\)"):
ps.get_dummies(psdf, prefix=["foo", "bar"], columns=["B"])
pser = pd.Series([1, 1, 1, 2, 2, 1, 3, 4], name="A")
psser = ps.from_pandas(pser)
self.assert_eq(
ps.get_dummies(psser, prefix="foo"), pd.get_dummies(pser, prefix="foo", dtype=np.int8)
)
# columns are ignored.
self.assert_eq(
ps.get_dummies(psser, prefix=["foo"], columns=["B"]),
pd.get_dummies(pser, prefix=["foo"], columns=["B"], dtype=np.int8),
)
def test_get_dummies_dtype(self):
pdf = pd.DataFrame(
{
# "A": pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c']),
"A": ["a", "b", "a"],
"B": [0, 0, 1],
}
)
psdf = ps.from_pandas(pdf)
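        # pandas added the `dtype` argument to get_dummies in 0.23.0; on older
        # versions, build the expected frame and cast the dummy columns by hand.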
if LooseVersion("0.23.0") <= LooseVersion(pd.__version__):
exp = pd.get_dummies(pdf, dtype="float64")
else:
exp = pd.get_dummies(pdf)
exp = exp.astype({"A_a": "float64", "A_b": "float64"})
res = ps.get_dummies(psdf, dtype="float64")
self.assert_eq(res, exp)
def test_get_dummies_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3, 4, 4, 3, 2, 1],
("x", "b", "2"): list("abcdabcd"),
("y", "c", "3"): list("abcdabcd"),
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
ps.get_dummies(psdf),
pd.get_dummies(pdf, dtype=np.int8).rename(columns=name_like_string),
)
self.assert_eq(
ps.get_dummies(psdf, columns=[("y", "c", "3"), ("x", "a", "1")]),
pd.get_dummies(pdf, columns=[("y", "c", "3"), ("x", "a", "1")], dtype=np.int8).rename(
columns=name_like_string
),
)
self.assert_eq(
ps.get_dummies(psdf, columns=["x"]),
pd.get_dummies(pdf, columns=["x"], dtype=np.int8).rename(columns=name_like_string),
)
self.assert_eq(
ps.get_dummies(psdf, columns=("x", "a")),
pd.get_dummies(pdf, columns=("x", "a"), dtype=np.int8).rename(columns=name_like_string),
)
self.assertRaises(KeyError, lambda: ps.get_dummies(psdf, columns=["z"]))
self.assertRaises(KeyError, lambda: ps.get_dummies(psdf, columns=("x", "c")))
self.assertRaises(ValueError, lambda: ps.get_dummies(psdf, columns=[("x",), "c"]))
self.assertRaises(TypeError, lambda: ps.get_dummies(psdf, columns="x"))
# non-string names
pdf = pd.DataFrame(
{
("x", 1, "a"): [1, 2, 3, 4, 4, 3, 2, 1],
("x", 2, "b"): list("abcdabcd"),
("y", 3, "c"): list("abcdabcd"),
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
ps.get_dummies(psdf),
pd.get_dummies(pdf, dtype=np.int8).rename(columns=name_like_string),
)
self.assert_eq(
ps.get_dummies(psdf, columns=[("y", 3, "c"), ("x", 1, "a")]),
pd.get_dummies(pdf, columns=[("y", 3, "c"), ("x", 1, "a")], dtype=np.int8).rename(
columns=name_like_string
),
)
self.assert_eq(
ps.get_dummies(psdf, columns=["x"]),
pd.get_dummies(pdf, columns=["x"], dtype=np.int8).rename(columns=name_like_string),
)
self.assert_eq(
ps.get_dummies(psdf, columns=("x", 1)),
pd.get_dummies(pdf, columns=("x", 1), dtype=np.int8).rename(columns=name_like_string),
)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_reshape import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
h4ck3rm1k3/github3.py
|
refs/heads/develop
|
tests/test_users.py
|
9
|
import github3
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from tests.utils import (BaseCase, load)
from datetime import datetime
class TestKey(BaseCase):
def __init__(self, methodName='runTest'):
super(TestKey, self).__init__(methodName)
self.key = github3.users.Key(load('key'))
self.api = "https://api.github.com/user/keys/10"
def setUp(self):
super(TestKey, self).setUp()
self.key = github3.users.Key(self.key.as_dict(), self.g)
def test_equality(self):
k = github3.users.Key(self.key.as_dict())
assert self.key == k
k._uniq += "cruft"
assert self.key != k
def test_str(self):
assert str(self.key) == self.key.key
assert repr(self.key).startswith('<User Key')
def test_delete(self):
self.response('', 204)
self.delete(self.api)
self.assertRaises(github3.GitHubError, self.key.delete)
self.not_called()
self.login()
assert self.key.delete()
self.mock_assertions()
def test_update(self):
self.response('key', 200)
self.patch(self.api)
self.conf = {
'data': {
'key': 'fakekey',
'title': 'New title',
}
}
self.assertRaises(github3.GitHubError, self.key.update, None, None)
self.login()
assert self.key.update(None, None) is False
self.not_called()
assert self.key.update(**self.conf['data'])
self.mock_assertions()
class TestPlan(BaseCase):
def __init__(self, methodName='runTest'):
super(TestPlan, self).__init__(methodName)
self.plan = github3.users.Plan({
'name': 'free',
'space': 400,
'collaborators': 10,
'private_repos': 20,
})
def test_str(self):
assert str(self.plan) == self.plan.name
assert repr(self.plan) == '<Plan [free]>'
assert self.plan.is_free()
class TestUser(BaseCase):
def __init__(self, methodName='runTest'):
super(TestUser, self).__init__(methodName)
self.user = github3.users.User(load('user'))
self.api = "https://api.github.com/users/sigmavirus24"
def setUp(self):
super(TestUser, self).setUp()
self.user = github3.users.User(self.user.as_dict(), self.g)
if hasattr(self.user.name, 'decode'):
self.user.name = self.user.name.decode('utf-8')
def test_refresh(self):
"""This sort of tests all instances of refresh for good measure."""
self.response('', 304)
self.get(self.api)
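        # Seed the conditional-request state: refresh(True) should send
        # If-Modified-Since while last_modified is set, then fall back to
        # If-None-Match once last_modified is cleared.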
self.user.last_modified = last_modified = datetime.now().strftime(
'%a, %d %b %Y %H:%M:%S GMT'
)
self.user.etag = etag = '644b5b0155e6404a9cc4bd9d8b1ae730'
expected_headers = {
'If-Modified-Since': last_modified,
}
self.user.refresh(True)
self.request.assert_called_with('GET', self.api,
headers=expected_headers,
allow_redirects=True)
self.user.last_modified = None
expected_headers = {
'If-None-Match': etag
}
self.user.refresh(True)
self.request.assert_called_with('GET', self.api,
headers=expected_headers,
allow_redirects=True)
self.response('user', 200)
self.user.refresh()
self.mock_assertions()
def test_str(self):
assert str(self.user) == 'sigmavirus24'
assert repr(self.user) == '<User [sigmavirus24:Ian Cordasco]>'
def test_add_email_address(self):
self.assertRaises(github3.GitHubError, self.user.add_email_address,
'foo')
self.not_called()
self.login()
with patch.object(github3.users.User, 'add_email_addresses') as p:
self.user.add_email_address('foo')
p.assert_called_once_with(['foo'])
def test_add_email_addresses(self):
self.response('emails', 201, _iter=True)
self.post(self.github_url + 'user/emails')
self.conf = {
'data': '["foo@bar.com"]',
}
self.assertRaises(github3.GitHubError, self.user.add_email_addresses,
[])
self.not_called()
self.login()
self.user.add_email_addresses(['foo@bar.com'])
self.mock_assertions()
def test_delete_email_address(self):
self.assertRaises(github3.GitHubError, self.user.delete_email_address,
'foo')
self.not_called()
self.login()
with patch.object(github3.users.User, 'delete_email_addresses') as p:
self.user.delete_email_address('foo')
p.assert_called_once_with(['foo'])
def test_delete_email_addresses(self):
self.response('', 204)
self.delete(self.github_url + 'user/emails')
self.conf = {
'data': '["foo@bar.com"]'
}
self.assertRaises(github3.GitHubError,
self.user.delete_email_addresses,
[])
self.not_called()
self.login()
assert self.user.delete_email_addresses(['foo@bar.com'])
self.mock_assertions()
def test_is_assignee_on(self):
self.response('', 404)
self.get(self.github_url + 'repos/abc/def/assignees/sigmavirus24')
assert self.user.is_assignee_on('abc', 'def') is False
self.mock_assertions()
def test_is_following(self):
self.response('', 204)
self.get(self.api + '/following/kennethreitz')
assert self.user.is_following('kennethreitz')
self.mock_assertions()
def test_equality(self):
u = github3.users.User(load('user'))
assert self.user == u
u._uniq += 1
assert self.user != u
|
Alwnikrotikz/autokey
|
refs/heads/master
|
src/lib/gtkui/dialogs.py
|
47
|
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Chris Dekter
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging, sys, os, re
from gi.repository import Gtk, Gdk
#import gettext
import locale
GETTEXT_DOMAIN = 'autokey'
locale.setlocale(locale.LC_ALL, '')
#for module in Gtk.glade, gettext:
# module.bindtextdomain(GETTEXT_DOMAIN)
# module.textdomain(GETTEXT_DOMAIN)
__all__ = ["validate", "EMPTY_FIELD_REGEX", "AbbrSettingsDialog", "HotkeySettingsDialog", "WindowFilterSettingsDialog", "RecordDialog"]
from autokey import model, iomediator
import configwindow
WORD_CHAR_OPTIONS = {
"All non-word" : model.DEFAULT_WORDCHAR_REGEX,
"Space and Enter" : r"[^ \n]",
"Tab" : r"[^\t]"
}
WORD_CHAR_OPTIONS_ORDERED = ["All non-word", "Space and Enter", "Tab"]
EMPTY_FIELD_REGEX = re.compile(r"^ *$", re.UNICODE)
def validate(expression, message, widget, parent):
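    # Shows a warning dialog and refocuses the offending widget when the
    # expression is falsy; callers use the returned value to decide whether
    # to proceed.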
if not expression:
dlg = Gtk.MessageDialog(parent, Gtk.DialogFlags.MODAL|Gtk.DialogFlags.DESTROY_WITH_PARENT, Gtk.MessageType.WARNING,
Gtk.ButtonsType.OK, message)
dlg.run()
dlg.destroy()
if widget is not None:
widget.grab_focus()
return expression
class DialogBase:
def __init__(self):
self.connect("close", self.on_close)
self.connect("delete_event", self.on_close)
def on_close(self, widget, data=None):
self.hide()
return True
def on_cancel(self, widget, data=None):
self.load(self.targetItem)
self.ui.response(Gtk.ResponseType.CANCEL)
self.hide()
def on_ok(self, widget, data=None):
if self.valid():
self.response(Gtk.ResponseType.OK)
self.hide()
def __getattr__(self, attr):
# Magic fudge to allow us to pretend to be the ui class we encapsulate
return getattr(self.ui, attr)
def on_response(self, widget, responseId):
self.closure(responseId)
if responseId < 0:
self.hide()
self.emit_stop_by_name('response')
class AbbrSettingsDialog(DialogBase):
def __init__(self, parent, configManager, closure):
builder = configwindow.get_ui("abbrsettings.xml")
self.ui = builder.get_object("abbrsettings")
builder.connect_signals(self)
self.ui.set_transient_for(parent)
self.configManager = configManager
self.closure = closure
self.abbrList = builder.get_object("abbrList")
self.addButton = builder.get_object("addButton")
self.removeButton = builder.get_object("removeButton")
self.wordCharCombo = builder.get_object("wordCharCombo")
self.removeTypedCheckbox = builder.get_object("removeTypedCheckbox")
self.omitTriggerCheckbox = builder.get_object("omitTriggerCheckbox")
self.matchCaseCheckbox = builder.get_object("matchCaseCheckbox")
self.ignoreCaseCheckbox = builder.get_object("ignoreCaseCheckbox")
self.triggerInsideCheckbox = builder.get_object("triggerInsideCheckbox")
self.immediateCheckbox = builder.get_object("immediateCheckbox")
DialogBase.__init__(self)
# set up list view
store = Gtk.ListStore(str)
self.abbrList.set_model(store)
column1 = Gtk.TreeViewColumn(_("Abbreviations"))
textRenderer = Gtk.CellRendererText()
textRenderer.set_property("editable", True)
textRenderer.connect("edited", self.on_cell_modified)
textRenderer.connect("editing-canceled", self.on_cell_editing_cancelled)
column1.pack_end(textRenderer, True)
column1.add_attribute(textRenderer, "text", 0)
column1.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
self.abbrList.append_column(column1)
for item in WORD_CHAR_OPTIONS_ORDERED:
self.wordCharCombo.append_text(item)
def load(self, item):
self.targetItem = item
self.abbrList.get_model().clear()
if model.TriggerMode.ABBREVIATION in item.modes:
for abbr in item.abbreviations:
self.abbrList.get_model().append((abbr.encode("utf-8"),))
self.removeButton.set_sensitive(True)
firstIter = self.abbrList.get_model().get_iter_first()
self.abbrList.get_selection().select_iter(firstIter)
else:
self.removeButton.set_sensitive(False)
self.removeTypedCheckbox.set_active(item.backspace)
self.__resetWordCharCombo()
wordCharRegex = item.get_word_chars()
if wordCharRegex in WORD_CHAR_OPTIONS.values():
# Default wordchar regex used
for desc, regex in WORD_CHAR_OPTIONS.iteritems():
if item.get_word_chars() == regex:
self.wordCharCombo.set_active(WORD_CHAR_OPTIONS_ORDERED.index(desc))
break
else:
# Custom wordchar regex used
self.wordCharCombo.append_text(model.extract_wordchars(wordCharRegex).encode("utf-8"))
self.wordCharCombo.set_active(len(WORD_CHAR_OPTIONS))
if isinstance(item, model.Folder):
self.omitTriggerCheckbox.hide()
else:
self.omitTriggerCheckbox.show()
self.omitTriggerCheckbox.set_active(item.omitTrigger)
if isinstance(item, model.Phrase):
self.matchCaseCheckbox.show()
self.matchCaseCheckbox.set_active(item.matchCase)
else:
self.matchCaseCheckbox.hide()
self.ignoreCaseCheckbox.set_active(item.ignoreCase)
self.triggerInsideCheckbox.set_active(item.triggerInside)
self.immediateCheckbox.set_active(item.immediate)
def save(self, item):
item.modes.append(model.TriggerMode.ABBREVIATION)
item.clear_abbreviations()
item.abbreviations = self.get_abbrs()
item.backspace = self.removeTypedCheckbox.get_active()
option = self.wordCharCombo.get_active_text()
if option in WORD_CHAR_OPTIONS:
item.set_word_chars(WORD_CHAR_OPTIONS[option])
else:
item.set_word_chars(model.make_wordchar_re(option))
if not isinstance(item, model.Folder):
item.omitTrigger = self.omitTriggerCheckbox.get_active()
if isinstance(item, model.Phrase):
item.matchCase = self.matchCaseCheckbox.get_active()
item.ignoreCase = self.ignoreCaseCheckbox.get_active()
item.triggerInside = self.triggerInsideCheckbox.get_active()
item.immediate = self.immediateCheckbox.get_active()
def reset(self):
self.abbrList.get_model().clear()
self.__resetWordCharCombo()
self.removeButton.set_sensitive(False)
self.wordCharCombo.set_active(0)
self.omitTriggerCheckbox.set_active(False)
self.removeTypedCheckbox.set_active(True)
self.matchCaseCheckbox.set_active(False)
self.ignoreCaseCheckbox.set_active(False)
self.triggerInsideCheckbox.set_active(False)
self.immediateCheckbox.set_active(False)
def __resetWordCharCombo(self):
self.wordCharCombo.remove_all()
for item in WORD_CHAR_OPTIONS_ORDERED:
self.wordCharCombo.append_text(item)
self.wordCharCombo.set_active(0)
def get_abbrs(self):
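        # Walk the ListStore backing the abbreviation list and collect each
        # row's text, de-duplicating with set().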
ret = []
model = self.abbrList.get_model()
i = iter(model)
try:
while True:
text = model.get_value(i.next().iter, 0)
ret.append(text.decode("utf-8"))
except StopIteration:
pass
return list(set(ret))
def get_abbrs_readable(self):
abbrs = self.get_abbrs()
if len(abbrs) == 1:
return abbrs[0].encode("utf-8")
else:
return "[%s]" % ','.join([a.encode("utf-8") for a in abbrs])
def valid(self):
if not validate(len(self.get_abbrs()) > 0, _("You must specify at least one abbreviation"),
self.addButton, self.ui): return False
return True
def reset_focus(self):
self.addButton.grab_focus()
# Signal handlers
def on_cell_editing_cancelled(self, renderer, data=None):
model, curIter = self.abbrList.get_selection().get_selected()
oldText = model.get_value(curIter, 0) or ""
self.on_cell_modified(renderer, None, oldText)
def on_cell_modified(self, renderer, path, newText, data=None):
model, curIter = self.abbrList.get_selection().get_selected()
oldText = model.get_value(curIter, 0) or ""
if EMPTY_FIELD_REGEX.match(newText) and EMPTY_FIELD_REGEX.match(oldText):
self.on_removeButton_clicked(renderer)
else:
model.set(curIter, 0, newText)
def on_addButton_clicked(self, widget, data=None):
model = self.abbrList.get_model()
newIter = model.append()
self.abbrList.set_cursor(model.get_path(newIter), self.abbrList.get_column(0), True)
self.removeButton.set_sensitive(True)
def on_removeButton_clicked(self, widget, data=None):
model, curIter = self.abbrList.get_selection().get_selected()
model.remove(curIter)
if model.get_iter_first() is None:
self.removeButton.set_sensitive(False)
else:
self.abbrList.get_selection().select_iter(model.get_iter_first())
def on_abbrList_cursorchanged(self, widget, data=None):
pass
def on_ignoreCaseCheckbox_stateChanged(self, widget, data=None):
if not self.ignoreCaseCheckbox.get_active():
self.matchCaseCheckbox.set_active(False)
def on_matchCaseCheckbox_stateChanged(self, widget, data=None):
if self.matchCaseCheckbox.get_active():
self.ignoreCaseCheckbox.set_active(True)
def on_immediateCheckbox_stateChanged(self, widget, data=None):
if self.immediateCheckbox.get_active():
self.omitTriggerCheckbox.set_active(False)
self.omitTriggerCheckbox.set_sensitive(False)
self.wordCharCombo.set_sensitive(False)
else:
self.omitTriggerCheckbox.set_sensitive(True)
self.wordCharCombo.set_sensitive(True)
class HotkeySettingsDialog(DialogBase):
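    # Maps raw key characters to a printable form for the UI;
    # REVERSE_KEY_MAP inverts this when the hotkey is saved.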
KEY_MAP = {
' ' : "<space>",
}
REVERSE_KEY_MAP = {}
for key, value in KEY_MAP.iteritems():
REVERSE_KEY_MAP[value] = key
def __init__(self, parent, configManager, closure):
builder = configwindow.get_ui("hotkeysettings.xml")
self.ui = builder.get_object("hotkeysettings")
builder.connect_signals(self)
self.ui.set_transient_for(parent)
self.configManager = configManager
self.closure = closure
self.key = None
self.controlButton = builder.get_object("controlButton")
self.altButton = builder.get_object("altButton")
self.shiftButton = builder.get_object("shiftButton")
self.superButton = builder.get_object("superButton")
self.hyperButton = builder.get_object("hyperButton")
self.metaButton = builder.get_object("metaButton")
self.setButton = builder.get_object("setButton")
self.keyLabel = builder.get_object("keyLabel")
DialogBase.__init__(self)
def load(self, item):
self.targetItem = item
self.setButton.set_sensitive(True)
if model.TriggerMode.HOTKEY in item.modes:
self.controlButton.set_active(iomediator.Key.CONTROL in item.modifiers)
self.altButton.set_active(iomediator.Key.ALT in item.modifiers)
self.shiftButton.set_active(iomediator.Key.SHIFT in item.modifiers)
self.superButton.set_active(iomediator.Key.SUPER in item.modifiers)
self.hyperButton.set_active(iomediator.Key.HYPER in item.modifiers)
self.metaButton.set_active(iomediator.Key.META in item.modifiers)
key = item.hotKey
if key in self.KEY_MAP:
keyText = self.KEY_MAP[key]
else:
keyText = key
self._setKeyLabel(keyText)
self.key = keyText
else:
self.reset()
def save(self, item):
item.modes.append(model.TriggerMode.HOTKEY)
# Build modifier list
modifiers = self.build_modifiers()
keyText = self.key
if keyText in self.REVERSE_KEY_MAP:
key = self.REVERSE_KEY_MAP[keyText]
else:
key = keyText
        assert key is not None, "Attempt to set hotkey with no key"
item.set_hotkey(modifiers, key)
def reset(self):
self.controlButton.set_active(False)
self.altButton.set_active(False)
self.shiftButton.set_active(False)
self.superButton.set_active(False)
self.hyperButton.set_active(False)
self.metaButton.set_active(False)
self._setKeyLabel(_("(None)"))
self.key = None
self.setButton.set_sensitive(True)
def set_key(self, key, modifiers=[]):
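        # Presumably invoked from the key-grabber thread, hence the
        # threads_enter/threads_leave bracketing around GTK calls.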
Gdk.threads_enter()
if self.KEY_MAP.has_key(key):
key = self.KEY_MAP[key]
self._setKeyLabel(key)
self.key = key
self.controlButton.set_active(iomediator.Key.CONTROL in modifiers)
self.altButton.set_active(iomediator.Key.ALT in modifiers)
self.shiftButton.set_active(iomediator.Key.SHIFT in modifiers)
self.superButton.set_active(iomediator.Key.SUPER in modifiers)
self.hyperButton.set_active(iomediator.Key.HYPER in modifiers)
self.metaButton.set_active(iomediator.Key.META in modifiers)
self.setButton.set_sensitive(True)
Gdk.threads_leave()
def cancel_grab(self):
Gdk.threads_enter()
self.setButton.set_sensitive(True)
self._setKeyLabel(self.key)
Gdk.threads_leave()
def build_modifiers(self):
modifiers = []
if self.controlButton.get_active():
modifiers.append(iomediator.Key.CONTROL)
if self.altButton.get_active():
modifiers.append(iomediator.Key.ALT)
if self.shiftButton.get_active():
modifiers.append(iomediator.Key.SHIFT)
if self.superButton.get_active():
modifiers.append(iomediator.Key.SUPER)
if self.hyperButton.get_active():
modifiers.append(iomediator.Key.HYPER)
if self.metaButton.get_active():
modifiers.append(iomediator.Key.META)
modifiers.sort()
return modifiers
def _setKeyLabel(self, key):
self.keyLabel.set_text(_("Key: ") + key)
def valid(self):
if not validate(self.key is not None, _("You must specify a key for the hotkey."),
None, self.ui): return False
return True
def on_setButton_pressed(self, widget, data=None):
self.setButton.set_sensitive(False)
self.keyLabel.set_text(_("Press a key..."))
self.grabber = iomediator.KeyGrabber(self)
self.grabber.start()
class GlobalHotkeyDialog(HotkeySettingsDialog):
def load(self, item):
self.targetItem = item
if item.enabled:
self.controlButton.set_active(iomediator.Key.CONTROL in item.modifiers)
self.altButton.set_active(iomediator.Key.ALT in item.modifiers)
self.shiftButton.set_active(iomediator.Key.SHIFT in item.modifiers)
self.superButton.set_active(iomediator.Key.SUPER in item.modifiers)
self.hyperButton.set_active(iomediator.Key.HYPER in item.modifiers)
self.metaButton.set_active(iomediator.Key.META in item.modifiers)
key = item.hotKey
if key in self.KEY_MAP:
keyText = self.KEY_MAP[key]
else:
keyText = key
self._setKeyLabel(keyText)
self.key = keyText
else:
self.reset()
def save(self, item):
# Build modifier list
modifiers = self.build_modifiers()
keyText = self.key
if keyText in self.REVERSE_KEY_MAP:
key = self.REVERSE_KEY_MAP[keyText]
else:
key = keyText
        assert key is not None, "Attempt to set hotkey with no key"
item.set_hotkey(modifiers, key)
def valid(self):
configManager = self.configManager
modifiers = self.build_modifiers()
regex = self.targetItem.get_applicable_regex()
pattern = None
if regex is not None: pattern = regex.pattern
unique, conflicting = configManager.check_hotkey_unique(modifiers, self.key, pattern, self.targetItem)
if not validate(unique, _("The hotkey is already in use for %s.") % conflicting, None,
self.ui): return False
if not validate(self.key is not None, _("You must specify a key for the hotkey."),
None, self.ui): return False
return True
class WindowFilterSettingsDialog(DialogBase):
def __init__(self, parent, closure):
builder = configwindow.get_ui("windowfiltersettings.xml")
self.ui = builder.get_object("windowfiltersettings")
builder.connect_signals(self)
self.ui.set_transient_for(parent)
self.closure = closure
self.triggerRegexEntry = builder.get_object("triggerRegexEntry")
self.recursiveButton = builder.get_object("recursiveButton")
self.detectButton = builder.get_object("detectButton")
DialogBase.__init__(self)
def load(self, item):
self.targetItem = item
if not isinstance(item, model.Folder):
self.recursiveButton.hide()
else:
self.recursiveButton.show()
if not item.has_filter():
self.reset()
else:
self.triggerRegexEntry.set_text(item.get_filter_regex())
self.recursiveButton.set_active(item.isRecursive)
def save(self, item):
item.set_window_titles(self.get_filter_text())
item.set_filter_recursive(self.get_is_recursive())
def reset(self):
self.triggerRegexEntry.set_text("")
self.recursiveButton.set_active(False)
def get_filter_text(self):
return self.triggerRegexEntry.get_text().decode("utf-8")
def get_is_recursive(self):
return self.recursiveButton.get_active()
def valid(self):
return True
def reset_focus(self):
self.triggerRegexEntry.grab_focus()
def on_response(self, widget, responseId):
self.closure(responseId)
def receive_window_info(self, info):
Gdk.threads_enter()
dlg = DetectDialog(self.ui)
dlg.populate(info)
response = dlg.run()
if response == Gtk.ResponseType.OK:
self.triggerRegexEntry.set_text(dlg.get_choice().encode("utf-8"))
self.detectButton.set_sensitive(True)
Gdk.threads_leave()
def on_detectButton_pressed(self, widget, data=None):
#self.__dlg =
widget.set_sensitive(False)
self.grabber = iomediator.WindowGrabber(self)
self.grabber.start()
class DetectDialog(DialogBase):
def __init__(self, parent):
builder = configwindow.get_ui("detectdialog.xml")
self.ui = builder.get_object("detectdialog")
builder.connect_signals(self)
self.ui.set_transient_for(parent)
self.classLabel = builder.get_object("classLabel")
self.titleLabel = builder.get_object("titleLabel")
self.classRadioButton = builder.get_object("classRadioButton")
self.titleRadioButton = builder.get_object("titleRadioButton")
DialogBase.__init__(self)
def populate(self, windowInfo):
self.titleLabel.set_text(_("Window title: %s") % windowInfo[0].encode("utf-8"))
self.classLabel.set_text(_("Window class: %s") % windowInfo[1].encode("utf-8"))
self.windowInfo = windowInfo
def get_choice(self):
if self.classRadioButton.get_active():
return self.windowInfo[1]
else:
return self.windowInfo[0]
def on_cancel(self, widget, data=None):
self.ui.response(Gtk.ResponseType.CANCEL)
self.hide()
def on_ok(self, widget, data=None):
self.response(Gtk.ResponseType.OK)
self.hide()
class RecordDialog(DialogBase):
def __init__(self, parent, closure):
self.closure = closure
builder = configwindow.get_ui("recorddialog.xml")
self.ui = builder.get_object("recorddialog")
builder.connect_signals(self)
self.ui.set_transient_for(parent)
self.keyboardButton = builder.get_object("keyboardButton")
self.mouseButton = builder.get_object("mouseButton")
self.spinButton = builder.get_object("spinButton")
DialogBase.__init__(self)
def get_record_keyboard(self):
return self.keyboardButton.get_active()
def get_record_mouse(self):
return self.mouseButton.get_active()
def get_delay(self):
return self.spinButton.get_value_as_int()
def on_response(self, widget, responseId):
self.closure(responseId, self.get_record_keyboard(), self.get_record_mouse(), self.get_delay())
def on_cancel(self, widget, data=None):
self.ui.response(Gtk.ResponseType.CANCEL)
self.hide()
def valid(self):
return True
|
wateraccounting/wa
|
refs/heads/master
|
Functions/Start/__init__.py
|
1
|
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels and Gonzalo Espinoza
UNESCO-IHE 2017
Contact: t.hessels@unesco-ihe.org
g.espinoza@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: wa/Functions/Start
Description:
This module contains a compilation of scripts and functions used to calculate the water accounting sheets.
This data is used within a water accounting framework.
(http://www.wateraccounting.org/)
"""
from wa.Functions.Start import Area_converter, Boundaries, Download_Data, Eightdaily_to_monthly_state, Get_Dictionaries, Weekly_to_monthly_flux, Sixteendaily_to_monthly_state, Monthly_to_yearly_flux, Day_to_monthly_flux, WaitbarConsole
__all__ = ['Area_converter', 'Boundaries', 'Download_Data','Eightdaily_to_monthly_state', 'Get_Dictionaries', 'Weekly_to_monthly_flux', 'Sixteendaily_to_monthly_state', 'Monthly_to_yearly_flux', 'Day_to_monthly_flux', 'WaitbarConsole']
__version__ = '0.1'
|
harshitamistry/calligraRepository
|
refs/heads/master
|
3rdparty/google-breakpad/src/tools/gyp/test/mac/gyptest-archs.py
|
96
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tests things related to ARCHS.
"""
import TestGyp
import subprocess
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
def CheckFileType(file, expected):
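    # `file -b` prints just the type description; compare it against the
    # expected Mach-O string to verify the built binary's architecture.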
proc = subprocess.Popen(['file', '-b', file], stdout=subprocess.PIPE)
o = proc.communicate()[0].strip()
assert not proc.returncode
if o != expected:
print 'File: Expected %s, got %s' % (expected, o)
test.fail_test()
test.run_gyp('test-no-archs.gyp', chdir='archs')
test.build('test-no-archs.gyp', test.ALL, chdir='archs')
result_file = test.built_file_path('Test', chdir='archs')
test.must_exist(result_file)
CheckFileType(result_file, 'Mach-O executable i386')
test.run_gyp('test-archs-x86_64.gyp', chdir='archs')
test.build('test-archs-x86_64.gyp', test.ALL, chdir='archs')
result_file = test.built_file_path('Test64', chdir='archs')
test.must_exist(result_file)
CheckFileType(result_file, 'Mach-O 64-bit executable x86_64')
|
marckuz/django
|
refs/heads/master
|
tests/test_runner_deprecation_app/__init__.py
|
12133432
| |
piperck/redis-py
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
LamCiuLoeng/luigi
|
refs/heads/master
|
test/contrib/__init__.py
|
12133432
| |
benh/twesos
|
refs/heads/master
|
third_party/zookeeper-3.3.1/src/contrib/zkpython/src/test/connection_test.py
|
26
|
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest, threading
import zookeeper, zktestbase
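# World-readable open ACL: 0x1f sets every permission bit for the
# "world"/"anyone" identity (the C client's ZOO_OPEN_ACL_UNSAFE).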
ZOO_OPEN_ACL_UNSAFE = {"perms": 0x1f, "scheme": "world", "id": "anyone"}
class ConnectionTest(zktestbase.TestBase):
"""Test whether we can make a connection"""
def setUp(self):
pass
def testconnection(self):
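        # Block on a condition variable until the asynchronous connection
        # watcher fires (or the 15-second wait times out).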
cv = threading.Condition()
self.connected = False
def connection_watcher(handle, type, state, path):
cv.acquire()
self.connected = True
self.assertEqual(zookeeper.CONNECTED_STATE, state)
self.handle = handle
cv.notify()
cv.release()
cv.acquire()
ret = zookeeper.init(self.host, connection_watcher)
cv.wait(15.0)
cv.release()
self.assertEqual(self.connected, True, "Connection timed out to " + self.host)
self.assertEqual(zookeeper.CONNECTED_STATE, zookeeper.state(self.handle))
self.assertEqual(zookeeper.close(self.handle), zookeeper.OK)
# Trying to close the same handle twice is an error, and the C library will segfault on it
# so make sure this is caught at the Python module layer
self.assertRaises(zookeeper.ZooKeeperException,
zookeeper.close,
self.handle)
self.assertRaises(zookeeper.ZooKeeperException,
zookeeper.get,
self.handle,
"/")
def testhandlereuse(self):
"""
Test a) multiple concurrent connections b) reuse of closed handles
"""
cv = threading.Condition()
self.connected = False
def connection_watcher(handle, type, state, path):
cv.acquire()
self.connected = True
self.assertEqual(zookeeper.CONNECTED_STATE, state)
self.handle = handle
cv.notify()
cv.release()
cv.acquire()
handles = [ zookeeper.init(self.host) for i in xrange(10) ]
ret = zookeeper.init(self.host, connection_watcher)
cv.wait(15.0)
cv.release()
self.assertEqual(self.connected, True, "Connection timed out to " + self.host)
self.assertEqual(True, all( [ zookeeper.state(handle) == zookeeper.CONNECTED_STATE for handle in handles ] ),
"Not all connections succeeded")
oldhandle = handles[3]
zookeeper.close(oldhandle)
newhandle = zookeeper.init(self.host)
# This assertion tests *internal* behaviour; i.e. that the module
# correctly reuses closed handles. This is therefore implementation
# dependent.
self.assertEqual(newhandle, oldhandle, "Didn't get reused handle")
def testmanyhandles(self):
"""
Test the ability of the module to support many handles.
"""
# We'd like to do more, but currently the C client doesn't
# work with > 83 handles (fails to create a pipe) on MacOS 10.5.8
handles = [ zookeeper.init(self.host) for i in xrange(63) ]
cv = threading.Condition()
self.connected = False
def connection_watcher(handle, type, state, path):
cv.acquire()
self.connected = True
self.assertEqual(zookeeper.CONNECTED_STATE, state)
self.handle = handle
cv.notify()
cv.release()
cv.acquire()
ret = zookeeper.init(self.host, connection_watcher)
cv.wait(15.0)
cv.release()
self.assertEqual(self.connected, True, "Connection timed out to " + self.host)
for i,h in enumerate(handles):
path = "/zkpython-test-handles-%s" % str(i)
self.assertEqual(path, zookeeper.create(h, path, "", [ZOO_OPEN_ACL_UNSAFE], zookeeper.EPHEMERAL))
self.assertEqual(True, all( zookeeper.close(h) == zookeeper.OK for h in handles ))
def tearDown(self):
pass
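# A minimal connection sketch outside the test harness (hedged: assumes a
# ZooKeeper server reachable at the illustrative address "localhost:2181"):
#
#   import threading, zookeeper
#   cv = threading.Condition()
#   def watcher(handle, type, state, path):
#       cv.acquire()
#       cv.notify()
#       cv.release()
#   cv.acquire()
#   handle = zookeeper.init("localhost:2181", watcher)
#   cv.wait(15.0)
#   cv.release()
#   zookeeper.close(handle)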
if __name__ == '__main__':
unittest.main()
|
aronsky/home-assistant
|
refs/heads/dev
|
tests/components/fan/test_zwave.py
|
21
|
"""Test Z-Wave fans."""
from homeassistant.components.fan import (
zwave, SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, SUPPORT_SET_SPEED)
from tests.mock.zwave import (
MockNode, MockValue, MockEntityValues, value_changed)
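# The assertions below document the speed-to-dimmer mapping these tests
# expect (derived from the tests themselves, not an authoritative spec):
# SPEED_OFF -> 0, SPEED_LOW -> 1, SPEED_MEDIUM -> 50, SPEED_HIGH -> 99,
# and turn_on() with no speed argument -> 255.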
def test_get_device_detects_fan(mock_openzwave):
"""Test get_device returns a zwave fan."""
node = MockNode()
value = MockValue(data=0, node=node)
values = MockEntityValues(primary=value)
device = zwave.get_device(node=node, values=values, node_config={})
assert isinstance(device, zwave.ZwaveFan)
assert device.supported_features == SUPPORT_SET_SPEED
assert device.speed_list == [
SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
def test_fan_turn_on(mock_openzwave):
"""Test turning on a zwave fan."""
node = MockNode()
value = MockValue(data=0, node=node)
values = MockEntityValues(primary=value)
device = zwave.get_device(node=node, values=values, node_config={})
device.turn_on()
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 255
node.reset_mock()
device.turn_on(speed=SPEED_OFF)
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 0
node.reset_mock()
device.turn_on(speed=SPEED_LOW)
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 1
node.reset_mock()
device.turn_on(speed=SPEED_MEDIUM)
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 50
node.reset_mock()
device.turn_on(speed=SPEED_HIGH)
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 99
def test_fan_turn_off(mock_openzwave):
"""Test turning off a dimmable zwave fan."""
node = MockNode()
value = MockValue(data=46, node=node)
values = MockEntityValues(primary=value)
device = zwave.get_device(node=node, values=values, node_config={})
device.turn_off()
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 0
def test_fan_value_changed(mock_openzwave):
"""Test value changed for zwave fan."""
node = MockNode()
value = MockValue(data=0, node=node)
values = MockEntityValues(primary=value)
device = zwave.get_device(node=node, values=values, node_config={})
assert not device.is_on
value.data = 10
value_changed(value)
assert device.is_on
assert device.speed == SPEED_LOW
value.data = 50
value_changed(value)
assert device.is_on
assert device.speed == SPEED_MEDIUM
value.data = 90
value_changed(value)
assert device.is_on
assert device.speed == SPEED_HIGH
|
mezz64/home-assistant
|
refs/heads/dev
|
tests/components/nightscout/__init__.py
|
6
|
"""Tests for the Nightscout integration."""
import json
from aiohttp import ClientConnectionError
from py_nightscout.models import SGV, ServerStatus
from homeassistant.components.nightscout.const import DOMAIN
from homeassistant.const import CONF_URL
from tests.async_mock import patch
from tests.common import MockConfigEntry
GLUCOSE_READINGS = [
SGV.new_from_json_dict(
json.loads(
'{"_id":"5f2b01f5c3d0ac7c4090e223","device":"xDrip-LimiTTer","date":1596654066533,"dateString":"2020-08-05T19:01:06.533Z","sgv":169,"delta":-5.257,"direction":"FortyFiveDown","type":"sgv","filtered":182823.5157,"unfiltered":182823.5157,"rssi":100,"noise":1,"sysTime":"2020-08-05T19:01:06.533Z","utcOffset":-180}'
)
)
]
SERVER_STATUS = ServerStatus.new_from_json_dict(
json.loads(
'{"status":"ok","name":"nightscout","version":"13.0.1","serverTime":"2020-08-05T18:14:02.032Z","serverTimeEpoch":1596651242032,"apiEnabled":true,"careportalEnabled":true,"boluscalcEnabled":true,"settings":{},"extendedSettings":{},"authorized":null}'
)
)
SERVER_STATUS_STATUS_ONLY = ServerStatus.new_from_json_dict(
json.loads(
'{"status":"ok","name":"nightscout","version":"14.0.4","serverTime":"2020-09-25T21:03:59.315Z","serverTimeEpoch":1601067839315,"apiEnabled":true,"careportalEnabled":true,"boluscalcEnabled":true,"settings":{"units":"mg/dl","timeFormat":12,"nightMode":false,"editMode":true,"showRawbg":"never","customTitle":"Nightscout","theme":"default","alarmUrgentHigh":true,"alarmUrgentHighMins":[30,60,90,120],"alarmHigh":true,"alarmHighMins":[30,60,90,120],"alarmLow":true,"alarmLowMins":[15,30,45,60],"alarmUrgentLow":true,"alarmUrgentLowMins":[15,30,45],"alarmUrgentMins":[30,60,90,120],"alarmWarnMins":[30,60,90,120],"alarmTimeagoWarn":true,"alarmTimeagoWarnMins":15,"alarmTimeagoUrgent":true,"alarmTimeagoUrgentMins":30,"alarmPumpBatteryLow":false,"language":"en","scaleY":"log","showPlugins":"dbsize delta direction upbat","showForecast":"ar2","focusHours":3,"heartbeat":60,"baseURL":"","authDefaultRoles":"status-only","thresholds":{"bgHigh":260,"bgTargetTop":180,"bgTargetBottom":80,"bgLow":55},"insecureUseHttp":true,"secureHstsHeader":false,"secureHstsHeaderIncludeSubdomains":false,"secureHstsHeaderPreload":false,"secureCsp":false,"deNormalizeDates":false,"showClockDelta":false,"showClockLastTime":false,"bolusRenderOver":1,"frameUrl1":"","frameUrl2":"","frameUrl3":"","frameUrl4":"","frameUrl5":"","frameUrl6":"","frameUrl7":"","frameUrl8":"","frameName1":"","frameName2":"","frameName3":"","frameName4":"","frameName5":"","frameName6":"","frameName7":"","frameName8":"","DEFAULT_FEATURES":["bgnow","delta","direction","timeago","devicestatus","upbat","errorcodes","profile","dbsize"],"alarmTypes":["predict"],"enable":["careportal","boluscalc","food","bwp","cage","sage","iage","iob","cob","basal","ar2","rawbg","pushover","bgi","pump","openaps","treatmentnotify","bgnow","delta","direction","timeago","devicestatus","upbat","errorcodes","profile","dbsize","ar2"]},"extendedSettings":{"devicestatus":{"advanced":true,"days":1}},"authorized":null}'
)
)
async def init_integration(hass) -> MockConfigEntry:
"""Set up the Nightscout integration in Home Assistant."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_URL: "https://some.url:1234"},
)
with patch(
"homeassistant.components.nightscout.NightscoutAPI.get_sgvs",
return_value=GLUCOSE_READINGS,
), patch(
"homeassistant.components.nightscout.NightscoutAPI.get_server_status",
return_value=SERVER_STATUS,
):
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
async def init_integration_unavailable(hass) -> MockConfigEntry:
"""Set up the Nightscout integration in Home Assistant."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_URL: "https://some.url:1234"},
)
with patch(
"homeassistant.components.nightscout.NightscoutAPI.get_sgvs",
side_effect=ClientConnectionError(),
), patch(
"homeassistant.components.nightscout.NightscoutAPI.get_server_status",
return_value=SERVER_STATUS,
):
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
async def init_integration_empty_response(hass) -> MockConfigEntry:
"""Set up the Nightscout integration in Home Assistant."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_URL: "https://some.url:1234"},
)
with patch(
"homeassistant.components.nightscout.NightscoutAPI.get_sgvs", return_value=[]
), patch(
"homeassistant.components.nightscout.NightscoutAPI.get_server_status",
return_value=SERVER_STATUS,
):
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
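# A hedged usage sketch for these helpers (assumes a pytest-style `hass`
# fixture, as provided to the tests that import this module):
#
#   async def test_setup(hass):
#       entry = await init_integration(hass)
#       assert entry.domain == DOMAIN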
|
amgowano/oppia
|
refs/heads/develop
|
scripts/pre_commit_linter.py
|
1
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pre-commit script for Oppia.
This script lints Python and JavaScript code, and prints a
list of lint errors to the terminal. If the directory path is passed,
it will lint all Python and JavaScript files in that directory; otherwise,
it will only lint files that have been touched in this commit.
This script ignores all filepaths contained within the excludeFiles
argument in .jscsrc. Note that, as a side effect, Python files in those
paths will also be excluded from linting.
IMPORTANT NOTES:
1. Before running this script, you must install third-party dependencies by
running
bash scripts/start.sh
at least once.
=====================
CUSTOMIZATION OPTIONS
=====================
1. To lint only files that have been touched in this commit
python scripts/pre_commit_linter.py
2. To lint all files in the folder or to lint just a specific file
python scripts/pre_commit_linter.py --path filepath
3. To lint a specific list of files (*.js/*.py only). Separate files by spaces
python scripts/pre_commit_linter.py --files file_1 file_2 ... file_n
Note that the root folder MUST be named 'oppia'.
"""
# Pylint has issues with the import order of argparse.
# pylint: disable=wrong-import-order
import argparse
import fnmatch
import json
import multiprocessing
import os
import re
import subprocess
import sys
import time
# pylint: enable=wrong-import-order
_PARSER = argparse.ArgumentParser()
_EXCLUSIVE_GROUP = _PARSER.add_mutually_exclusive_group()
_EXCLUSIVE_GROUP.add_argument(
'--path',
help='path to the directory with files to be linted',
action='store')
_EXCLUSIVE_GROUP.add_argument(
'--files',
nargs='+',
help='specific files to be linted. Space separated list',
action='store')
BAD_PATTERNS = {
'__author__': {
'message': 'Please remove author tags from this file.',
'excluded_files': ()},
'datetime.datetime.now()': {
        'message': 'Please use datetime.datetime.utcnow() instead of '
                   'datetime.datetime.now().',
'excluded_files': ()},
'\t': {
'message': 'Please use spaces instead of tabs.',
'excluded_files': ()},
'\r': {
'message': 'Please make sure all files only have LF endings (no CRLF).',
'excluded_files': ()},
'glyphicon': {
'message': 'Please use equivalent material-icons '
'instead of glyphicons.',
'excluded_files': ()}
}
BAD_PATTERNS_JS = {
' == ': {
'message': 'Please replace == with === in this file.',
'excluded_files': (
'core/templates/dev/head/expressions/parserSpec.js',
'core/templates/dev/head/expressions/evaluatorSpec.js',
'core/templates/dev/head/expressions/typeParserSpec.js')},
' != ': {
'message': 'Please replace != with !== in this file.',
'excluded_files': (
'core/templates/dev/head/expressions/parserSpec.js',
'core/templates/dev/head/expressions/evaluatorSpec.js',
'core/templates/dev/head/expressions/typeParserSpec.js')}
}
BAD_PATTERNS_JS_REGEXP = [
{
'regexp': r"\b(ddescribe|fdescribe)\(",
'message': "In tests, please use 'describe' instead of 'ddescribe'"
"or 'fdescribe'",
'excluded_files': ()
},
{
'regexp': r"\b(iit|fit)\(",
'message': "In tests, please use 'it' instead of 'iit' or 'fit'",
'excluded_files': ()
}
]
BAD_PATTERNS_APP_YAML = {
'MINIFICATION: true': {
        'message': 'Please set the MINIFICATION env variable in app.yaml '
                   'to False before committing.',
'excluded_files': ()}
}
EXCLUDED_PATHS = (
'third_party/*', 'build/*', '.git/*', '*.pyc', 'CHANGELOG',
'scripts/pre_commit_linter.py', 'integrations/*',
'integrations_dev/*', '*.svg', '*.png', '*.zip', '*.ico', '*.jpg',
'*.min.js', 'assets/scripts/*')
if not os.getcwd().endswith('oppia'):
    print ''
    print 'ERROR    Please run this script from the oppia root directory.'
    sys.exit(1)
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
_PYLINT_PATH = os.path.join(_PARENT_DIR, 'oppia_tools', 'pylint-1.5.2')
if not os.path.exists(_PYLINT_PATH):
print ''
print 'ERROR Please run start.sh first to install pylint '
print ' and its dependencies.'
sys.exit(1)
_PATHS_TO_INSERT = [
_PYLINT_PATH,
os.getcwd(),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.19',
'google_appengine', 'lib', 'webapp2-2.3'),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.19',
'google_appengine', 'lib', 'yaml-3.10'),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.19',
'google_appengine', 'lib', 'jinja2-2.6'),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.19',
'google_appengine'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'webtest-1.4.2'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'numpy-1.6.1'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'browsermob-proxy-0.7.1'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'selenium-2.53.2'),
os.path.join('third_party', 'gae-pipeline-1.9.17.0'),
os.path.join('third_party', 'bleach-1.2.2'),
os.path.join('third_party', 'gae-mapreduce-1.9.17.0'),
]
for path in _PATHS_TO_INSERT:
sys.path.insert(0, path)
from pylint import lint # pylint: disable=wrong-import-position
_MESSAGE_TYPE_SUCCESS = 'SUCCESS'
_MESSAGE_TYPE_FAILED = 'FAILED'
def _get_changed_filenames():
"""Returns a list of modified files (both staged and unstaged)
Returns:
a list of filenames of modified files
"""
unstaged_files = subprocess.check_output([
'git', 'diff', '--name-only']).splitlines()
staged_files = subprocess.check_output([
'git', 'diff', '--cached', '--name-only',
'--diff-filter=ACM']).splitlines()
return unstaged_files + staged_files
def _get_glob_patterns_excluded_from_jscsrc(config_jscsrc):
"""Collects excludeFiles from jscsrc file.
Args:
- config_jscsrc: str. Path to .jscsrc file.
Returns:
a list of files in excludeFiles.
"""
with open(config_jscsrc) as f:
f.readline() # First three lines are comments
f.readline()
f.readline()
json_data = json.loads(f.read())
return json_data['excludeFiles']
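# For illustration, the .jscsrc this parser expects starts with three
# comment lines followed by plain JSON (hypothetical contents):
#
#   // Comment line 1
#   // Comment line 2
#   // Comment line 3
#   {
#       "excludeFiles": ["third_party/**", "build/**"]
#   }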
def _get_all_files_in_directory(dir_path, excluded_glob_patterns):
"""Recursively collects all files in directory and
subdirectories of specified path.
Args:
- dir_path: str. Path to the folder to be linted.
- excluded_glob_patterns: set. Set of all files to be excluded.
Returns:
a list of files in directory and subdirectories without excluded files.
"""
files_in_directory = []
for _dir, _, files in os.walk(dir_path):
for file_name in files:
filename = os.path.relpath(
os.path.join(_dir, file_name), os.getcwd())
if not any([fnmatch.fnmatch(filename, gp) for gp in
excluded_glob_patterns]):
files_in_directory.append(filename)
return files_in_directory
def _lint_js_files(node_path, jscs_path, config_jscsrc, files_to_lint, stdout,
result):
"""Prints a list of lint errors in the given list of JavaScript files.
Args:
- node_path: str. Path to the node binary.
- jscs_path: str. Path to the JSCS binary.
- config_jscsrc: str. Configuration args for the call to the JSCS binary.
- files_to_lint: list of str. A list of filepaths to lint.
- stdout: multiprocessing.Queue. A queue to store JSCS outputs
- result: multiprocessing.Queue. A queue to put results of test
Returns:
None
"""
start_time = time.time()
num_files_with_errors = 0
num_js_files = len(files_to_lint)
if not files_to_lint:
result.put('')
print 'There are no JavaScript files to lint.'
return
print 'Total js files: ', num_js_files
jscs_cmd_args = [node_path, jscs_path, config_jscsrc]
for _, filename in enumerate(files_to_lint):
print 'Linting: ', filename
proc_args = jscs_cmd_args + [filename]
proc = subprocess.Popen(
proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
linter_stdout, linter_stderr = proc.communicate()
if linter_stderr:
print 'LINTER FAILED'
print linter_stderr
sys.exit(1)
if linter_stdout:
num_files_with_errors += 1
stdout.put(linter_stdout)
if num_files_with_errors:
result.put('%s %s JavaScript files' % (
_MESSAGE_TYPE_FAILED, num_files_with_errors))
else:
result.put('%s %s JavaScript files linted (%.1f secs)' % (
_MESSAGE_TYPE_SUCCESS, num_js_files, time.time() - start_time))
print 'Js linting finished.'
def _lint_py_files(config_pylint, files_to_lint, result):
"""Prints a list of lint errors in the given list of Python files.
Args:
- config_pylint: str. Path to the .pylintrc file.
- files_to_lint: list of str. A list of filepaths to lint.
- result: multiprocessing.Queue. A queue to put results of test
Returns:
None
"""
start_time = time.time()
are_there_errors = False
num_py_files = len(files_to_lint)
if not files_to_lint:
result.put('')
print 'There are no Python files to lint.'
return
print 'Linting %s Python files' % num_py_files
_BATCH_SIZE = 50
current_batch_start_index = 0
while current_batch_start_index < len(files_to_lint):
# Note that this index is an exclusive upper bound -- i.e., the current
# batch of files ranges from 'start_index' to 'end_index - 1'.
current_batch_end_index = min(
current_batch_start_index + _BATCH_SIZE, len(files_to_lint))
current_files_to_lint = files_to_lint[
current_batch_start_index : current_batch_end_index]
print 'Linting Python files %s to %s...' % (
current_batch_start_index + 1, current_batch_end_index)
try:
# This prints output to the console.
lint.Run(current_files_to_lint + [config_pylint])
except SystemExit as e:
if str(e) != '0':
are_there_errors = True
current_batch_start_index = current_batch_end_index
if are_there_errors:
result.put('%s Python linting failed' % _MESSAGE_TYPE_FAILED)
else:
result.put('%s %s Python files linted (%.1f secs)' % (
_MESSAGE_TYPE_SUCCESS, num_py_files, time.time() - start_time))
print 'Python linting finished.'
def _get_all_files():
"""This function is used to check if this script is ran from
root directory and to return a list of all the files for linting and
pattern checks.
"""
jscsrc_path = os.path.join(os.getcwd(), '.jscsrc')
parsed_args = _PARSER.parse_args()
if parsed_args.path:
input_path = os.path.join(os.getcwd(), parsed_args.path)
if not os.path.exists(input_path):
print 'Could not locate file or directory %s. Exiting.' % input_path
print '----------------------------------------'
sys.exit(1)
if os.path.isfile(input_path):
all_files = [input_path]
else:
excluded_glob_patterns = _get_glob_patterns_excluded_from_jscsrc(
jscsrc_path)
all_files = _get_all_files_in_directory(
input_path, excluded_glob_patterns)
elif parsed_args.files:
valid_filepaths = []
invalid_filepaths = []
for f in parsed_args.files:
if os.path.isfile(f):
valid_filepaths.append(f)
else:
invalid_filepaths.append(f)
if invalid_filepaths:
print ('The following file(s) do not exist: %s\n'
'Exiting.' % invalid_filepaths)
sys.exit(1)
all_files = valid_filepaths
else:
all_files = _get_changed_filenames()
return all_files
def _pre_commit_linter(all_files):
"""This function is used to check if node-jscs dependencies are installed
and pass JSCS binary path
"""
print 'Starting linter...'
jscsrc_path = os.path.join(os.getcwd(), '.jscsrc')
pylintrc_path = os.path.join(os.getcwd(), '.pylintrc')
config_jscsrc = '--config=%s' % jscsrc_path
config_pylint = '--rcfile=%s' % pylintrc_path
parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
node_path = os.path.join(
parent_dir, 'oppia_tools', 'node-6.9.1', 'bin', 'node')
jscs_path = os.path.join(
parent_dir, 'node_modules', 'jscs', 'bin', 'jscs')
if not os.path.exists(jscs_path):
print ''
print 'ERROR Please run start.sh first to install node-jscs '
print ' and its dependencies.'
sys.exit(1)
js_files_to_lint = [
filename for filename in all_files if filename.endswith('.js')]
py_files_to_lint = [
filename for filename in all_files if filename.endswith('.py')]
js_result = multiprocessing.Queue()
linting_processes = []
js_stdout = multiprocessing.Queue()
linting_processes.append(multiprocessing.Process(
target=_lint_js_files, args=(node_path, jscs_path, config_jscsrc,
js_files_to_lint, js_stdout, js_result)))
py_result = multiprocessing.Queue()
linting_processes.append(multiprocessing.Process(
target=_lint_py_files,
args=(config_pylint, py_files_to_lint, py_result)))
print 'Starting Javascript and Python Linting'
print '----------------------------------------'
for process in linting_processes:
process.start()
for process in linting_processes:
# Require timeout parameter to prevent against endless waiting for the
# linting function to return.
process.join(timeout=600)
js_messages = []
while not js_stdout.empty():
js_messages.append(js_stdout.get())
print ''
print '\n'.join(js_messages)
print '----------------------------------------'
summary_messages = []
# Require block = False to prevent unnecessary waiting for the process
# output.
summary_messages.append(js_result.get(block=False))
summary_messages.append(py_result.get(block=False))
print '\n'.join(summary_messages)
print ''
return summary_messages
def _check_bad_patterns(all_files):
"""This function is used for detecting bad patterns.
"""
print 'Starting Pattern Checks'
print '----------------------------------------'
total_files_checked = 0
total_error_count = 0
summary_messages = []
all_files = [
filename for filename in all_files if not
any(fnmatch.fnmatch(filename, pattern) for pattern in EXCLUDED_PATHS)]
failed = False
for filename in all_files:
with open(filename) as f:
content = f.read()
total_files_checked += 1
for pattern in BAD_PATTERNS:
if pattern in content and filename not in (
BAD_PATTERNS[pattern]['excluded_files']):
failed = True
print '%s --> %s' % (
filename, BAD_PATTERNS[pattern]['message'])
total_error_count += 1
if filename.endswith('.js'):
for pattern in BAD_PATTERNS_JS:
if filename not in (
BAD_PATTERNS_JS[pattern]['excluded_files']):
if pattern in content:
failed = True
print '%s --> %s' % (
filename,
BAD_PATTERNS_JS[pattern]['message'])
total_error_count += 1
for regexp in BAD_PATTERNS_JS_REGEXP:
regexp_pattern = regexp['regexp']
if filename not in regexp['excluded_files']:
if re.search(regexp_pattern, content):
failed = True
print '%s --> %s' % (
filename,
regexp['message'])
total_error_count += 1
if filename == 'app.yaml':
for pattern in BAD_PATTERNS_APP_YAML:
if pattern in content:
failed = True
print '%s --> %s' % (
filename,
BAD_PATTERNS_APP_YAML[pattern]['message'])
total_error_count += 1
if failed:
summary_message = '%s Pattern checks failed' % _MESSAGE_TYPE_FAILED
summary_messages.append(summary_message)
else:
summary_message = '%s Pattern checks passed' % _MESSAGE_TYPE_SUCCESS
summary_messages.append(summary_message)
print ''
print '----------------------------------------'
print ''
if total_files_checked == 0:
print "There are no files to be checked."
else:
print '(%s files checked, %s errors found)' % (
total_files_checked, total_error_count)
print summary_message
return summary_messages
def main():
all_files = _get_all_files()
linter_messages = _pre_commit_linter(all_files)
pattern_messages = _check_bad_patterns(all_files)
all_messages = linter_messages + pattern_messages
if any([message.startswith(_MESSAGE_TYPE_FAILED) for message in
all_messages]):
sys.exit(1)
if __name__ == '__main__':
main()
|
Matt-Deacalion/django
|
refs/heads/master
|
tests/resolve_url/urls.py
|
357
|
from django.conf.urls import url
from django.contrib.auth import views
urlpatterns = [
url(r'^accounts/logout/$', views.logout, name='logout'),
]
|
imranyousuf/project-kappa
|
refs/heads/master
|
code/utils/list_cond.py
|
5
|
import os
def list_every_cond(subject, task_num):
"""
This function returns condition list.
Parameter
---------
subject: str
Please specify the number of the subject. For example, you should input 'sub001' if choose the first subject.
task_num: task_num
Please specify the number of the task. For example, you should input 'task001_run001' if choose the first subject.
Returns
------
condition: list
It lists each conditions
"""
sub_path = os.path.realpath('ds105/' + subject)
sub_path_cond = sub_path + '/model/model001/onsets' + '/' + task_num
    condition = [i for i in os.listdir(sub_path_cond) if not i.startswith('.')]
return condition
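# Example usage (assumes the ds105 data layout described in the docstring;
# the returned file names are illustrative):
#
#   conditions = list_every_cond('sub001', 'task001_run001')
#   # e.g. ['cond001.txt', 'cond002.txt', 'cond003.txt']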
|
bigdig/vnpy
|
refs/heads/master
|
vnpy/trader/event.py
|
2
|
"""
Event type string used in VN Trader.
"""
from vnpy.event import EVENT_TIMER # noqa
EVENT_TICK = "eTick."
EVENT_TRADE = "eTrade."
EVENT_ORDER = "eOrder."
EVENT_POSITION = "ePosition."
EVENT_ACCOUNT = "eAccount."
EVENT_CONTRACT = "eContract."
EVENT_LOG = "eLog"
|
agry/NGECore2
|
refs/heads/master
|
scripts/loot/lootPools/yavin_iv/re_junk_imperial_observer.py
|
2
|
def itemNames():
return ['armor_repair_device']
def itemChances():
return [100]
|
waidyanatha/sambro-eden
|
refs/heads/master
|
private/templates/OCHA/controllers.py
|
2
|
# -*- coding: utf-8 -*-
from os import path
from gluon import *
from gluon.storage import Storage
from s3 import *
# =============================================================================
class index():
""" Custom Home Page """
def __call__(self):
request = current.request
response = current.response
settings = current.deployment_settings
response.title = settings.get_system_name()
T = current.T
s3 = response.s3
appname = request.application
project_items = project()()
datatable_ajax_source = "/%s/default/index/project.aadata" % \
appname
s3.actions = None
project_box = DIV(H3(T("Projects")),
A(T("Add Project"),
_href = URL(c="project", f="project",
args=["create"]),
_id = "add-btn",
_class = "action-btn",
_style = "margin-right:10px;"),
project_items,
_id = "org_box",
_class = "menu_box fleft"
)
# Login/Registration forms
self_registration = settings.get_security_self_registration()
registered = False
login_form = None
login_div = None
register_form = None
register_div = None
roles = current.session.s3.roles
auth = current.auth
system_roles = auth.get_system_roles()
AUTHENTICATED = system_roles.AUTHENTICATED
if AUTHENTICATED not in roles:
# This user isn't yet logged-in
if request.cookies.has_key("registered"):
# This browser has logged-in before
registered = True
if self_registration:
# Provide a Registration box on front page
register_form = auth.s3_registration_form()
register_div = DIV(H3(T("Register")),
P(XML(T("If you would like to add data, then please %(sign_up_now)s") % \
dict(sign_up_now=B(T("sign-up now"))))))
if request.env.request_method == "POST":
post_script = \
'''$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')'''
else:
post_script = ""
register_script = \
'''$('#register-btn').attr('href','#register')
$('#login-btn').attr('href','#login')
%s
$('#register-btn').click(function(){
$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')
})
$('#login-btn').click(function(){
$('#register_form').addClass('hide')
$('#login_form').removeClass('hide')
})''' % post_script
s3.jquery_ready.append(register_script)
# Provide a login box on front page
request.args = ["login"]
auth.messages.submit_button = T("Login")
login_form = auth()
login_div = DIV(H3(T("Login")),
P(XML(T("Registered users can %(login)s to access the system") % \
dict(login=B(T("login"))))))
view = path.join(request.folder, "private", "templates",
"OCHA", "views", "index.html")
try:
# Pass view as file not str to work in compiled mode
response.view = open(view, "rb")
except IOError:
from gluon.http import HTTP
raise HTTP("404", "Unable to open Custom View: %s" % view)
return dict(title = response.title,
project_box = project_box,
r = None, # Required for dataTable to work
datatable_ajax_source = datatable_ajax_source,
self_registration=self_registration,
registered=registered,
login_form=login_form,
login_div=login_div,
register_form=register_form,
register_div=register_div
)
# =============================================================================
class project():
"""
Function to handle pagination for the project list on the homepage
"""
def __call__(self):
request = current.request
get_vars = request.get_vars
resource = current.s3db.resource("project_project")
totalrows = resource.count()
if "iDisplayLength" in get_vars:
display_length = int(request.get_vars["iDisplayLength"])
else:
display_length = 10
limit = 4 * display_length
list_fields = ["id", "name"]
filter, orderby, left = resource.datatable_filter(list_fields,
get_vars)
resource.add_filter(filter)
data = resource.select(list_fields,
start=0,
limit=limit,
orderby=orderby,
left=left,
count=True,
represent=True)
filteredrows = data["numrows"]
rfields = data["rfields"]
rows = data["rows"]
dt = S3DataTable(rfields, rows)
dt.defaultActionButtons(resource)
current.response.s3.no_formats = True
if request.extension == "html":
items = dt.html(totalrows,
totalrows,
"org_dt",
dt_displayLength=display_length,
dt_ajax_url=URL(c="default",
f="index",
args=["project"],
extension="aadata",
vars={"id": "org_dt"},
),
dt_pagination="true",
)
elif request.extension.lower() == "aadata":
if "sEcho" in request.vars:
echo = int(request.vars.sEcho)
else:
echo = None
items = dt.json(totalrows,
filteredrows,
"org_dt",
echo)
else:
from gluon.http import HTTP
raise HTTP(501, resource.ERROR.BAD_FORMAT)
return items
# END =========================================================================
|
tafaRU/odoo
|
refs/heads/8.0
|
addons/website_mail/tests/__init__.py
|
121
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
#    Copyright (c) 2012-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import test_controllers
checks = [
test_controllers,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
hannes/linux
|
refs/heads/master
|
Documentation/admin-guide/conf.py
|
361
|
# -*- coding: utf-8; mode: python -*-
project = 'Linux Kernel User Documentation'
tags.add("subproject")
latex_documents = [
('index', 'linux-user.tex', 'Linux Kernel User Documentation',
'The kernel development community', 'manual'),
]
|
ajaali/django
|
refs/heads/master
|
django/db/models/sql/where.py
|
439
|
"""
Code to manage the creation and SQL rendering of 'where' constraints.
"""
from django.db.models.sql.datastructures import EmptyResultSet
from django.utils import tree
from django.utils.functional import cached_property
# Connection types
AND = 'AND'
OR = 'OR'
class WhereNode(tree.Node):
"""
Used to represent the SQL where-clause.
The class is tied to the Query class that created it (in order to create
the correct SQL).
A child is usually an expression producing boolean values. Most likely the
expression is a Lookup instance.
However, a child could also be any class with as_sql() and either
relabeled_clone() method or relabel_aliases() and clone() methods and
contains_aggregate attribute.
"""
default = AND
def split_having(self, negated=False):
"""
Returns two possibly None nodes: one for those parts of self that
should be included in the WHERE clause and one for those parts of
self that must be included in the HAVING clause.
"""
if not self.contains_aggregate:
return self, None
in_negated = negated ^ self.negated
# If the effective connector is OR and this node contains an aggregate,
# then we need to push the whole branch to HAVING clause.
may_need_split = (
(in_negated and self.connector == AND) or
(not in_negated and self.connector == OR))
if may_need_split and self.contains_aggregate:
return None, self
where_parts = []
having_parts = []
for c in self.children:
if hasattr(c, 'split_having'):
where_part, having_part = c.split_having(in_negated)
if where_part is not None:
where_parts.append(where_part)
if having_part is not None:
having_parts.append(having_part)
elif c.contains_aggregate:
having_parts.append(c)
else:
where_parts.append(c)
having_node = self.__class__(having_parts, self.connector, self.negated) if having_parts else None
where_node = self.__class__(where_parts, self.connector, self.negated) if where_parts else None
return where_node, having_node
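    # Illustrative split (informal SQL, not generated by this code): for a
    # clause equivalent to "price > 10 AND SUM(qty) > 100", split_having()
    # returns the non-aggregate part (price > 10) as the WHERE node and the
    # aggregate part (SUM(qty) > 100) as the HAVING node.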
def as_sql(self, compiler, connection):
"""
Returns the SQL version of the where clause and the value to be
substituted in. Returns '', [] if this node matches everything,
None, [] if this node is empty, and raises EmptyResultSet if this
node can't match anything.
"""
result = []
result_params = []
if self.connector == AND:
full_needed, empty_needed = len(self.children), 1
else:
full_needed, empty_needed = 1, len(self.children)
for child in self.children:
try:
sql, params = compiler.compile(child)
except EmptyResultSet:
empty_needed -= 1
else:
if sql:
result.append(sql)
result_params.extend(params)
else:
full_needed -= 1
        # Check whether this node matches nothing or everything, using
        # the counts of full and empty children still needed.
if empty_needed == 0:
if self.negated:
return '', []
else:
raise EmptyResultSet
if full_needed == 0:
if self.negated:
raise EmptyResultSet
else:
return '', []
conn = ' %s ' % self.connector
sql_string = conn.join(result)
if sql_string:
if self.negated:
# Some backends (Oracle at least) need parentheses
# around the inner SQL in the negated case, even if the
# inner SQL contains just a single expression.
sql_string = 'NOT (%s)' % sql_string
elif len(result) > 1:
sql_string = '(%s)' % sql_string
return sql_string, result_params
def get_group_by_cols(self):
cols = []
for child in self.children:
cols.extend(child.get_group_by_cols())
return cols
def relabel_aliases(self, change_map):
"""
Relabels the alias values of any children. 'change_map' is a dictionary
mapping old (current) alias values to the new values.
"""
for pos, child in enumerate(self.children):
if hasattr(child, 'relabel_aliases'):
# For example another WhereNode
child.relabel_aliases(change_map)
elif hasattr(child, 'relabeled_clone'):
self.children[pos] = child.relabeled_clone(change_map)
def clone(self):
"""
Creates a clone of the tree. Must only be called on root nodes (nodes
        with empty subtree_parents). Children must be either (Constraint,
        lookup, value) tuples, or objects supporting .clone().
"""
clone = self.__class__._new_instance(
children=[], connector=self.connector, negated=self.negated)
for child in self.children:
if hasattr(child, 'clone'):
clone.children.append(child.clone())
else:
clone.children.append(child)
return clone
def relabeled_clone(self, change_map):
clone = self.clone()
clone.relabel_aliases(change_map)
return clone
@classmethod
def _contains_aggregate(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_aggregate(c) for c in obj.children)
return obj.contains_aggregate
@cached_property
def contains_aggregate(self):
return self._contains_aggregate(self)
class NothingNode(object):
"""
A node that matches nothing.
"""
contains_aggregate = False
def as_sql(self, compiler=None, connection=None):
raise EmptyResultSet
class ExtraWhere(object):
# The contents are a black box - assume no aggregates are used.
contains_aggregate = False
def __init__(self, sqls, params):
self.sqls = sqls
self.params = params
def as_sql(self, compiler=None, connection=None):
sqls = ["(%s)" % sql for sql in self.sqls]
return " AND ".join(sqls), list(self.params or ())
class SubqueryConstraint(object):
# Even if aggregates would be used in a subquery, the outer query isn't
# interested about those.
contains_aggregate = False
def __init__(self, alias, columns, targets, query_object):
self.alias = alias
self.columns = columns
self.targets = targets
self.query_object = query_object
def as_sql(self, compiler, connection):
query = self.query_object
# QuerySet was sent
if hasattr(query, 'values'):
if query._db and connection.alias != query._db:
raise ValueError("Can't do subqueries with queries on different DBs.")
# Do not override already existing values.
if query._fields is None:
query = query.values(*self.targets)
else:
query = query._clone()
query = query.query
if query.can_filter():
# If there is no slicing in use, then we can safely drop all ordering
query.clear_ordering(True)
query_compiler = query.get_compiler(connection=connection)
return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)
def relabel_aliases(self, change_map):
self.alias = change_map.get(self.alias, self.alias)
def clone(self):
return self.__class__(
self.alias, self.columns, self.targets,
self.query_object)
|
pymedusa/Medusa
|
refs/heads/master
|
ext/subliminal/refiners/tvdb.py
|
5
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from functools import wraps
import logging
import re
from babelfish import Country
import guessit
import requests
from .. import __short_version__
from ..cache import REFINER_EXPIRATION_TIME, region
from ..utils import sanitize
from ..video import Episode
logger = logging.getLogger(__name__)
series_re = re.compile(r'^(?P<series>.*?)(?: \((?:(?P<year>\d{4})|(?P<country>[A-Z]{2}))\))?$')
def requires_auth(func):
"""Decorator for :class:`TVDBClient` methods that require authentication"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if self.token is None or self.token_expired:
self.login()
elif self.token_needs_refresh:
self.refresh_token()
return func(self, *args, **kwargs)
return wrapper
class TVDBClient(object):
"""TVDB REST API Client
:param str apikey: API key to use.
:param str username: username to use.
:param str password: password to use.
:param str language: language of the responses.
:param session: session object to use.
:type session: :class:`requests.sessions.Session` or compatible.
:param dict headers: additional headers.
:param int timeout: timeout for the requests.
"""
#: Base URL of the API
base_url = 'https://api.thetvdb.com'
#: Token lifespan
token_lifespan = timedelta(hours=1)
#: Minimum token age before a :meth:`refresh_token` is triggered
refresh_token_every = timedelta(minutes=30)
def __init__(self, apikey=None, username=None, password=None, language='en', session=None, headers=None,
timeout=10):
#: API key
self.apikey = apikey
#: Username
self.username = username
#: Password
self.password = password
#: Last token acquisition date
self.token_date = datetime.utcnow() - self.token_lifespan
#: Session for the requests
self.session = session or requests.Session()
self.session.timeout = timeout
self.session.headers.update(headers or {})
self.session.headers['Content-Type'] = 'application/json'
self.session.headers['Accept-Language'] = language
@property
def language(self):
return self.session.headers['Accept-Language']
@language.setter
def language(self, value):
self.session.headers['Accept-Language'] = value
@property
def token(self):
if 'Authorization' not in self.session.headers:
return None
return self.session.headers['Authorization'][7:]
@property
def token_expired(self):
return datetime.utcnow() - self.token_date > self.token_lifespan
@property
def token_needs_refresh(self):
return datetime.utcnow() - self.token_date > self.refresh_token_every
def login(self):
"""Login"""
# perform the request
data = {'apikey': self.apikey, 'username': self.username, 'password': self.password}
r = self.session.post(self.base_url + '/login', json=data)
r.raise_for_status()
# set the Authorization header
self.session.headers['Authorization'] = 'Bearer ' + r.json()['token']
# update token_date
self.token_date = datetime.utcnow()
def refresh_token(self):
"""Refresh token"""
# perform the request
r = self.session.get(self.base_url + '/refresh_token')
r.raise_for_status()
# set the Authorization header
self.session.headers['Authorization'] = 'Bearer ' + r.json()['token']
# update token_date
self.token_date = datetime.utcnow()
@requires_auth
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""Search series"""
# perform the request
params = {'name': name, 'imdbId': imdb_id, 'zap2itId': zap2it_id}
r = self.session.get(self.base_url + '/search/series', params=params)
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()['data']
@requires_auth
def get_series(self, id):
"""Get series"""
# perform the request
r = self.session.get(self.base_url + '/series/{}'.format(id))
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()['data']
@requires_auth
def get_series_actors(self, id):
"""Get series actors"""
# perform the request
r = self.session.get(self.base_url + '/series/{}/actors'.format(id))
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()['data']
@requires_auth
def get_series_episodes(self, id, page=1):
"""Get series episodes"""
# perform the request
params = {'page': page}
r = self.session.get(self.base_url + '/series/{}/episodes'.format(id), params=params)
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()
@requires_auth
def query_series_episodes(self, id, absolute_number=None, aired_season=None, aired_episode=None, dvd_season=None,
dvd_episode=None, imdb_id=None, page=1):
"""Query series episodes"""
# perform the request
params = {'absoluteNumber': absolute_number, 'airedSeason': aired_season, 'airedEpisode': aired_episode,
'dvdSeason': dvd_season, 'dvdEpisode': dvd_episode, 'imdbId': imdb_id, 'page': page}
r = self.session.get(self.base_url + '/series/{}/episodes/query'.format(id), params=params)
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()
@requires_auth
def get_episode(self, id):
"""Get episode"""
# perform the request
r = self.session.get(self.base_url + '/episodes/{}'.format(id))
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()['data']
#: User-Agent to use
user_agent = 'Subliminal/%s' % __short_version__
#: Configured instance of :class:`TVDBClient`
tvdb_client = TVDBClient('5EC930FB90DA1ADA', headers={'User-Agent': user_agent})
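# A hedged usage sketch of the configured client (requires network access
# and a reachable TVDB API; the series name is illustrative):
#
#   results = tvdb_client.search_series(name='The Big Bang Theory')
#   if results:
#       series = tvdb_client.get_series(results[0]['id'])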
#: Configure guessit in order to use GuessitCountryConverter
guessit.api.configure()
@region.cache_on_arguments(expiration_time=REFINER_EXPIRATION_TIME)
def search_series(name):
"""Search series.
:param str name: name of the series.
:return: the search results.
:rtype: list
"""
return tvdb_client.search_series(name)
@region.cache_on_arguments(expiration_time=REFINER_EXPIRATION_TIME)
def get_series(id):
"""Get series.
:param int id: id of the series.
:return: the series data.
:rtype: dict
"""
return tvdb_client.get_series(id)
@region.cache_on_arguments(expiration_time=REFINER_EXPIRATION_TIME)
def get_series_episode(series_id, season, episode):
"""Get an episode of a series.
:param int series_id: id of the series.
:param int season: season number of the episode.
:param int episode: episode number of the episode.
:return: the episode data.
:rtype: dict
"""
result = tvdb_client.query_series_episodes(series_id, aired_season=season, aired_episode=episode)
if result:
return tvdb_client.get_episode(result['data'][0]['id'])
def refine(video, **kwargs):
"""Refine a video by searching `TheTVDB <http://thetvdb.com/>`_.
.. note::
        This refiner only works for instances of :class:`~subliminal.video.Episode`.
Several attributes can be found:
* :attr:`~subliminal.video.Episode.series`
* :attr:`~subliminal.video.Episode.year`
* :attr:`~subliminal.video.Episode.series_imdb_id`
* :attr:`~subliminal.video.Episode.series_tvdb_id`
* :attr:`~subliminal.video.Episode.title`
* :attr:`~subliminal.video.Video.imdb_id`
* :attr:`~subliminal.video.Episode.tvdb_id`
"""
# only deal with Episode videos
if not isinstance(video, Episode):
logger.error('Cannot refine episodes')
return
# exit if the information is complete
if video.series_tvdb_id and video.tvdb_id:
logger.debug('No need to search')
return
# search the series
logger.info('Searching series %r', video.series)
results = search_series(video.series.lower())
if not results:
logger.warning('No results for series')
return
logger.debug('Found %d results', len(results))
# search for exact matches
matching_results = []
for result in results:
matching_result = {}
# use seriesName and aliases
series_names = [result['seriesName']]
series_names.extend(result['aliases'])
# parse the original series as series + year or country
original_match = series_re.match(result['seriesName']).groupdict()
# parse series year
series_year = None
if result['firstAired']:
series_year = datetime.strptime(result['firstAired'], '%Y-%m-%d').year
# discard mismatches on year
if video.year and series_year and video.year != series_year:
logger.debug('Discarding series %r mismatch on year %d', result['seriesName'], series_year)
continue
# iterate over series names
for series_name in series_names:
# parse as series, year and country
series, year, country = series_re.match(series_name).groups()
if year:
year = int(year)
if country:
country = Country.fromguessit(country)
# discard mismatches on year
if year and (video.original_series or video.year != year):
logger.debug('Discarding series name %r mismatch on year %d', series, year)
continue
# discard mismatches on country
if video.country and video.country != country:
logger.debug('Discarding series name %r mismatch on country %r', series, country)
continue
# match on sanitized series name
if sanitize(series) == sanitize(video.series):
logger.debug('Found exact match on series %r', series_name)
matching_result['match'] = {
'series': original_match['series'],
'year': series_year or year,
'country': country,
'original_series': original_match['year'] is None and country is None
}
break
# add the result on match
if matching_result:
matching_result['data'] = result
matching_results.append(matching_result)
# exit if we don't have exactly 1 matching result
if not matching_results:
logger.error('No matching series found')
return
if len(matching_results) > 1:
logger.error('Multiple matches found')
return
# get the series
matching_result = matching_results[0]
series = get_series(matching_result['data']['id'])
# add series information
logger.debug('Found series %r', series)
video.series = matching_result['match']['series']
video.alternative_series.extend(series['aliases'])
video.year = matching_result['match']['year']
video.country = matching_result['match']['country']
video.original_series = matching_result['match']['original_series']
video.series_tvdb_id = series['id']
video.series_imdb_id = series['imdbId'] or None
# get the episode
logger.info('Getting series episode %dx%d', video.season, video.episode)
episode = get_series_episode(video.series_tvdb_id, video.season, video.episode)
if not episode:
logger.warning('No results for episode')
return
# add episode information
logger.debug('Found episode %r', episode)
video.tvdb_id = episode['id']
video.title = episode['episodeName'] or None
video.imdb_id = episode['imdbId'] or None
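# A hedged usage sketch (assumes subliminal's Episode.fromname helper; the
# release name is illustrative):
#
#   video = Episode.fromname('The.Big.Bang.Theory.S11E16.720p.HDTV.x264')
#   refine(video)
#   print(video.series_tvdb_id, video.tvdb_id)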
|
papouso/odoo
|
refs/heads/8.0
|
addons/account_anglo_saxon/stock.py
|
208
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class stock_move(osv.Model):
_inherit = "stock.move"
def _get_invoice_line_vals(self, cr, uid, move, partner, inv_type, context=None):
""" Add a reference to the stock.move in the invoice line
In anglo-saxon the price for COGS should be taken from stock.move
if possible (fallback on standard_price)
"""
res = super(stock_move, self)._get_invoice_line_vals(cr, uid, move, partner, inv_type, context=context)
res.update({
'move_id': move.id,
})
return res
class stock_picking(osv.osv):
_inherit = "stock.picking"
_description = "Picking List"
def action_invoice_create(self, cr, uid, ids, journal_id=False,
group=False, type='out_invoice', context=None):
'''Return ids of created invoices for the pickings'''
res = super(stock_picking,self).action_invoice_create(cr, uid, ids, journal_id, group, type, context=context)
if type in ('in_invoice', 'in_refund'):
for inv in self.pool.get('account.invoice').browse(cr, uid, res, context=context):
for ol in inv.invoice_line:
if ol.product_id.type != 'service':
oa = ol.product_id.property_stock_account_input and ol.product_id.property_stock_account_input.id
if not oa:
oa = ol.product_id.categ_id.property_stock_account_input_categ and ol.product_id.categ_id.property_stock_account_input_categ.id
if oa:
fpos = ol.invoice_id.fiscal_position or False
a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
self.pool.get('account.invoice.line').write(cr, uid, [ol.id], {'account_id': a})
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
google/clusterfuzz
|
refs/heads/master
|
src/python/tests/appengine/handlers/reproduce_tool/testcase_info_test.py
|
1
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the reproduce tool testcase_info handler."""
# pylint: disable=protected-access
import unittest
from datastore import data_types
from handlers.reproduce_tool import testcase_info
from tests.test_libs import test_utils
@test_utils.with_cloud_emulators('datastore')
class PrepareTestcaseDictTest(unittest.TestCase):
"""Tests for _prepare_testcase_dict."""
def setUp(self):
job = data_types.Job(name='test_job', environment_string='X = 1\nY = 2\n')
job.put()
testcase = data_types.Testcase()
testcase.status = 'Pending'
testcase.open = True
testcase.job_type = 'test_job'
testcase.put()
self.testcase = testcase
def test_expected_properties_included(self):
"""Ensure that a few of the common test case properties are included."""
result = testcase_info._prepare_testcase_dict(self.testcase)
self.assertEqual(result['status'], 'Pending')
self.assertEqual(result['open'], True)
self.assertEqual(result['group_id'], 0)
def test_job_included(self):
"""Ensure that the job definition has been included."""
result = testcase_info._prepare_testcase_dict(self.testcase)
job_definition = result['job_definition']
# Order is not necessarily preserved.
self.assertIn('X = 1\n', job_definition)
self.assertIn('Y = 2\n', job_definition)
|
rtucker/sycamore
|
refs/heads/master
|
Sycamore/support/pytz/zoneinfo/Africa/Niamey.py
|
9
|
'''tzinfo timezone information for Africa/Niamey.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Niamey(DstTzInfo):
'''Africa/Niamey timezone definition. See datetime.tzinfo for details'''
zone = 'Africa/Niamey'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1911,12,31,23,51,32),
d(1934,2,26,1,0,0),
d(1960,1,1,0,0,0),
]
_transition_info = [
i(480,0,'LMT'),
i(-3600,0,'WAT'),
i(0,0,'GMT'),
i(3600,0,'WAT'),
]
Niamey = Niamey()
|
Kazade/NeHe-Website
|
refs/heads/master
|
google_appengine/lib/django-1.5/django/contrib/webdesign/lorem_ipsum.py
|
230
|
"""
Utility functions for generating "lorem ipsum" Latin text.
"""
from __future__ import unicode_literals
import random
COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti')
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
"""
Returns a randomly generated sentence of lorem ipsum text.
The first word is capitalized, and the sentence ends in either a period or
question mark. Commas are added at random.
"""
# Determine the number of comma-separated sections and number of words in
# each section for this sentence.
sections = [' '.join(random.sample(WORDS, random.randint(3, 12))) for i in range(random.randint(1, 5))]
s = ', '.join(sections)
# Convert to sentence case and add end punctuation.
return '%s%s%s' % (s[0].upper(), s[1:], random.choice('?.'))
def paragraph():
"""
Returns a randomly generated paragraph of lorem ipsum text.
The paragraph consists of between 1 and 4 sentences, inclusive.
"""
return ' '.join([sentence() for i in range(random.randint(1, 4))])
def paragraphs(count, common=True):
"""
Returns a list of paragraphs as returned by paragraph().
If `common` is True, then the first paragraph will be the standard
'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
Latin text. Either way, subsequent paragraphs will be random Latin text.
"""
paras = []
for i in range(count):
if common and i == 0:
paras.append(COMMON_P)
else:
paras.append(paragraph())
return paras
def words(count, common=True):
"""
Returns a string of `count` lorem ipsum words separated by a single space.
If `common` is True, then the first 19 words will be the standard
'lorem ipsum' words. Otherwise, all words will be selected randomly.
"""
if common:
word_list = list(COMMON_WORDS)
else:
word_list = []
c = len(word_list)
if count > c:
count -= c
while count > 0:
c = min(count, len(WORDS))
count -= c
word_list += random.sample(WORDS, c)
else:
word_list = word_list[:count]
return ' '.join(word_list)
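# Usage sketch (hypothetical session). With common=True and count <= 19 the
# result of words() is deterministic, since it is just a prefix of
# COMMON_WORDS; the sentence/paragraph helpers draw words at random, so their
# output varies between calls.
#
#   >>> words(7)
#   'lorem ipsum dolor sit amet consectetur adipisicing'
#   >>> paragraphs(2)[0] == COMMON_P   # first paragraph is the standard one
#   True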
|
danakj/chromium
|
refs/heads/master
|
ppapi/generators/idl_ast.py
|
104
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Nodes for PPAPI IDL AST."""
from idl_namespace import IDLNamespace
from idl_node import IDLNode
from idl_option import GetOption
from idl_visitor import IDLVisitor
from idl_release import IDLReleaseMap
#
# IDLLabelResolver
#
# A specialized visitor which traverses the AST, building a mapping of
# Release names to Version numbers and calculating a min version.
# The mapping is applied to the File nodes within the AST.
#
class IDLLabelResolver(IDLVisitor):
def Depart(self, node, ignore, childdata):
# Build list of Release=Version
if node.IsA('LabelItem'):
channel = node.GetProperty('channel')
if not channel:
channel = 'stable'
return (node.GetName(), node.GetProperty('VALUE'), channel)
# On completion of the Label, apply to the parent File if the
# name of the label matches the generation label.
if node.IsA('Label') and node.GetName() == GetOption('label'):
try:
node.parent.release_map = IDLReleaseMap(childdata)
except Exception as err:
node.Error('Unable to build release map: %s' % str(err))
# For File objects, set the minimum version
if node.IsA('File'):
file_min, _ = node.release_map.GetReleaseRange()
node.SetMin(file_min)
return None
#
# IDLNamespaceVersionResolver
#
# A specialized visitor which traverses the AST, building a namespace tree
# as it goes. The namespace tree is a mapping from a name to a version list.
# Labels must already be resolved before this visitor is used.
#
class IDLNamespaceVersionResolver(IDLVisitor):
NamespaceSet = set(['AST', 'Callspec', 'Interface', 'Member', 'Struct'])
#
# When we arrive at a node we must assign it a namespace and if the
# node is named, then place it in the appropriate namespace.
#
def Arrive(self, node, parent_namespace):
# If we are a File, grab the Min version and release mapping
if node.IsA('File'):
self.rmin = node.GetMinMax()[0]
self.release_map = node.release_map
# Set the min version on any non Label within the File
if not node.IsA('AST', 'File', 'Label', 'LabelItem'):
my_min, _ = node.GetMinMax()
if not my_min:
node.SetMin(self.rmin)
# If this object is not a namespace-aware object, use the parent's
if node.cls not in self.NamespaceSet:
node.namespace = parent_namespace
else:
# otherwise create one.
node.namespace = IDLNamespace(parent_namespace)
# If this node is named, place it in its parent's namespace
if parent_namespace and node.cls in IDLNode.NamedSet:
# Set version min and max based on properties
if self.release_map:
vmin = node.GetProperty('dev_version')
if vmin == None:
vmin = node.GetProperty('version')
vmax = node.GetProperty('deprecate')
# If no min is available, then use the parent File's min
if vmin == None:
rmin = self.rmin
else:
rmin = self.release_map.GetRelease(vmin)
rmax = self.release_map.GetRelease(vmax)
node.SetReleaseRange(rmin, rmax)
parent_namespace.AddNode(node)
# Pass this namespace to each child in case they inherit it
return node.namespace
#
# IDLFileTypeResolver
#
# A specialized visitor which traverses the AST and sets a FILE property
# on all file nodes. In addition, it searches the namespace, resolving all
# type references. The namespace tree must already have been populated
# before this visitor is used.
#
class IDLFileTypeResolver(IDLVisitor):
def VisitFilter(self, node, data):
return not node.IsA('Comment', 'Copyright')
def Arrive(self, node, filenode):
# Track the file node to update errors
if node.IsA('File'):
node.SetProperty('FILE', node)
filenode = node
if not node.IsA('AST'):
file_min, _ = filenode.release_map.GetReleaseRange()
if not file_min:
print 'Resetting min on %s to %s' % (node, file_min)
node.SetMinRange(file_min)
# If this node has a TYPEREF, resolve it to a version list
typeref = node.GetPropertyLocal('TYPEREF')
if typeref:
node.typelist = node.parent.namespace.FindList(typeref)
if not node.typelist:
node.Error('Could not resolve %s.' % typeref)
else:
node.typelist = None
return filenode
#
# IDLReleaseResolver
#
# A specialized visitor which will traverse the AST, and generate a mapping
# from any release to the first release in which that version of the object
# was generated. Types must already be resolved before this visitor is used.
#
class IDLReleaseResolver(IDLVisitor):
def Arrive(self, node, releases):
node.BuildReleaseMap(releases)
return releases
#
# IDLAst
#
# A specialized version of the IDLNode for containing the whole of the
# AST. Construction of the AST object will cause resolution of the
# tree including versions, types, etc. Error counts will be collected
# both per file, and on the AST itself.
#
class IDLAst(IDLNode):
def __init__(self, children):
IDLNode.__init__(self, 'AST', 'BuiltIn', 1, 0, children)
self.Resolve()
def Resolve(self):
# Set the appropriate Release=Version mapping for each File
IDLLabelResolver().Visit(self, None)
# Generate the Namespace Tree
self.namespace = IDLNamespace(None)
IDLNamespaceVersionResolver().Visit(self, self.namespace)
# Using the namespace, resolve type references
IDLFileTypeResolver().Visit(self, None)
# Build an ordered list of all releases
releases = set()
for filenode in self.GetListOf('File'):
releases |= set(filenode.release_map.GetReleases())
# Generate a per node list of releases and release mapping
IDLReleaseResolver().Visit(self, sorted(releases))
for filenode in self.GetListOf('File'):
errors = filenode.GetProperty('ERRORS')
if errors:
self.errors += errors
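# Construction sketch (hypothetical): building the AST from a list of parsed
# 'File' IDLNode trees runs the whole resolution pipeline above (labels,
# namespaces, type references, release ranges) and rolls per-file error counts
# up onto the AST itself.
#
#   ast = IDLAst(file_nodes)          # file_nodes: parsed IDLNode 'File' trees
#   for f in ast.GetListOf('File'):
#     print f.GetName(), f.GetProperty('ERRORS')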
|
MillerCMBLabUSC/lab_analysis
|
refs/heads/master
|
apps/4f_model/OldCode/polEmission.py
|
1
|
import numpy as np
import thermo as th
import scipy.integrate as intg
chi = np.deg2rad(32.5)  # angle of incidence on the reflecting surface [rad]
e0 = 8.81e-12           # permittivity constant used in the emissivity formula [F/m]
rho = 2.417e-8          # surface resistivity of the reflector [Ohm*m]
nu = 145e9              # band center frequency [Hz]
Dnu = 10e9              # half-width of the integration band [Hz]
e = lambda x: np.sqrt(4 * np.pi * e0 * x * rho)  # frequency-dependent emissivity (Hagen-Rubens-like)
e2 = (1 / np.cos(chi) - np.cos(chi))             # polarization/geometry factor at incidence chi
fact = 1e12 * (1 / .18)  # rescaling: 1e12 converts W to pW; the 1/0.18 factor is undocumented here
emisAtm = 3.34e-2        # assumed atmospheric emissivity
p1 = intg.quad(lambda x: e2 * e(x) * th.weightedSpec(x, 273, 1) , nu - Dnu, nu + Dnu)[0]
p2 = intg.quad(lambda x: e2 * e(x) * th.weightedSpec(x, 273, emisAtm) , nu - Dnu, nu + Dnu)[0]
print e2
# print p1*1e12
print (p1 - p2)*fact
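# What the two quadratures compute (sketch): the band-integrated polarized power
#   P(eps) = integral over [nu - Dnu, nu + Dnu] of e2 * e(x) * th.weightedSpec(x, 273, eps) dx
# so p1 - p2 is the excess polarized emission of a unit-emissivity 273 K source
# over one with the atmospheric emissivity emisAtm, and fact rescales the
# difference into pW (times the undocumented 1/0.18 factor).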
|
cbrewster/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/webdriver/tests/new_session/merge.py
|
3
|
# META: timeout=long
import pytest
from tests.support.asserts import assert_error, assert_success
from conftest import platform_name
@pytest.mark.skipif(platform_name() is None, reason="Unsupported platform")
@pytest.mark.parametrize("body", [lambda key, value: {"alwaysMatch": {key: value}},
lambda key, value: {"firstMatch": [{key: value}]}])
def test_platform_name(new_session, add_browser_capabilities, platform_name, body):
capabilities = body("platformName", platform_name)
if "alwaysMatch" in capabilities:
capabilities["alwaysMatch"] = add_browser_capabilities(capabilities["alwaysMatch"])
else:
capabilities["firstMatch"][0] = add_browser_capabilities(capabilities["firstMatch"][0])
response, _ = new_session({"capabilities": capabilities})
value = assert_success(response)
assert value["capabilities"]["platformName"] == platform_name
invalid_merge = [
("acceptInsecureCerts", (True, True)),
("unhandledPromptBehavior", ("accept", "accept")),
("unhandledPromptBehavior", ("accept", "dismiss")),
("timeouts", ({"script": 10}, {"script": 10})),
("timeouts", ({"script": 10}, {"pageLoad": 10})),
]
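# For illustration (hypothetical payload): each (key, value) pair above yields
# a request in which the same capability is set both in alwaysMatch and in a
# firstMatch entry, e.g.
#
#   {"capabilities": {
#       "alwaysMatch": {"acceptInsecureCerts": True},
#       "firstMatch": [{}, {"acceptInsecureCerts": True}]}}
#
# which test_merge_invalid below expects the remote end to reject with an
# "invalid argument" error.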
@pytest.mark.parametrize("key,value", invalid_merge)
def test_merge_invalid(new_session, add_browser_capabilities, key, value):
response, _ = new_session({"capabilities": {
"alwaysMatch": add_browser_capabilities({key: value[0]}),
"firstMatch": [{}, {key: value[1]}],
}})
assert_error(response, "invalid argument")
@pytest.mark.skipif(platform_name() is None, reason="Unsupported platform")
def test_merge_platformName(new_session, add_browser_capabilities, platform_name):
response, _ = new_session({"capabilities": {
"alwaysMatch": add_browser_capabilities({"timeouts": {"script": 10}}),
"firstMatch": [{
"platformName": platform_name.upper(),
"pageLoadStrategy": "none",
}, {
"platformName": platform_name,
"pageLoadStrategy": "eager",
}]}})
value = assert_success(response)
assert value["capabilities"]["platformName"] == platform_name
assert value["capabilities"]["pageLoadStrategy"] == "eager"
def test_merge_browserName(new_session, add_browser_capabilities):
response, session = new_session({"capabilities": {"alwaysMatch": add_browser_capabilities({})}})
value = assert_success(response)
browser_settings = {
"browserName": value["capabilities"]["browserName"],
"browserVersion": value["capabilities"]["browserVersion"],
}
response, _ = new_session({"capabilities": {
"alwaysMatch": add_browser_capabilities({"timeouts": {"script": 10}}),
"firstMatch": [{
"browserName": browser_settings["browserName"] + "invalid",
"pageLoadStrategy": "none",
}, {
"browserName": browser_settings["browserName"],
"pageLoadStrategy": "eager",
}]}}, delete_existing_session=True)
value = assert_success(response)
assert value["capabilities"]["browserName"] == browser_settings['browserName']
assert value["capabilities"]["pageLoadStrategy"] == "eager"
|
Brocade-OpenSource/OpenStack-DNRM-Nova
|
refs/heads/master
|
nova/db/sqlalchemy/migrate_repo/versions/140_drop_unused_postgresql_volume_sequences.py
|
28
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# NOTE(dprince): Remove unused snapshots/volumes sequences.
# These are leftovers from the ID --> UUID conversion for these tables
# that occurred in Folsom.
if migrate_engine.name == "postgresql":
base_query = """SELECT COUNT(*) FROM pg_class c
WHERE c.relkind = 'S'
AND relname = '%s';"""
result = migrate_engine.execute(base_query % "snapshots_id_seq")
if result.scalar() > 0:
sql = "DROP SEQUENCE snapshots_id_seq CASCADE;"
migrate_engine.execute(sql)
result = migrate_engine.execute(base_query % "volumes_id_seq")
if result.scalar() > 0:
sql = "DROP SEQUENCE volumes_id_seq CASCADE;"
migrate_engine.execute(sql)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name == "postgresql":
sql = """CREATE SEQUENCE snapshots_id_seq START WITH 1 INCREMENT BY 1
NO MINVALUE NO MAXVALUE CACHE 1;
ALTER SEQUENCE snapshots_id_seq OWNED BY snapshots.id;
SELECT pg_catalog.setval('snapshots_id_seq', 1, false);
ALTER TABLE ONLY snapshots ALTER COLUMN id SET DEFAULT
nextval('snapshots_id_seq'::regclass);"""
sql += """CREATE SEQUENCE volumes_id_seq START WITH 1 INCREMENT BY 1
NO MINVALUE NO MAXVALUE CACHE 1;
ALTER SEQUENCE volumes_id_seq OWNED BY volumes.id;
SELECT pg_catalog.setval('volumes_id_seq', 1, false);
ALTER TABLE ONLY volumes ALTER COLUMN id SET DEFAULT
nextval('volumes_id_seq'::regclass);"""
migrate_engine.execute(sql)
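# A manual spot check of the same condition (sketch, run in psql): after
# upgrade() the leftover sequences should be gone, so
#
#   SELECT COUNT(*) FROM pg_class c
#   WHERE c.relkind = 'S' AND relname = 'snapshots_id_seq';
#
# is expected to return 0 (and likewise for 'volumes_id_seq').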
|
tiangolo/ansible
|
refs/heads/devel
|
v1/ansible/runner/lookup_plugins/__init__.py
|
12133432
| |
Russell-IO/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/junos/__init__.py
|
12133432
| |
osneven/usbauth
|
refs/heads/master
|
src/utils/paths.py
|
1
|
'''
USBAuth, a USB device authentication tool.
Copyright (C) 2016 Oliver Stochholm Neven
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
For any further information contact me at oliver@neven.dk
'''
from os.path import expanduser, exists
from os import makedirs
# All files and directories used by the program
class Paths:
def __init__(self):
# Directory where the program source files is located
self.__source_dir = "/opt/usbauth/"
# Where to store temporary files, should be cleared every time the process is terminated
self.__temporary_dir = "/tmp/usbauth/"
self.__pid_file = self.get_temporary_dir() + "process-pid"
self.__log_name_file = self.get_temporary_dir() + "process-logname"
# Where to store files for longer periods, such as logs
self.__various_dir = "/var/usbauth/"
self.__database_file = self.get_various_dir() + "storage.db"
self.__log_dir = self.get_various_dir() + "logs/"
# Where to store configuration files
self.__configuration_dir = self.get_various_dir() + "config/"
# Where to store sensitive information, such as password hashes and salts
self.__secret_dir = self.get_various_dir() + "secret/"
self.__password_hash_file = self.get_secret_dir() + "password-hash"
self.__password_salt_file = self.get_secret_dir() + "password-salt"
# Where USB connections shows up as 'folders'
self.__usb_bus_dir = "/sys/bus/usb/devices/"
self.__usb_authorized_filename = "authorized"
self.__usb_vendor_filename = "manufacturer"
self.__usb_vendor_id_filename = "idVendor"
self.__usb_product_filename = "product"
self.__usb_product_id_filename = "idProduct"
self.__usb_serial_filename = "serial"
# Creates all the directories if they don't exist
def create_directories(self):
    directories = []
    suffix = "dir"
    for item in self.__dict__:
        if len(item) > len(suffix) and item[-len(suffix):].lower() == suffix:
            directories.append(self.__dict__[item])
    for directory in directories:
        if not exists(directory):
            makedirs(directory)
# Getters for all the directories and files
def get_source_dir(self): return self.__source_dir
def get_temporary_dir(self): return self.__temporary_dir
def get_pid_file(self): return self.__pid_file
def get_log_name_file(self): return self.__log_name_file
def get_various_dir(self): return self.__various_dir
def get_database_file(self): return self.__database_file
def get_log_dir(self): return self.__log_dir
def get_configuration_dir(self): return self.__configuration_dir
def get_secret_dir(self): return self.__secret_dir
def get_password_hash_file(self): return self.__password_hash_file
def get_password_salt_file(self): return self.__password_salt_file
def get_usb_bus_dir(self): return self.__usb_bus_dir
def get_usb_authorized_filename(self): return self.__usb_authorized_filename
def get_usb_vendor_filename(self): return self.__usb_vendor_filename
def get_usb_vendor_id_filename(self): return self.__usb_vendor_id_filename
def get_usb_product_filename(self): return self.__usb_product_filename
def get_usb_product_id_filename(self): return self.__usb_product_id_filename
def get_usb_serial_filename(self): return self.__usb_serial_filename
"""
# Check if all directories above exists, if not, create them
@staticmethod
def create_paths():
directories = [Paths.TMP_DIR, Paths.INSTALL_DIR, Paths.CONFIG_DIR, Paths.LOG_DIR]
for directory in directories:
if not exists(directory):
makedirs(directory)
# Deletes the temporary directory and all its content, should be called every time the process stops
@staticmethod
def delete_tmp_dir():
from shutil import rmtree
try:
rmtree(Paths.TMP_DIR)
except FileNotFoundError:
pass
"""
|
AccelAI/accel.ai
|
refs/heads/master
|
flask-aws/lib/python2.7/site-packages/docutils/parsers/rst/languages/sv.py
|
121
|
# $Id: sv.py 7119 2011-09-02 13:00:23Z milde $
# Author: Adam Chodorowski <chodorowski@users.sourceforge.net>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Swedish language mappings for language-dependent features of reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
u'observera': 'attention',
u'caution (translation required)': 'caution',
u'code (translation required)': 'code',
u'fara': 'danger',
u'fel': 'error',
u'v\u00e4gledning': 'hint',
u'viktigt': 'important',
u'notera': 'note',
u'tips': 'tip',
u'varning': 'warning',
u'admonition (translation required)': 'admonition',
u'sidebar (translation required)': 'sidebar',
u'\u00e4mne': 'topic',
u'line-block (translation required)': 'line-block',
u'parsed-literal (translation required)': 'parsed-literal',
u'mellanrubrik': 'rubric',
u'epigraph (translation required)': 'epigraph',
u'highlights (translation required)': 'highlights',
u'pull-quote (translation required)': 'pull-quote',
u'compound (translation required)': 'compound',
u'container (translation required)': 'container',
# u'fr\u00e5gor': 'questions',
# NOTE: A bit long, but recommended by http://www.nada.kth.se/dataterm/:
# u'fr\u00e5gor-och-svar': 'questions',
# u'vanliga-fr\u00e5gor': 'questions',
u'table (translation required)': 'table',
u'csv-table (translation required)': 'csv-table',
u'list-table (translation required)': 'list-table',
u'meta': 'meta',
'math (translation required)': 'math',
# u'bildkarta': 'imagemap', # FIXME: Translation might be too literal.
u'bild': 'image',
u'figur': 'figure',
u'inkludera': 'include',
u'r\u00e5': 'raw', # FIXME: Translation might be too literal.
u'ers\u00e4tt': 'replace',
u'unicode': 'unicode',
u'datum': 'date',
u'class (translation required)': 'class',
u'role (translation required)': 'role',
u'default-role (translation required)': 'default-role',
u'title (translation required)': 'title',
u'inneh\u00e5ll': 'contents',
u'sektionsnumrering': 'sectnum',
u'target-notes (translation required)': 'target-notes',
u'header (translation required)': 'header',
u'footer (translation required)': 'footer',
# u'fotnoter': 'footnotes',
# u'citeringar': 'citations',
}
"""Swedish name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
u'abbreviation (translation required)': 'abbreviation',
u'acronym (translation required)': 'acronym',
u'code (translation required)': 'code',
u'index (translation required)': 'index',
u'subscript (translation required)': 'subscript',
u'superscript (translation required)': 'superscript',
u'title-reference (translation required)': 'title-reference',
u'pep-reference (translation required)': 'pep-reference',
u'rfc-reference (translation required)': 'rfc-reference',
u'emphasis (translation required)': 'emphasis',
u'strong (translation required)': 'strong',
u'literal (translation required)': 'literal',
'math (translation required)': 'math',
u'named-reference (translation required)': 'named-reference',
u'anonymous-reference (translation required)': 'anonymous-reference',
u'footnote-reference (translation required)': 'footnote-reference',
u'citation-reference (translation required)': 'citation-reference',
u'substitution-reference (translation required)': 'substitution-reference',
u'target (translation required)': 'target',
u'uri-reference (translation required)': 'uri-reference',
u'r\u00e5': 'raw',}
"""Mapping of Swedish role names to canonical role names for interpreted text.
"""
|
projectatomic/anaconda
|
refs/heads/rhel7-atomic
|
utils/log_picker/__init__.py
|
5
|
#!/usr/bin/python
import os
import sys
import tempfile
import log_picker.archiving as archiving
from log_picker.archiving import ArchivationError
from log_picker.archiving import NoFilesArchivationError
import log_picker.sending as sending
from log_picker.sending import SenderError
import log_picker.logmining as logmining
from log_picker.logmining import LogMinerError
class LogPickerError(Exception):
pass
class LogPicker(object):
def __init__(self, archive_obj=None, sender_obj=None, miners=[],
use_one_file=False):
self.sender_obj = sender_obj
self.archive_obj = archive_obj
self.miners = miners
self.archive = None
self.tmpdir = None
self.files = []
self.filename = self._get_tmp_file("completelog") if use_one_file else None
def _errprint(self, msg):
"""Print message on stderr."""
sys.stderr.write('%s\n' % msg)
def _get_tmp_file(self, name, suffix="", register=True):
"""Create temp file."""
if not self.tmpdir:
self.tmpdir = tempfile.mkdtemp(prefix="lp-logs-", dir="/tmp")
name += suffix
filename = os.path.join(self.tmpdir, name)
open(filename, 'w') # Create empty file
if register:
self.files.append(filename)
return filename
def create_archive(self, name=""):
"""Create archive (one file) containing multiple log files."""
name = name or self.tmpdir or "logs"
self.archive = self._get_tmp_file(name,
suffix=self.archive_obj.file_ext, register=False)
try:
self.archive_obj.create_archive(self.archive, self.files)
except (ArchivationError):
os.remove(self.archive)
raise
def send(self):
"""Send log/archive with logs via sender object."""
if not len(self.files):
return
if not self.archive and len(self.files) > 1:
raise LogPickerError('More than one file to send. ' + \
'You have to create archive. Use create_archive() method.')
file = self.files[0]
contenttype = "text/plain"
if self.archive:
file = self.archive
contenttype = self.archive_obj.mimetype
self.sender_obj.sendfile(file, contenttype)
def getlogs(self):
"""Collect logs generated by miners passed to the constructor."""
# self.filename != None means that we should put all logs into one file.
# self.filename == None means that every log should have its own file.
if self.filename:
f = open(self.filename, 'w')
for miner in self.miners:
if not self.filename:
tmpfilename = self._get_tmp_file(miner.get_filename())
f = open(tmpfilename, 'w')
desc = "%s\n\n" % (miner.get_description())
f.write(desc)
try:
miner.set_logfile(f)
miner.getlog()
except (LogMinerError) as e:
self._errprint("Warning: %s - %s" % (miner._name, e))
f.write("\n%s\n\n\n" % e)
if not self.filename:
f.close()
# XXX Cut our anaconda dump into pieces.
if isinstance(miner, logmining.AnacondaLogMiner):
self._cut_to_pieces(tmpfilename)
if self.filename:
f.close()
def _cut_to_pieces(self, filename):
"""Create multiple log files from Anaconda dump.
Attention: Anaconda dump file on input will be used and overwritten!
@filename file with Anaconda dump"""
actual_file = os.path.basename(filename)
files = {actual_file: []}
empty_lines = 0
# Split file into memory
for line in open(filename):
    stripped = line.strip()
    if not stripped:
        empty_lines += 1
    elif empty_lines > 1 and stripped.startswith('/') \
            and stripped.endswith(':') and len(line) > 2:
        actual_file = stripped[:-1].rsplit('/', 1)[-1]  #.replace('.', '-')
        files[actual_file] = []
        empty_lines = 0
    files[actual_file].append(line)
# Overwrite original file
actual_file = os.path.basename(filename)
open(filename, 'w').writelines(files[actual_file])
del files[actual_file]
# Write other individual files
for file in files:
open(self._get_tmp_file(file), 'w').writelines(files[file])
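# Typical workflow (hypothetical; concrete archiver, sender and miner classes
# come from the sibling modules imported above and are not shown here):
#
#   picker = LogPicker(archive_obj=my_archiver, sender_obj=my_sender,
#                      miners=[my_miner])
#   picker.getlogs()          # run every miner, one temp file per miner
#   picker.create_archive()   # required when more than one file was produced
#   picker.send()             # hand the archive to the sender object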
|
nirmeshk/oh-mainline
|
refs/heads/master
|
mysite/search/migrations/0044_answer_can_have_non_logged_in_author.py
|
17
|
# This file is part of OpenHatch.
# Copyright (C) 2010 Parker Phinney
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.search.models import *
class Migration:
def forwards(self, orm):
# Adding field 'Answer.author_name'
db.add_column('search_answer', 'author_name', orm['search.answer:author_name'])
# Changing field 'HitCountCache.created_date'
# (to signature: django.db.models.fields.DateTimeField(auto_now_add=True, null=True, blank=True))
db.alter_column('search_hitcountcache', 'created_date', orm['search.hitcountcache:created_date'])
# Changing field 'Project.created_date'
# (to signature: django.db.models.fields.DateTimeField(auto_now_add=True, null=True, blank=True))
db.alter_column('search_project', 'created_date', orm['search.project:created_date'])
# Changing field 'ProjectInvolvementQuestion.created_date'
# (to signature: django.db.models.fields.DateTimeField(auto_now_add=True, null=True, blank=True))
db.alter_column('search_projectinvolvementquestion', 'created_date', orm['search.projectinvolvementquestion:created_date'])
# Changing field 'Bug.created_date'
# (to signature: django.db.models.fields.DateTimeField(auto_now_add=True, null=True, blank=True))
db.alter_column('search_bug', 'created_date', orm['search.bug:created_date'])
# Changing field 'Answer.author'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['auth.User'], null=True))
db.alter_column('search_answer', 'author_id', orm['search.answer:author'])
# Changing field 'Answer.created_date'
# (to signature: django.db.models.fields.DateTimeField(auto_now_add=True, null=True, blank=True))
db.alter_column('search_answer', 'created_date', orm['search.answer:created_date'])
def backwards(self, orm):
# Deleting field 'Answer.author_name'
db.delete_column('search_answer', 'author_name')
# Changing field 'HitCountCache.created_date'
# (to signature: django.db.models.fields.DateTimeField(auto_now_add=True, blank=True))
db.alter_column('search_hitcountcache', 'created_date', orm['search.hitcountcache:created_date'])
# Changing field 'Project.created_date'
# (to signature: django.db.models.fields.DateTimeField(auto_now_add=True, blank=True))
db.alter_column('search_project', 'created_date', orm['search.project:created_date'])
# Changing field 'ProjectInvolvementQuestion.created_date'
# (to signature: django.db.models.fields.DateTimeField(auto_now_add=True, blank=True))
db.alter_column('search_projectinvolvementquestion', 'created_date', orm['search.projectinvolvementquestion:created_date'])
# Changing field 'Bug.created_date'
# (to signature: django.db.models.fields.DateTimeField(auto_now_add=True, blank=True))
db.alter_column('search_bug', 'created_date', orm['search.bug:created_date'])
# Changing field 'Answer.author'
# (to signature: django.db.models.fields.related.ForeignKey(to=orm['auth.User']))
db.alter_column('search_answer', 'author_id', orm['search.answer:author'])
# Changing field 'Answer.created_date'
# (to signature: django.db.models.fields.DateTimeField(auto_now_add=True, blank=True))
db.alter_column('search_answer', 'created_date', orm['search.answer:created_date'])
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'search.answer': {
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'author_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['search.ProjectInvolvementQuestion']"}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
},
'search.bug': {
'as_appears_in_distribution': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'bize_size_tag_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'canonical_bug_link': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'concerns_just_documentation': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'date_reported': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'good_for_newcomers': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_polled': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1970, 1, 1, 0, 0)'}),
'last_touched': ('django.db.models.fields.DateTimeField', [], {}),
'looks_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'people_involved': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['search.Project']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'submitter_realname': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'submitter_username': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'search.hitcountcache': {
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'hashed_query': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'hit_count': ('django.db.models.fields.IntegerField', [], {}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'search.project': {
'cached_contributor_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'date_icon_was_fetched_from_ohloh': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'icon_for_profile': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_for_search_result': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_raw': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_smaller_for_badge': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True'}),
'icon_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'logo_contains_name': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'search.projectinvolvementquestion': {
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bug_style': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'key_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['search']
|
JuliBakagianni/META-SHARE
|
refs/heads/master
|
misc/tools/generateDS-2.7a/Demos/Xmlbehavior/xmlbehavior_sub.py
|
33
|
#!/usr/bin/env python
#
# Generated Tue Jun 29 16:14:16 2004 by generateDS.py.
#
import sys
from xml.dom import minidom
from xml.sax import handler, make_parser
import xmlbehavior as supermod
class xml_behaviorSub(supermod.xml_behavior):
def __init__(self, base_impl_url='', behaviors=None):
supermod.xml_behavior.__init__(self, base_impl_url, behaviors)
def get_class_dictionary(self):
return self.classDictionary
#
# Make a dictionary whose keys are class names and whose
# values are the behaviors for that class.
def make_class_dictionary(self, cleanupNameFunc):
self.classDictionary = {}
self.behaviors.make_class_dictionary(self.classDictionary, cleanupNameFunc)
supermod.xml_behavior.subclass = xml_behaviorSub
# end class xml_behaviorSub
class behaviorsSub(supermod.behaviors):
def __init__(self, behavior=None):
supermod.behaviors.__init__(self, behavior)
def make_class_dictionary(self, classDictionary, cleanupNameFunc):
for behavior in self.behavior:
behavior.make_class_dictionary(classDictionary, cleanupNameFunc)
supermod.behaviors.subclass = behaviorsSub
# end class behaviorsSub
class behaviorSub(supermod.behavior):
def __init__(self, klass='', name='', return_type='', args=None, impl_url=''):
supermod.behavior.__init__(self, klass, name, return_type, args, impl_url)
def make_class_dictionary(self, classDictionary, cleanupNameFunc):
className = cleanupNameFunc(self.klass)
if className not in classDictionary:
classDictionary[className] = []
classDictionary[className].append(self)
supermod.behavior.subclass = behaviorSub
# end class behaviorSub
class argsSub(supermod.args):
def __init__(self, arg=None):
supermod.args.__init__(self, arg)
supermod.args.subclass = argsSub
# end class argsSub
class argSub(supermod.arg):
def __init__(self, name='', data_type=''):
supermod.arg.__init__(self, name, data_type)
supermod.arg.subclass = argSub
# end class argSub
class ancillariesSub(supermod.ancillaries):
def __init__(self, ancillary=None):
supermod.ancillaries.__init__(self, ancillary)
#
# XMLBehaviors
#
supermod.ancillaries.subclass = ancillariesSub
# end class ancillariesSub
class ancillarySub(supermod.ancillary):
def __init__(self, klass='', role='', return_type='', name='', args=None, impl_url=''):
supermod.ancillary.__init__(self, klass, role, return_type, name, args, impl_url)
supermod.ancillary.subclass = ancillarySub
# end class ancillarySub
def saxParse(inFileName):
parser = make_parser()
documentHandler = supermod.SaxXml_behaviorHandler()
parser.setDocumentHandler(documentHandler)
parser.parse('file:%s' % inFileName)
rootObj = documentHandler.getRoot()
#sys.stdout.write('<?xml version="1.0" ?>\n')
#rootObj.export(sys.stdout, 0)
return rootObj
def saxParseString(inString):
parser = make_parser()
documentHandler = supermod.SaxContentHandler()
parser.setDocumentHandler(documentHandler)
parser.feed(inString)
parser.close()
rootObj = documentHandler.getRoot()
#sys.stdout.write('<?xml version="1.0" ?>\n')
#rootObj.export(sys.stdout, 0)
return rootObj
def parse(inFilename):
doc = minidom.parse(inFilename)
rootNode = doc.childNodes[0]
rootObj = supermod.xml_behavior.factory()
rootObj.build(rootNode)
#sys.stdout.write('<?xml version="1.0" ?>\n')
#rootObj.export(sys.stdout, 0)
doc = None
return rootObj
def parseString(inString):
doc = minidom.parseString(inString)
rootNode = doc.childNodes[0]
rootObj = supermod.xml_behavior.factory()
rootObj.build(rootNode)
doc = None
#sys.stdout.write('<?xml version="1.0" ?>\n')
#rootObj.export(sys.stdout, 0)
return rootObj
def parseLiteral(inFilename):
doc = minidom.parse(inFilename)
rootNode = doc.childNodes[0]
rootObj = supermod.xml_behavior.factory()
rootObj.build(rootNode)
#sys.stdout.write('from xmlbehavior_sub import *\n\n')
#sys.stdout.write('rootObj = xml_behavior(\n')
#rootObj.exportLiteral(sys.stdout, 0)
#sys.stdout.write(')\n')
doc = None
return rootObj
USAGE_TEXT = """
Usage: python ???.py <infilename>
"""
def usage():
print USAGE_TEXT
sys.exit(-1)
def main():
args = sys.argv[1:]
if len(args) != 1:
usage()
infilename = args[0]
root = parse(infilename)
sys.stdout.write('<?xml version="1.0" ?>\n')
root.export(sys.stdout, 0)
if __name__ == '__main__':
main()
#import pdb
#pdb.run('main()')
|
shaunstanislaus/magic-wormhole
|
refs/heads/master
|
src/wormhole/__init__.py
|
73
|
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
benpatterson/edx-platform
|
refs/heads/master
|
lms/djangoapps/shoppingcart/migrations/0006_auto__add_field_order_refunded_time__add_field_orderitem_refund_reques.py
|
114
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Order.refunded_time'
db.add_column('shoppingcart_order', 'refunded_time',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
# Adding field 'OrderItem.refund_requested_time'
db.add_column('shoppingcart_orderitem', 'refund_requested_time',
self.gf('django.db.models.fields.DateTimeField')(null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Order.refunded_time'
db.delete_column('shoppingcart_order', 'refunded_time')
# Deleting field 'OrderItem.refund_requested_time'
db.delete_column('shoppingcart_orderitem', 'refund_requested_time')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
|
magopian/amo-validator
|
refs/heads/master
|
tests/test_bug_626878.py
|
8
|
from js_helper import _do_test
def test_double_escaped():
"""Test that escaped characters don't result in errors."""
err = _do_test('tests/resources/bug_626878.js')
assert not err.message_count
|
izonder/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/conf/locale/ka/__init__.py
|
12133432
| |
alexlo03/ansible
|
refs/heads/devel
|
test/units/modules/network/ios/__init__.py
|
12133432
|